Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Parent: d6ab91be27
Commit: 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
39  vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD  (generated, vendored, new file)
@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cloud.go",
        "doc.go",
        "plugins.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers:all-srcs",
    ],
    tags = ["automanaged"],
)
4  vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS  (generated, vendored, new file)
@@ -0,0 +1,4 @@
assignees:
- mikedanese
owners:
- mikedanese
167  vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go  (generated, vendored, new file)
@@ -0,0 +1,167 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudprovider

import (
    "errors"
    "fmt"
    "strings"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/api/v1"
)

// Interface is an abstract, pluggable interface for cloud providers.
type Interface interface {
    // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
    LoadBalancer() (LoadBalancer, bool)
    // Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
    Instances() (Instances, bool)
    // Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
    Zones() (Zones, bool)
    // Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
    Clusters() (Clusters, bool)
    // Routes returns a routes interface along with whether the interface is supported.
    Routes() (Routes, bool)
    // ProviderName returns the cloud provider ID.
    ProviderName() string
    // ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
    ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
}

// Clusters is an abstract, pluggable interface for clusters of containers.
type Clusters interface {
    // ListClusters lists the names of the available clusters.
    ListClusters() ([]string, error)
    // Master gets back the address (either DNS name or IP address) of the master node for the cluster.
    Master(clusterName string) (string, error)
}

// TODO(#6812): Use a shorter name that's less likely to be longer than cloud
// providers' name length limits.
func GetLoadBalancerName(service *v1.Service) string {
    // GCE requires that the name of a load balancer starts with a lower case letter.
    ret := "a" + string(service.UID)
    ret = strings.Replace(ret, "-", "", -1)
    // AWS requires that the name of a load balancer is shorter than 32 bytes.
    if len(ret) > 32 {
        ret = ret[:32]
    }
    return ret
}

func GetInstanceProviderID(cloud Interface, nodeName types.NodeName) (string, error) {
    instances, ok := cloud.Instances()
    if !ok {
        return "", fmt.Errorf("failed to get instances from cloud provider")
    }
    instanceID, err := instances.InstanceID(nodeName)
    if err != nil {
        return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
    }
    return cloud.ProviderName() + "://" + instanceID, nil
}

// LoadBalancer is an abstract, pluggable interface for load balancers.
type LoadBalancer interface {
    // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service
    // GetLoadBalancer returns whether the specified load balancer exists, and
    // if so, what its status is.
    // Implementations must treat the *v1.Service parameter as read-only and not modify it.
    // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
    GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error)
    // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
    // Implementations must treat the *v1.Service and *v1.Node
    // parameters as read-only and not modify them.
    // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
    EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error)
    // UpdateLoadBalancer updates hosts under the specified load balancer.
    // Implementations must treat the *v1.Service and *v1.Node
    // parameters as read-only and not modify them.
    // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
    UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error
    // EnsureLoadBalancerDeleted deletes the specified load balancer if it
    // exists, returning nil if the load balancer specified either didn't exist or
    // was successfully deleted.
    // This construction is useful because many cloud providers' load balancers
    // have multiple underlying components, meaning a Get could say that the LB
    // doesn't exist even if some part of it is still laying around.
    // Implementations must treat the *v1.Service parameter as read-only and not modify it.
    // Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
    EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error
}

// Instances is an abstract, pluggable interface for sets of instances.
type Instances interface {
    // NodeAddresses returns the addresses of the specified instance.
    // TODO(roberthbailey): This currently is only used in such a way that it
    // returns the address of the calling instance. We should do a rename to
    // make this clearer.
    NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error)
    // ExternalID returns the cloud provider ID of the node with the specified NodeName.
    // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
    ExternalID(nodeName types.NodeName) (string, error)
    // InstanceID returns the cloud provider ID of the node with the specified NodeName.
    InstanceID(nodeName types.NodeName) (string, error)
    // InstanceType returns the type of the specified instance.
    InstanceType(name types.NodeName) (string, error)
    // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
    // expected format for the key is standard ssh-keygen format: <protocol> <blob>
    AddSSHKeyToAllInstances(user string, keyData []byte) error
    // CurrentNodeName returns the name of the node we are currently running on
    // On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
    CurrentNodeName(hostname string) (types.NodeName, error)
}

// Route is a representation of an advanced routing rule.
type Route struct {
    // Name is the name of the routing rule in the cloud-provider.
    // It will be ignored in a Create (although nameHint may influence it)
    Name string
    // TargetNode is the NodeName of the target instance.
    TargetNode types.NodeName
    // DestinationCIDR is the CIDR format IP range that this routing rule
    // applies to.
    DestinationCIDR string
}

// Routes is an abstract, pluggable interface for advanced routing rules.
type Routes interface {
    // ListRoutes lists all managed routes that belong to the specified clusterName
    ListRoutes(clusterName string) ([]*Route, error)
    // CreateRoute creates the described managed route
    // route.Name will be ignored, although the cloud-provider may use nameHint
    // to create a more user-meaningful name.
    CreateRoute(clusterName string, nameHint string, route *Route) error
    // DeleteRoute deletes the specified managed route
    // Route should be as returned by ListRoutes
    DeleteRoute(clusterName string, route *Route) error
}

var InstanceNotFound = errors.New("instance not found")

// Zone represents the location of a particular machine.
type Zone struct {
    FailureDomain string
    Region        string
}

// Zones is an abstract, pluggable interface for zone enumeration.
type Zones interface {
    // GetZone returns the Zone containing the current failure zone and locality region that the program is running in
    GetZone() (Zone, error)
}
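For reference only (not part of the vendored diff), a minimal sketch of the naming rule that GetLoadBalancerName implements: the service UID is prefixed with "a" (GCE needs a leading lower-case letter), dashes are stripped, and the result is truncated to 32 bytes (AWS limit). The UID below is a hypothetical value.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

func main() {
    svc := &v1.Service{}
    svc.UID = types.UID("38c6cb4b-6d87-11e7-8d4b-0a58ac1f1f4e") // hypothetical service UID
    // "a" + UID with dashes removed, truncated to 32 bytes:
    fmt.Println(cloudprovider.GetLoadBalancerName(svc)) // a38c6cb4b6d8711e78d4b0a58ac1f1f4
}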
18  vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go  (generated, vendored, new file)
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cloudprovider supplies interfaces and implementations for cloud service providers.
package cloudprovider // import "k8s.io/kubernetes/pkg/cloudprovider"
122  vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go  (generated, vendored, new file)
@@ -0,0 +1,122 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudprovider

import (
    "fmt"
    "io"
    "os"
    "sync"

    "github.com/golang/glog"
)

// Factory is a function that returns a cloudprovider.Interface.
// The config parameter provides an io.Reader handler to the factory in
// order to load specific configurations. If no configuration is provided
// the parameter is nil.
type Factory func(config io.Reader) (Interface, error)

// All registered cloud providers.
var (
    providersMutex sync.Mutex
    providers      = make(map[string]Factory)
)

// RegisterCloudProvider registers a cloudprovider.Factory by name. This
// is expected to happen during app startup.
func RegisterCloudProvider(name string, cloud Factory) {
    providersMutex.Lock()
    defer providersMutex.Unlock()
    if _, found := providers[name]; found {
        glog.Fatalf("Cloud provider %q was registered twice", name)
    }
    glog.V(1).Infof("Registered cloud provider %q", name)
    providers[name] = cloud
}

// IsCloudProvider returns true if name corresponds to an already registered
// cloud provider.
func IsCloudProvider(name string) bool {
    providersMutex.Lock()
    defer providersMutex.Unlock()
    _, found := providers[name]
    return found
}

// CloudProviders returns the name of all registered cloud providers in a
// string slice
func CloudProviders() []string {
    names := []string{}
    providersMutex.Lock()
    defer providersMutex.Unlock()
    for name := range providers {
        names = append(names, name)
    }
    return names
}

// GetCloudProvider creates an instance of the named cloud provider, or nil if
// the name is unknown. The error return is only used if the named provider
// was known but failed to initialize. The config parameter specifies the
// io.Reader handler of the configuration file for the cloud provider, or nil
// for no configuration.
func GetCloudProvider(name string, config io.Reader) (Interface, error) {
    providersMutex.Lock()
    defer providersMutex.Unlock()
    f, found := providers[name]
    if !found {
        return nil, nil
    }
    return f(config)
}

// InitCloudProvider creates an instance of the named cloud provider.
func InitCloudProvider(name string, configFilePath string) (Interface, error) {
    var cloud Interface
    var err error

    if name == "" {
        glog.Info("No cloud provider specified.")
        return nil, nil
    }

    if configFilePath != "" {
        var config *os.File
        config, err = os.Open(configFilePath)
        if err != nil {
            glog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
                configFilePath, err)
        }

        defer config.Close()
        cloud, err = GetCloudProvider(name, config)
    } else {
        // Pass explicit nil so plugins can actually check for nil. See
        // "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
        cloud, err = GetCloudProvider(name, nil)
    }

    if err != nil {
        return nil, fmt.Errorf("could not init cloud provider %q: %v", name, err)
    }
    if cloud == nil {
        return nil, fmt.Errorf("unknown cloud provider %q", name)
    }

    return cloud, nil
}
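For reference only (not part of the vendored diff), a minimal sketch of how a provider plugs into this registry: the package registers a Factory in init(), and a binary that imports it can later resolve the provider by name through InitCloudProvider. The package, type, and provider name below are hypothetical; a real provider would return real sub-interfaces and parse its config reader.

package exampleprovider // hypothetical out-of-tree provider package

import (
    "io"

    "k8s.io/kubernetes/pkg/cloudprovider"
)

const providerName = "example" // hypothetical provider name

type exampleCloud struct{}

func (exampleCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false }
func (exampleCloud) Instances() (cloudprovider.Instances, bool)       { return nil, false }
func (exampleCloud) Zones() (cloudprovider.Zones, bool)               { return nil, false }
func (exampleCloud) Clusters() (cloudprovider.Clusters, bool)         { return nil, false }
func (exampleCloud) Routes() (cloudprovider.Routes, bool)             { return nil, false }
func (exampleCloud) ProviderName() string                             { return providerName }
func (exampleCloud) ScrubDNS(nameservers, searches []string) ([]string, []string) {
    return nameservers, searches
}

func init() {
    // Registered at program startup; the config reader is ignored here.
    cloudprovider.RegisterCloudProvider(providerName, func(config io.Reader) (cloudprovider.Interface, error) {
        return exampleCloud{}, nil
    })
}

A binary that blank-imports this package could then obtain the provider with cloudprovider.InitCloudProvider("example", "").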
52  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD  (generated, vendored, new file)
@@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["providers.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/cloudstack:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/cloudprovider/providers/mesos:go_default_library",
        "//pkg/cloudprovider/providers/openstack:go_default_library",
        "//pkg/cloudprovider/providers/ovirt:go_default_library",
        "//pkg/cloudprovider/providers/photon:go_default_library",
        "//pkg/cloudprovider/providers/rackspace:go_default_library",
        "//pkg/cloudprovider/providers/vsphere:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/cloudprovider/providers/aws:all-srcs",
        "//pkg/cloudprovider/providers/azure:all-srcs",
        "//pkg/cloudprovider/providers/cloudstack:all-srcs",
        "//pkg/cloudprovider/providers/fake:all-srcs",
        "//pkg/cloudprovider/providers/gce:all-srcs",
        "//pkg/cloudprovider/providers/mesos:all-srcs",
        "//pkg/cloudprovider/providers/openstack:all-srcs",
        "//pkg/cloudprovider/providers/ovirt:all-srcs",
        "//pkg/cloudprovider/providers/photon:all-srcs",
        "//pkg/cloudprovider/providers/rackspace:all-srcs",
        "//pkg/cloudprovider/providers/vsphere:all-srcs",
    ],
    tags = ["automanaged"],
)
86  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD  (generated, vendored, new file)
@@ -0,0 +1,86 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "aws.go",
        "aws_instancegroups.go",
        "aws_loadbalancer.go",
        "aws_routes.go",
        "aws_utils.go",
        "device_allocator.go",
        "log_handler.go",
        "retry_handler.go",
        "sets_ippermissions.go",
        "volumes.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/api/v1/service:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/credentialprovider/aws:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:github.com/aws/aws-sdk-go/aws",
        "//vendor:github.com/aws/aws-sdk-go/aws/awserr",
        "//vendor:github.com/aws/aws-sdk-go/aws/credentials",
        "//vendor:github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
        "//vendor:github.com/aws/aws-sdk-go/aws/ec2metadata",
        "//vendor:github.com/aws/aws-sdk-go/aws/request",
        "//vendor:github.com/aws/aws-sdk-go/aws/session",
        "//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
        "//vendor:github.com/aws/aws-sdk-go/service/ec2",
        "//vendor:github.com/aws/aws-sdk-go/service/elb",
        "//vendor:github.com/golang/glog",
        "//vendor:gopkg.in/gcfg.v1",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "aws_test.go",
        "device_allocator_test.go",
        "retry_handler_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//vendor:github.com/aws/aws-sdk-go/aws",
        "//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
        "//vendor:github.com/aws/aws-sdk-go/service/ec2",
        "//vendor:github.com/aws/aws-sdk-go/service/elb",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/stretchr/testify/assert",
        "//vendor:github.com/stretchr/testify/mock",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
2  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/OWNERS  (generated, vendored, new file)
@@ -0,0 +1,2 @@
assignees:
- justinsb
3288  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go  (generated, vendored, new file)
File diff suppressed because it is too large.
90  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go  (generated, vendored, new file)
@@ -0,0 +1,90 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/autoscaling"
    "github.com/golang/glog"
)

// AWSCloud implements InstanceGroups
var _ InstanceGroups = &Cloud{}

// ResizeInstanceGroup sets the size of the specified instance group. Exported
// so it can be used by the e2e tests, which don't want to instantiate a full
// cloudprovider.
func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error {
    request := &autoscaling.UpdateAutoScalingGroupInput{
        AutoScalingGroupName: aws.String(instanceGroupName),
        MinSize:              aws.Int64(int64(size)),
        MaxSize:              aws.Int64(int64(size)),
    }
    if _, err := asg.UpdateAutoScalingGroup(request); err != nil {
        return fmt.Errorf("error resizing AWS autoscaling group: %v", err)
    }
    return nil
}

// Implement InstanceGroups.ResizeInstanceGroup
// Set the size to the fixed size
func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error {
    return ResizeInstanceGroup(c.asg, instanceGroupName, size)
}

// DescribeInstanceGroup gets info about the specified instance group.
// Exported so it can be used by the e2e tests,
// which don't want to instantiate a full cloudprovider.
func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo, error) {
    request := &autoscaling.DescribeAutoScalingGroupsInput{
        AutoScalingGroupNames: []*string{aws.String(instanceGroupName)},
    }
    response, err := asg.DescribeAutoScalingGroups(request)
    if err != nil {
        return nil, fmt.Errorf("error listing AWS autoscaling group (%s): %v", instanceGroupName, err)
    }

    if len(response.AutoScalingGroups) == 0 {
        return nil, nil
    }
    if len(response.AutoScalingGroups) > 1 {
        glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName)
    }
    group := response.AutoScalingGroups[0]
    return &awsInstanceGroup{group: group}, nil
}

// Implement InstanceGroups.DescribeInstanceGroup
// Queries the cloud provider for information about the specified instance group
func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) {
    return DescribeInstanceGroup(c.asg, instanceGroupName)
}

// awsInstanceGroup implements InstanceGroupInfo
var _ InstanceGroupInfo = &awsInstanceGroup{}

type awsInstanceGroup struct {
    group *autoscaling.Group
}

// Implement InstanceGroupInfo.CurrentSize
// The number of instances currently running under control of this group
func (g *awsInstanceGroup) CurrentSize() (int, error) {
    return len(g.group.Instances), nil
}
477  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go  (generated, vendored, new file)
@@ -0,0 +1,477 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
    "fmt"
    "reflect"
    "strconv"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/aws/aws-sdk-go/service/elb"
    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
)

const ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled"

func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool, loadBalancerAttributes *elb.LoadBalancerAttributes) (*elb.LoadBalancerDescription, error) {
    loadBalancer, err := c.describeLoadBalancer(loadBalancerName)
    if err != nil {
        return nil, err
    }

    dirty := false

    if loadBalancer == nil {
        createRequest := &elb.CreateLoadBalancerInput{}
        createRequest.LoadBalancerName = aws.String(loadBalancerName)

        createRequest.Listeners = listeners

        if internalELB {
            createRequest.Scheme = aws.String("internal")
        }

        // We are supposed to specify one subnet per AZ.
        // TODO: What happens if we have more than one subnet per AZ?
        createRequest.Subnets = stringPointerArray(subnetIDs)

        createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)

        createRequest.Tags = []*elb.Tag{
            {Key: aws.String(TagNameKubernetesCluster), Value: aws.String(c.getClusterName())},
            {Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
        }

        glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName)
        _, err := c.elb.CreateLoadBalancer(createRequest)
        if err != nil {
            return nil, err
        }

        if proxyProtocol {
            err = c.createProxyProtocolPolicy(loadBalancerName)
            if err != nil {
                return nil, err
            }

            for _, listener := range listeners {
                glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
                err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
                if err != nil {
                    return nil, err
                }
            }
        }

        dirty = true
    } else {
        // TODO: Sync internal vs non-internal

        {
            // Sync subnets
            expected := sets.NewString(subnetIDs...)
            actual := stringSetFromPointers(loadBalancer.Subnets)

            additions := expected.Difference(actual)
            removals := actual.Difference(expected)

            if removals.Len() != 0 {
                request := &elb.DetachLoadBalancerFromSubnetsInput{}
                request.LoadBalancerName = aws.String(loadBalancerName)
                request.Subnets = stringSetToPointers(removals)
                glog.V(2).Info("Detaching load balancer from removed subnets")
                _, err := c.elb.DetachLoadBalancerFromSubnets(request)
                if err != nil {
                    return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
                }
                dirty = true
            }

            if additions.Len() != 0 {
                request := &elb.AttachLoadBalancerToSubnetsInput{}
                request.LoadBalancerName = aws.String(loadBalancerName)
                request.Subnets = stringSetToPointers(additions)
                glog.V(2).Info("Attaching load balancer to added subnets")
                _, err := c.elb.AttachLoadBalancerToSubnets(request)
                if err != nil {
                    return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
                }
                dirty = true
            }
        }

        {
            // Sync security groups
            expected := sets.NewString(securityGroupIDs...)
            actual := stringSetFromPointers(loadBalancer.SecurityGroups)

            if !expected.Equal(actual) {
                // This call just replaces the security groups, unlike e.g. subnets (!)
                request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
                request.LoadBalancerName = aws.String(loadBalancerName)
                request.SecurityGroups = stringPointerArray(securityGroupIDs)
                glog.V(2).Info("Applying updated security groups to load balancer")
                _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request)
                if err != nil {
                    return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
                }
                dirty = true
            }
        }

        {
            // Sync listeners
            listenerDescriptions := loadBalancer.ListenerDescriptions

            foundSet := make(map[int]bool)
            removals := []*int64{}
            for _, listenerDescription := range listenerDescriptions {
                actual := listenerDescription.Listener
                if actual == nil {
                    glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
                    continue
                }

                found := -1
                for i, expected := range listeners {
                    if orEmpty(actual.Protocol) != orEmpty(expected.Protocol) {
                        continue
                    }
                    if orEmpty(actual.InstanceProtocol) != orEmpty(expected.InstanceProtocol) {
                        continue
                    }
                    if orZero(actual.InstancePort) != orZero(expected.InstancePort) {
                        continue
                    }
                    if orZero(actual.LoadBalancerPort) != orZero(expected.LoadBalancerPort) {
                        continue
                    }
                    if orEmpty(actual.SSLCertificateId) != orEmpty(expected.SSLCertificateId) {
                        continue
                    }
                    found = i
                }
                if found != -1 {
                    foundSet[found] = true
                } else {
                    removals = append(removals, actual.LoadBalancerPort)
                }
            }

            additions := []*elb.Listener{}
            for i := range listeners {
                if foundSet[i] {
                    continue
                }
                additions = append(additions, listeners[i])
            }

            if len(removals) != 0 {
                request := &elb.DeleteLoadBalancerListenersInput{}
                request.LoadBalancerName = aws.String(loadBalancerName)
                request.LoadBalancerPorts = removals
                glog.V(2).Info("Deleting removed load balancer listeners")
                _, err := c.elb.DeleteLoadBalancerListeners(request)
                if err != nil {
                    return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %v", err)
                }
                dirty = true
            }

            if len(additions) != 0 {
                request := &elb.CreateLoadBalancerListenersInput{}
                request.LoadBalancerName = aws.String(loadBalancerName)
                request.Listeners = additions
                glog.V(2).Info("Creating added load balancer listeners")
                _, err := c.elb.CreateLoadBalancerListeners(request)
                if err != nil {
                    return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %v", err)
                }
                dirty = true
            }
        }

        {
            // Sync proxy protocol state for new and existing listeners

            proxyPolicies := make([]*string, 0)
            if proxyProtocol {
                // Ensure the backend policy exists

                // NOTE The documentation for the AWS API indicates we could get an HTTP 400
                // back if a policy of the same name already exists. However, the aws-sdk does not
                // seem to return an error to us in these cases. Therefore, this will issue an API
                // request every time.
                err := c.createProxyProtocolPolicy(loadBalancerName)
                if err != nil {
                    return nil, err
                }

                proxyPolicies = append(proxyPolicies, aws.String(ProxyProtocolPolicyName))
            }

            foundBackends := make(map[int64]bool)
            proxyProtocolBackends := make(map[int64]bool)
            for _, backendListener := range loadBalancer.BackendServerDescriptions {
                foundBackends[*backendListener.InstancePort] = false
                proxyProtocolBackends[*backendListener.InstancePort] = proxyProtocolEnabled(backendListener)
            }

            for _, listener := range listeners {
                setPolicy := false
                instancePort := *listener.InstancePort

                if currentState, ok := proxyProtocolBackends[instancePort]; !ok {
                    // This is a new ELB backend so we only need to worry about
                    // potentially adding a policy and not removing an
                    // existing one
                    setPolicy = proxyProtocol
                } else {
                    foundBackends[instancePort] = true
                    // This is an existing ELB backend so we need to determine
                    // if the state changed
                    setPolicy = (currentState != proxyProtocol)
                }

                if setPolicy {
                    glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol)
                    err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies)
                    if err != nil {
                        return nil, err
                    }
                    dirty = true
                }
            }

            // We now need to figure out if any backend policies need removed
            // because these old policies will stick around even if there is no
            // corresponding listener anymore
            for instancePort, found := range foundBackends {
                if !found {
                    glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort)
                    err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{})
                    if err != nil {
                        return nil, err
                    }
                    dirty = true
                }
            }
        }
    }

    // Whether the ELB was new or existing, sync attributes regardless. This accounts for things
    // that cannot be specified at the time of creation and can only be modified after the fact,
    // e.g. idle connection timeout.
    {
        describeAttributesRequest := &elb.DescribeLoadBalancerAttributesInput{}
        describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
        describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest)
        if err != nil {
            glog.Warning("Unable to retrieve load balancer attributes during attribute sync")
            return nil, err
        }

        foundAttributes := &describeAttributesOutput.LoadBalancerAttributes

        // Update attributes if they're dirty
        if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) {
            glog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName)

            modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{}
            modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName)
            modifyAttributesRequest.LoadBalancerAttributes = loadBalancerAttributes
            _, err = c.elb.ModifyLoadBalancerAttributes(modifyAttributesRequest)
            if err != nil {
                return nil, fmt.Errorf("Unable to update load balancer attributes during attribute sync: %v", err)
            }
            dirty = true
        }
    }

    if dirty {
        loadBalancer, err = c.describeLoadBalancer(loadBalancerName)
        if err != nil {
            glog.Warning("Unable to retrieve load balancer after creation/update")
            return nil, err
        }
    }

    return loadBalancer, nil
}

// Makes sure that the health check for an ELB matches the configured listeners
func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, listeners []*elb.Listener) error {
    name := aws.StringValue(loadBalancer.LoadBalancerName)

    actual := loadBalancer.HealthCheck

    // Default AWS settings
    expectedHealthyThreshold := int64(2)
    expectedUnhealthyThreshold := int64(6)
    expectedTimeout := int64(5)
    expectedInterval := int64(10)

    // We only configure a TCP health-check on the first port
    expectedTarget := ""
    for _, listener := range listeners {
        if listener.InstancePort == nil {
            continue
        }
        expectedTarget = "TCP:" + strconv.FormatInt(*listener.InstancePort, 10)
        break
    }

    if expectedTarget == "" {
        return fmt.Errorf("unable to determine health check port for %q (no valid listeners)", name)
    }

    if expectedTarget == orEmpty(actual.Target) &&
        expectedHealthyThreshold == orZero(actual.HealthyThreshold) &&
        expectedUnhealthyThreshold == orZero(actual.UnhealthyThreshold) &&
        expectedTimeout == orZero(actual.Timeout) &&
        expectedInterval == orZero(actual.Interval) {
        return nil
    }

    glog.V(2).Infof("Updating load-balancer health-check for %q", name)

    healthCheck := &elb.HealthCheck{}
    healthCheck.HealthyThreshold = &expectedHealthyThreshold
    healthCheck.UnhealthyThreshold = &expectedUnhealthyThreshold
    healthCheck.Timeout = &expectedTimeout
    healthCheck.Interval = &expectedInterval
    healthCheck.Target = &expectedTarget

    request := &elb.ConfigureHealthCheckInput{}
    request.HealthCheck = healthCheck
    request.LoadBalancerName = loadBalancer.LoadBalancerName

    _, err := c.elb.ConfigureHealthCheck(request)
    if err != nil {
        return fmt.Errorf("error configuring load-balancer health-check for %q: %v", name, err)
    }

    return nil
}

// Makes sure that exactly the specified hosts are registered as instances with the load balancer
func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error {
    expected := sets.NewString()
    for _, instance := range instances {
        expected.Insert(orEmpty(instance.InstanceId))
    }

    actual := sets.NewString()
    for _, lbInstance := range lbInstances {
        actual.Insert(orEmpty(lbInstance.InstanceId))
    }

    additions := expected.Difference(actual)
    removals := actual.Difference(expected)

    addInstances := []*elb.Instance{}
    for _, instanceId := range additions.List() {
        addInstance := &elb.Instance{}
        addInstance.InstanceId = aws.String(instanceId)
        addInstances = append(addInstances, addInstance)
    }

    removeInstances := []*elb.Instance{}
    for _, instanceId := range removals.List() {
        removeInstance := &elb.Instance{}
        removeInstance.InstanceId = aws.String(instanceId)
        removeInstances = append(removeInstances, removeInstance)
    }

    if len(addInstances) > 0 {
        registerRequest := &elb.RegisterInstancesWithLoadBalancerInput{}
        registerRequest.Instances = addInstances
        registerRequest.LoadBalancerName = aws.String(loadBalancerName)
        _, err := c.elb.RegisterInstancesWithLoadBalancer(registerRequest)
        if err != nil {
            return err
        }
        glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
    }

    if len(removeInstances) > 0 {
        deregisterRequest := &elb.DeregisterInstancesFromLoadBalancerInput{}
        deregisterRequest.Instances = removeInstances
        deregisterRequest.LoadBalancerName = aws.String(loadBalancerName)
        _, err := c.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest)
        if err != nil {
            return err
        }
        glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
    }

    return nil
}

func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error {
    request := &elb.CreateLoadBalancerPolicyInput{
        LoadBalancerName: aws.String(loadBalancerName),
        PolicyName:       aws.String(ProxyProtocolPolicyName),
        PolicyTypeName:   aws.String("ProxyProtocolPolicyType"),
        PolicyAttributes: []*elb.PolicyAttribute{
            {
                AttributeName:  aws.String("ProxyProtocol"),
                AttributeValue: aws.String("true"),
            },
        },
    }
    glog.V(2).Info("Creating proxy protocol policy on load balancer")
    _, err := c.elb.CreateLoadBalancerPolicy(request)
    if err != nil {
        return fmt.Errorf("error creating proxy protocol policy on load balancer: %v", err)
    }

    return nil
}

func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, policies []*string) error {
    request := &elb.SetLoadBalancerPoliciesForBackendServerInput{
        InstancePort:     aws.Int64(instancePort),
        LoadBalancerName: aws.String(loadBalancerName),
        PolicyNames:      policies,
    }
    if len(policies) > 0 {
        glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort)
    } else {
        glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort)
    }
    _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request)
    if err != nil {
        return fmt.Errorf("error adjusting AWS loadbalancer backend policies: %v", err)
    }

    return nil
}

func proxyProtocolEnabled(backend *elb.BackendServerDescription) bool {
    for _, policy := range backend.PolicyNames {
        if aws.StringValue(policy) == ProxyProtocolPolicyName {
            return true
        }
    }

    return false
}
188  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go  (generated, vendored, new file)
@@ -0,0 +1,188 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/cloudprovider"
)

func (c *Cloud) findRouteTable(clusterName string) (*ec2.RouteTable, error) {
    // This should be unnecessary (we already filter on TagNameKubernetesCluster,
    // and something is broken if cluster name doesn't match, but anyway...
    // TODO: All clouds should be cluster-aware by default
    filters := []*ec2.Filter{newEc2Filter("tag:"+TagNameKubernetesCluster, clusterName)}
    request := &ec2.DescribeRouteTablesInput{Filters: c.addFilters(filters)}

    tables, err := c.ec2.DescribeRouteTables(request)
    if err != nil {
        return nil, err
    }

    if len(tables) == 0 {
        return nil, fmt.Errorf("unable to find route table for AWS cluster: %s", clusterName)
    }

    if len(tables) != 1 {
        return nil, fmt.Errorf("found multiple matching AWS route tables for AWS cluster: %s", clusterName)
    }
    return tables[0], nil
}

// ListRoutes implements Routes.ListRoutes
// List all routes that match the filter
func (c *Cloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
    table, err := c.findRouteTable(clusterName)
    if err != nil {
        return nil, err
    }

    var routes []*cloudprovider.Route
    var instanceIDs []*string

    for _, r := range table.Routes {
        instanceID := orEmpty(r.InstanceId)

        if instanceID == "" {
            continue
        }

        instanceIDs = append(instanceIDs, &instanceID)
    }

    instances, err := c.getInstancesByIDs(instanceIDs)
    if err != nil {
        return nil, err
    }

    for _, r := range table.Routes {
        instanceID := orEmpty(r.InstanceId)
        destinationCIDR := orEmpty(r.DestinationCidrBlock)

        if instanceID == "" || destinationCIDR == "" {
            continue
        }

        instance, found := instances[instanceID]
        if !found {
            glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
            continue
        }
        nodeName := mapInstanceToNodeName(instance)
        routeName := clusterName + "-" + destinationCIDR
        routes = append(routes, &cloudprovider.Route{Name: routeName, TargetNode: nodeName, DestinationCIDR: destinationCIDR})
    }

    return routes, nil
}

// Sets the instance attribute "source-dest-check" to the specified value
func (c *Cloud) configureInstanceSourceDestCheck(instanceID string, sourceDestCheck bool) error {
    request := &ec2.ModifyInstanceAttributeInput{}
    request.InstanceId = aws.String(instanceID)
    request.SourceDestCheck = &ec2.AttributeBooleanValue{Value: aws.Bool(sourceDestCheck)}

    _, err := c.ec2.ModifyInstanceAttribute(request)
    if err != nil {
        return fmt.Errorf("error configuring source-dest-check on instance %s: %v", instanceID, err)
    }
    return nil
}

// CreateRoute implements Routes.CreateRoute
// Create the described route
func (c *Cloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
    instance, err := c.getInstanceByNodeName(route.TargetNode)
    if err != nil {
        return err
    }

    // In addition to configuring the route itself, we also need to configure the instance to accept that traffic
    // On AWS, this requires turning source-dest checks off
    err = c.configureInstanceSourceDestCheck(orEmpty(instance.InstanceId), false)
    if err != nil {
        return err
    }

    table, err := c.findRouteTable(clusterName)
    if err != nil {
        return err
    }

    var deleteRoute *ec2.Route
    for _, r := range table.Routes {
        destinationCIDR := aws.StringValue(r.DestinationCidrBlock)

        if destinationCIDR != route.DestinationCIDR {
            continue
        }

        if aws.StringValue(r.State) == ec2.RouteStateBlackhole {
            deleteRoute = r
        }
    }

    if deleteRoute != nil {
        glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))

        request := &ec2.DeleteRouteInput{}
        request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock
        request.RouteTableId = table.RouteTableId

        _, err = c.ec2.DeleteRoute(request)
        if err != nil {
            return fmt.Errorf("error deleting blackholed AWS route (%s): %v", aws.StringValue(deleteRoute.DestinationCidrBlock), err)
        }
    }

    request := &ec2.CreateRouteInput{}
    // TODO: use ClientToken for idempotency?
    request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
    request.InstanceId = instance.InstanceId
    request.RouteTableId = table.RouteTableId

    _, err = c.ec2.CreateRoute(request)
    if err != nil {
        return fmt.Errorf("error creating AWS route (%s): %v", route.DestinationCIDR, err)
    }

    return nil
}

// DeleteRoute implements Routes.DeleteRoute
// Delete the specified route
func (c *Cloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
    table, err := c.findRouteTable(clusterName)
    if err != nil {
        return err
    }

    request := &ec2.DeleteRouteInput{}
    request.DestinationCidrBlock = aws.String(route.DestinationCIDR)
    request.RouteTableId = table.RouteTableId

    _, err = c.ec2.DeleteRoute(request)
    if err != nil {
        return fmt.Errorf("error deleting AWS route (%s): %v", route.DestinationCIDR, err)
    }

    return nil
}
1293  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_test.go  (generated, vendored, new file)
File diff suppressed because it is too large.
50  vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go  (generated, vendored, new file)
@@ -0,0 +1,50 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
    "github.com/aws/aws-sdk-go/aws"
    "k8s.io/apimachinery/pkg/util/sets"
)

func stringSetToPointers(in sets.String) []*string {
    if in == nil {
        return nil
    }
    out := make([]*string, 0, len(in))
    for k := range in {
        out = append(out, aws.String(k))
    }
    return out
}

func stringSetFromPointers(in []*string) sets.String {
    if in == nil {
        return nil
    }
    out := sets.NewString()
    for i := range in {
        out.Insert(orEmpty(in[i]))
    }
    return out
}

// orZero returns the value, or 0 if the pointer is nil
// Deprecated: prefer aws.Int64Value
func orZero(v *int64) int64 {
    return aws.Int64Value(v)
}
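For reference only (not part of the vendored diff), a small in-package sketch of how these helpers convert between the sets.String values used internally and the []*string slices the AWS SDK expects; the function name and subnet IDs are hypothetical, and such a snippet would live in an in-package _test.go file with "fmt" imported.

func exampleStringSetHelpers() {
    ids := sets.NewString("subnet-a1", "subnet-b2") // hypothetical subnet IDs

    ptrs := stringSetToPointers(ids)    // []*string, as the AWS SDK expects
    back := stringSetFromPointers(ptrs) // back to a sets.String

    fmt.Println(back.Equal(ids)) // true
    fmt.Println(orZero(nil))     // 0
}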
95
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go
generated
vendored
Normal file
95
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import "fmt"

// ExistingDevices is a map of assigned devices. Presence of a key with a device
// name in the map means that the device is allocated. Value is irrelevant and
// can be used for anything that DeviceAllocator user wants.
// Only the relevant part of device name should be in the map, e.g. "ba" for
// "/dev/xvdba".
type ExistingDevices map[mountDevice]awsVolumeID

// On AWS, we should assign new (not yet used) device names to attached volumes.
// If we reuse a previously used name, we may get the volume "attaching" forever,
// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/.
// DeviceAllocator finds available device name, taking into account already
// assigned device names from ExistingDevices map. It tries to find the next
// device name to the previously assigned one (from previous DeviceAllocator
// call), so all available device names are used eventually and it minimizes
// device name reuse.
// All these allocations are in-memory, nothing is written to / read from
// /dev directory.
type DeviceAllocator interface {
	// GetNext returns a free device name or error when there is no free device
	// name. Only the device suffix is returned, e.g. "ba" for "/dev/xvdba".
	// It's up to the caller to add the appropriate "/dev/sd" or "/dev/xvd" prefix.
	GetNext(existingDevices ExistingDevices) (mountDevice, error)
}

type deviceAllocator struct {
	firstDevice        mountDevice
	lastAssignedDevice mountDevice
	length             int
}

// NewDeviceAllocator creates a new DeviceAllocator that allocates device names
// of given length ("aaa" for length 3) and with given first device, so all
// devices before the first device are left to the operating system.
// With length 2 and firstDevice "ba", it will allocate device names
// ba, bb, ..., bz, ca, ..., cz, ..., da, ..., zz, so a..z and aa..az can be used
// by the operating system.
func NewDeviceAllocator(length int, firstDevice mountDevice) DeviceAllocator {
	lastDevice := make([]byte, length)
	for i := 0; i < length; i++ {
		lastDevice[i] = 'z'
	}
	return &deviceAllocator{
		firstDevice:        firstDevice,
		lastAssignedDevice: mountDevice(lastDevice),
		length:             length,
	}
}

func (d *deviceAllocator) GetNext(existingDevices ExistingDevices) (mountDevice, error) {
	candidate := d.lastAssignedDevice

	for {
		candidate = d.nextDevice(candidate)
		if _, found := existingDevices[candidate]; !found {
			d.lastAssignedDevice = candidate
			return candidate, nil
		}
		if candidate == d.lastAssignedDevice {
			return "", fmt.Errorf("no devices are available")
		}
	}
}

func (d *deviceAllocator) nextDevice(device mountDevice) mountDevice {
	dev := []byte(device)
	for i := d.length - 1; i >= 0; i-- {
		if dev[i] != 'z' {
			dev[i]++
			return mountDevice(dev)
		}
		dev[i] = 'a'
	}
	// all parts of device were 'z', jump to the first device
	return d.firstDevice
}
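To make the allocation scheme concrete, here is a minimal stand-alone sketch of the same wrap-around suffix generation (illustrative names only, not part of the vendored package): the next free suffix is chosen after the previously assigned one, and already-used suffixes are skipped.

package main

import "fmt"

// nextSuffix increments a fixed-length device suffix like an odometer
// ("ba" -> "bb", "bz" -> "ca"), wrapping back to first after "zz".
func nextSuffix(s, first string) string {
	b := []byte(s)
	for i := len(b) - 1; i >= 0; i-- {
		if b[i] != 'z' {
			b[i]++
			return string(b)
		}
		b[i] = 'a'
	}
	return first
}

func main() {
	used := map[string]bool{"bb": true, "bc": true} // suffixes already assigned
	last := "ba"                                    // last suffix handed out
	first := "ba"                                   // first suffix we may use

	// Allocate three suffixes, skipping the used ones.
	for i := 0; i < 3; i++ {
		candidate := last
		for {
			candidate = nextSuffix(candidate, first)
			if !used[candidate] {
				break
			}
			if candidate == last {
				panic("no devices are available")
			}
		}
		used[candidate] = true
		last = candidate
		fmt.Println("allocated", candidate) // bd, be, bf
	}
}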
103
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator_test.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDeviceAllocator(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
existingDevices ExistingDevices
|
||||
length int
|
||||
firstDevice mountDevice
|
||||
lastAllocated mountDevice
|
||||
expectedOutput mountDevice
|
||||
}{
|
||||
{
|
||||
"empty device list",
|
||||
ExistingDevices{},
|
||||
2,
|
||||
"aa",
|
||||
"aa",
|
||||
"ab",
|
||||
},
|
||||
{
|
||||
"empty device list with wrap",
|
||||
ExistingDevices{},
|
||||
2,
|
||||
"ba",
|
||||
"zz",
|
||||
"ba", // next to 'zz' is the first one, 'ba'
|
||||
},
|
||||
{
|
||||
"device list",
|
||||
ExistingDevices{"aa": "used", "ab": "used", "ac": "used"},
|
||||
2,
|
||||
"aa",
|
||||
"aa",
|
||||
"ad", // all up to "ac" are used
|
||||
},
|
||||
{
|
||||
"device list with wrap",
|
||||
ExistingDevices{"zy": "used", "zz": "used", "ba": "used"},
|
||||
2,
|
||||
"ba",
|
||||
"zx",
|
||||
"bb", // "zy", "zz" and "ba" are used
|
||||
},
|
||||
{
|
||||
"three characters with wrap",
|
||||
ExistingDevices{"zzy": "used", "zzz": "used", "baa": "used"},
|
||||
3,
|
||||
"baa",
|
||||
"zzx",
|
||||
"bab",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
allocator := NewDeviceAllocator(test.length, test.firstDevice).(*deviceAllocator)
|
||||
allocator.lastAssignedDevice = test.lastAllocated
|
||||
|
||||
got, err := allocator.GetNext(test.existingDevices)
|
||||
if err != nil {
|
||||
t.Errorf("text %q: unexpected error: %v", test.name, err)
|
||||
}
|
||||
if got != test.expectedOutput {
|
||||
t.Errorf("text %q: expected %q, got %q", test.name, test.expectedOutput, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeviceAllocatorError(t *testing.T) {
|
||||
allocator := NewDeviceAllocator(2, "ba").(*deviceAllocator)
|
||||
existingDevices := ExistingDevices{}
|
||||
|
||||
// make all devices used
|
||||
var first, second byte
|
||||
for first = 'b'; first <= 'z'; first++ {
|
||||
for second = 'a'; second <= 'z'; second++ {
|
||||
device := [2]byte{first, second}
|
||||
existingDevices[mountDevice(device[:])] = "used"
|
||||
}
|
||||
}
|
||||
|
||||
device, err := allocator.GetNext(existingDevices)
|
||||
if err == nil {
|
||||
t.Errorf("expected error, got device %q", device)
|
||||
}
|
||||
}
|
34
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/golang/glog"
)

// Handler for aws-sdk-go that logs all requests
func awsHandlerLogger(req *request.Request) {
	service := req.ClientInfo.ServiceName

	name := "?"
	if req.Operation != nil {
		name = req.Operation.Name
	}

	glog.V(4).Infof("AWS request: %s %s", service, name)
}
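A handler like this only takes effect once it is attached to the SDK's request pipeline. The following stand-alone sketch (not part of this repository; it assumes the aws-sdk-go session and ec2 packages and uses the standard log package instead of glog) shows one plausible way to hook such a logger into the Sign handler list:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// requestLogger mirrors the handler above: it logs the service and
// operation name of every outgoing request.
func requestLogger(req *request.Request) {
	name := "?"
	if req.Operation != nil {
		name = req.Operation.Name
	}
	log.Printf("AWS request: %s %s", req.ClientInfo.ServiceName, name)
}

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(sess)
	// Run the logger before each request is signed; every EC2 call made
	// through svc is now logged.
	svc.Handlers.Sign.PushFront(requestLogger)
}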
161
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
decayIntervalSeconds = 20
|
||||
decayFraction = 0.8
|
||||
maxDelay = 60 * time.Second
|
||||
)
|
||||
|
||||
// CrossRequestRetryDelay inserts delays before AWS calls, when we are observing RequestLimitExceeded errors
|
||||
// Note that we share a CrossRequestRetryDelay across multiple AWS requests; this is a process-wide back-off,
|
||||
// whereas the aws-sdk-go implements a per-request exponential backoff/retry
|
||||
type CrossRequestRetryDelay struct {
|
||||
backoff Backoff
|
||||
}
|
||||
|
||||
// Create a new CrossRequestRetryDelay
|
||||
func NewCrossRequestRetryDelay() *CrossRequestRetryDelay {
|
||||
c := &CrossRequestRetryDelay{}
|
||||
c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
return c
|
||||
}
|
||||
|
||||
// Added to the Sign chain; called before each request
|
||||
func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
|
||||
now := time.Now()
|
||||
delay := c.backoff.ComputeDelayForRequest(now)
|
||||
if delay > 0 {
|
||||
glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
|
||||
describeRequest(r), delay.String())
|
||||
r.Config.SleepDelay(delay)
|
||||
|
||||
// Avoid clock skew problems
|
||||
r.Time = now
|
||||
}
|
||||
}
|
||||
|
||||
// Return a user-friendly string describing the request, for use in log messages
|
||||
func describeRequest(r *request.Request) string {
|
||||
service := r.ClientInfo.ServiceName
|
||||
|
||||
name := "?"
|
||||
if r.Operation != nil {
|
||||
name = r.Operation.Name
|
||||
}
|
||||
|
||||
return service + "::" + name
|
||||
}
|
||||
|
||||
// Added to the AfterRetry chain; called after any error
|
||||
func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
|
||||
if r.Error == nil {
|
||||
return
|
||||
}
|
||||
awsError, ok := r.Error.(awserr.Error)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if awsError.Code() == "RequestLimitExceeded" {
|
||||
c.backoff.ReportError()
|
||||
glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
|
||||
describeRequest(r))
|
||||
}
|
||||
}
|
||||
|
||||
// Backoff manages a backoff that varies based on the recently observed failures
|
||||
type Backoff struct {
|
||||
decayIntervalSeconds int64
|
||||
decayFraction float64
|
||||
maxDelay time.Duration
|
||||
|
||||
mutex sync.Mutex
|
||||
|
||||
// We count all requests & the number of requests which hit a
|
||||
// RequestLimit. We only really care about 'recent' requests, so we
|
||||
// decay the counts exponentially to bias towards recent values.
|
||||
countErrorsRequestLimit float32
|
||||
countRequests float32
|
||||
lastDecay int64
|
||||
}
|
||||
|
||||
func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay time.Duration) {
|
||||
b.lastDecay = time.Now().Unix()
|
||||
// Bias so that if the first request hits the limit we don't immediately apply the full delay
|
||||
b.countRequests = 4
|
||||
b.decayIntervalSeconds = int64(decayIntervalSeconds)
|
||||
b.decayFraction = decayFraction
|
||||
b.maxDelay = maxDelay
|
||||
}
|
||||
|
||||
// Computes the delay required for a request, also updating internal state to count this request
|
||||
func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration {
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
|
||||
// Apply exponential decay to the counters
|
||||
timeDeltaSeconds := now.Unix() - b.lastDecay
|
||||
if timeDeltaSeconds > b.decayIntervalSeconds {
|
||||
intervals := float64(timeDeltaSeconds) / float64(b.decayIntervalSeconds)
|
||||
decay := float32(math.Pow(b.decayFraction, intervals))
|
||||
b.countErrorsRequestLimit *= decay
|
||||
b.countRequests *= decay
|
||||
b.lastDecay = now.Unix()
|
||||
}
|
||||
|
||||
// Count this request
|
||||
b.countRequests += 1.0
|
||||
|
||||
// Compute the failure rate
|
||||
errorFraction := float32(0.0)
|
||||
if b.countRequests > 0.5 {
|
||||
// Avoid tiny residuals & rounding errors
|
||||
errorFraction = b.countErrorsRequestLimit / b.countRequests
|
||||
}
|
||||
|
||||
// Ignore a low fraction of errors
|
||||
// This also allows them to time-out
|
||||
if errorFraction < 0.1 {
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
// Delay by the max delay multiplied by the recent error rate
|
||||
// (i.e. we apply a linear delay function)
|
||||
// TODO: This is pretty arbitrary
|
||||
delay := time.Nanosecond * time.Duration(float32(b.maxDelay.Nanoseconds())*errorFraction)
|
||||
// Round down to the nearest second for sanity
|
||||
return time.Second * time.Duration(int(delay.Seconds()))
|
||||
}
|
||||
|
||||
// Called when we observe a throttling error
|
||||
func (b *Backoff) ReportError() {
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
|
||||
b.countErrorsRequestLimit += 1.0
|
||||
}
|
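The delay computation above reduces to two pieces: exponentially decayed request/error counters and a delay that scales linearly with the recent error fraction, capped at maxDelay. A stand-alone sketch of that arithmetic (illustrative names and sample counts, not the vendored type):

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const (
		decayInterval = 20.0             // seconds between decay steps
		decayFraction = 0.8              // multiplier applied per interval
		maxDelay      = 60 * time.Second // upper bound on the inserted delay
	)

	// Counters as they might look after a burst of throttled requests.
	requests := float32(100)
	errors := float32(60)
	elapsed := 40.0 // seconds since the counters were last decayed

	// Apply exponential decay so that old samples matter less.
	intervals := elapsed / decayInterval
	decay := float32(math.Pow(decayFraction, float64(intervals)))
	requests *= decay
	errors *= decay

	// Delay is maxDelay scaled by the recent error fraction, floored to seconds.
	errorFraction := errors / requests
	delay := time.Duration(float32(maxDelay.Nanoseconds()) * errorFraction)
	delay = time.Second * time.Duration(int(delay.Seconds()))
	fmt.Printf("error fraction %.2f -> delay %v\n", errorFraction, delay) // 0.60 -> 36s
}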
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler_test.go
generated
vendored
Normal file
|
@ -0,0 +1,135 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// There follows a group of tests for the backoff logic. There's nothing
|
||||
// particularly special about the values chosen: if we tweak the values in the
|
||||
// backoff logic then we might well have to update the tests. However the key
|
||||
// behavioural elements should remain (e.g. no errors => no backoff), and these
|
||||
// are each tested by one of the tests below.
|
||||
|
||||
// Test that we don't apply any delays when there are no errors
|
||||
func TestBackoffNoErrors(t *testing.T) {
|
||||
b := &Backoff{}
|
||||
b.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
|
||||
now := time.Now()
|
||||
for i := 0; i < 100; i++ {
|
||||
d := b.ComputeDelayForRequest(now)
|
||||
if d.Nanoseconds() != 0 {
|
||||
t.Fatalf("unexpected delay during no-error case")
|
||||
}
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that we always apply a delay when there are errors, and also that we
|
||||
// don't "flap" - that our own delay doesn't cause us to oscillate between
|
||||
// delay and no-delay.
|
||||
func TestBackoffAllErrors(t *testing.T) {
|
||||
b := &Backoff{}
|
||||
b.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
|
||||
now := time.Now()
|
||||
// Warm up
|
||||
for i := 0; i < 10; i++ {
|
||||
_ = b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
d := b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
if d.Seconds() < 5 {
|
||||
t.Fatalf("unexpected short-delay during all-error case: %v", d)
|
||||
}
|
||||
t.Logf("delay @%d %v", i, d)
|
||||
now = now.Add(d)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that we do come close to our max delay, when we see all errors at 1
|
||||
// second intervals (this simulates multiple concurrent requests, because we
|
||||
// don't wait for delay in between requests)
|
||||
func TestBackoffHitsMax(t *testing.T) {
|
||||
b := &Backoff{}
|
||||
b.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
|
||||
now := time.Now()
|
||||
for i := 0; i < 100; i++ {
|
||||
_ = b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
d := b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
if float32(d.Nanoseconds()) < (float32(maxDelay.Nanoseconds()) * 0.95) {
|
||||
t.Fatalf("expected delay to be >= 95 percent of max delay, was %v", d)
|
||||
}
|
||||
t.Logf("delay @%d %v", i, d)
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that after a phase of errors, we eventually stop applying a delay once there are
|
||||
// no more errors.
|
||||
func TestBackoffRecovers(t *testing.T) {
|
||||
b := &Backoff{}
|
||||
b.init(decayIntervalSeconds, decayFraction, maxDelay)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// Phase of all-errors
|
||||
for i := 0; i < 100; i++ {
|
||||
_ = b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
d := b.ComputeDelayForRequest(now)
|
||||
b.ReportError()
|
||||
if d.Seconds() < 5 {
|
||||
t.Fatalf("unexpected short-delay during all-error phase: %v", d)
|
||||
}
|
||||
t.Logf("error phase delay @%d %v", i, d)
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
|
||||
// Phase of no errors
|
||||
for i := 0; i < 100; i++ {
|
||||
_ = b.ComputeDelayForRequest(now)
|
||||
now = now.Add(3 * time.Second)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
d := b.ComputeDelayForRequest(now)
|
||||
if d.Seconds() != 0 {
|
||||
t.Fatalf("unexpected delay during error recovery phase: %v", d)
|
||||
}
|
||||
t.Logf("no-error phase delay @%d %v", i, d)
|
||||
now = now.Add(time.Second)
|
||||
}
|
||||
}
|
146
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go
generated
vendored
Normal file
|
@ -0,0 +1,146 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
type IPPermissionSet map[string]*ec2.IpPermission
|
||||
|
||||
func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet {
|
||||
s := make(IPPermissionSet)
|
||||
s.Insert(items...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Ungroup splits permissions out into individual permissions
|
||||
// EC2 will combine permissions with the same port but different SourceRanges together, for example
|
||||
// We ungroup them so we can process them
|
||||
func (s IPPermissionSet) Ungroup() IPPermissionSet {
|
||||
l := []*ec2.IpPermission{}
|
||||
for _, p := range s.List() {
|
||||
if len(p.IpRanges) <= 1 {
|
||||
l = append(l, p)
|
||||
continue
|
||||
}
|
||||
for _, ipRange := range p.IpRanges {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.IpRanges = []*ec2.IpRange{ipRange}
|
||||
l = append(l, c)
|
||||
}
|
||||
}
|
||||
|
||||
l2 := []*ec2.IpPermission{}
|
||||
for _, p := range l {
|
||||
if len(p.UserIdGroupPairs) <= 1 {
|
||||
l2 = append(l2, p)
|
||||
continue
|
||||
}
|
||||
for _, u := range p.UserIdGroupPairs {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.UserIdGroupPairs = []*ec2.UserIdGroupPair{u}
|
||||
l2 = append(l2, c)
|
||||
}
|
||||
}
|
||||
|
||||
l3 := []*ec2.IpPermission{}
|
||||
for _, p := range l2 {
|
||||
if len(p.PrefixListIds) <= 1 {
|
||||
l3 = append(l3, p)
|
||||
continue
|
||||
}
|
||||
for _, v := range p.PrefixListIds {
|
||||
c := &ec2.IpPermission{}
|
||||
*c = *p
|
||||
c.PrefixListIds = []*ec2.PrefixListId{v}
|
||||
l3 = append(l3, c)
|
||||
}
|
||||
}
|
||||
|
||||
return NewIPPermissionSet(l3...)
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s IPPermissionSet) Insert(items ...*ec2.IpPermission) {
|
||||
for _, p := range items {
|
||||
k := keyForIPPermission(p)
|
||||
s[k] = p
|
||||
}
|
||||
}
|
||||
|
||||
// List returns the contents as a slice. Order is not defined.
|
||||
func (s IPPermissionSet) List() []*ec2.IpPermission {
|
||||
res := make([]*ec2.IpPermission, 0, len(s))
|
||||
for _, v := range s {
|
||||
res = append(res, v)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool {
|
||||
for k := range s2 {
|
||||
_, found := s1[k]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s IPPermissionSet) Difference(s2 IPPermissionSet) IPPermissionSet {
|
||||
result := NewIPPermissionSet()
|
||||
for k, v := range s {
|
||||
_, found := s2[k]
|
||||
if !found {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s IPPermissionSet) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func keyForIPPermission(p *ec2.IpPermission) string {
|
||||
v, err := json.Marshal(p)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("error building JSON representation of ec2.IpPermission: %v", err))
|
||||
}
|
||||
return string(v)
|
||||
}
|
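The set above keys each permission by its JSON encoding, so set operations such as Difference are plain map lookups on that key. A minimal stand-alone sketch of the same idea, using a trimmed illustrative rule type instead of ec2.IpPermission:

package main

import (
	"encoding/json"
	"fmt"
)

// rule is a stand-in for ec2.IpPermission: the set below keys entries by
// their JSON encoding, the same way keyForIPPermission does above.
type rule struct {
	Port int    `json:"port"`
	CIDR string `json:"cidr"`
}

type ruleSet map[string]rule

func newRuleSet(items ...rule) ruleSet {
	s := ruleSet{}
	for _, r := range items {
		k, _ := json.Marshal(r)
		s[string(k)] = r
	}
	return s
}

// difference returns the rules in s that are not present in other.
func (s ruleSet) difference(other ruleSet) ruleSet {
	out := ruleSet{}
	for k, v := range s {
		if _, found := other[k]; !found {
			out[k] = v
		}
	}
	return out
}

func main() {
	actual := newRuleSet(rule{80, "0.0.0.0/0"}, rule{22, "10.0.0.0/8"})
	wanted := newRuleSet(rule{80, "0.0.0.0/0"})

	// Rules to revoke: present on the security group but no longer wanted.
	for _, r := range actual.difference(wanted) {
		fmt.Printf("revoke port %d from %s\n", r.Port, r.CIDR) // revoke port 22 from 10.0.0.0/8
	}
}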
84
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go
generated
vendored
Normal file
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678a
|
||||
// The "traditional" format is "vol-12345678"
|
||||
// A new longer format is also being introduced: "vol-12345678abcdef01"
|
||||
// We should not assume anything about the length or format, though it seems
|
||||
// reasonable to assume that volumes will continue to start with "vol-".
|
||||
type awsVolumeID string
|
||||
|
||||
func (i awsVolumeID) awsString() *string {
|
||||
return aws.String(string(i))
|
||||
}
|
||||
|
||||
// KubernetesVolumeID represents the id for a volume in the kubernetes API;
|
||||
// a few forms are recognized:
|
||||
// * aws://<zone>/<awsVolumeId>
|
||||
// * aws:///<awsVolumeId>
|
||||
// * <awsVolumeId>
|
||||
type KubernetesVolumeID string
|
||||
|
||||
// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID
|
||||
func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
|
||||
// name looks like aws://availability-zone/awsVolumeId
|
||||
|
||||
// The original idea of the URL-style name was to put the AZ into the
|
||||
// host, so we could find the AZ immediately from the name without
|
||||
// querying the API. But it turns out we don't actually need it for
|
||||
// multi-AZ clusters, as we put the AZ into the labels on the PV instead.
|
||||
// However, if in future we want to support multi-AZ cluster
|
||||
// volume-awareness without using PersistentVolumes, we likely will
|
||||
// want the AZ in the host.
|
||||
|
||||
s := string(name)
|
||||
|
||||
if !strings.HasPrefix(s, "aws://") {
|
||||
// Assume a bare aws volume id (vol-1234...)
|
||||
// Build a URL with an empty host (AZ)
|
||||
s = "aws://" + "" + "/" + s
|
||||
}
|
||||
url, err := url.Parse(s)
|
||||
if err != nil {
|
||||
// TODO: Maybe we should pass a URL into the Volume functions
|
||||
return "", fmt.Errorf("Invalid disk name (%s): %v", name, err)
|
||||
}
|
||||
if url.Scheme != "aws" {
|
||||
return "", fmt.Errorf("Invalid scheme for AWS volume (%s)", name)
|
||||
}
|
||||
|
||||
awsID := url.Path
|
||||
awsID = strings.Trim(awsID, "/")
|
||||
|
||||
// We sanity check the resulting volume; the two known formats are
|
||||
// vol-12345678 and vol-12345678abcdef01
|
||||
// TODO: Regex match?
|
||||
if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") {
|
||||
return "", fmt.Errorf("Invalid format for AWS volume (%s)", name)
|
||||
}
|
||||
|
||||
return awsVolumeID(awsID), nil
|
||||
}
|
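The three recognized volume ID forms reduce to URL parsing with an optional availability-zone host. A stand-alone sketch of that normalization (illustrative helper name and fixed example IDs, not the vendored code):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseVolumeID normalizes the three accepted forms to a bare vol- ID,
// mirroring the mapping described above.
func parseVolumeID(name string) (string, error) {
	s := name
	if !strings.HasPrefix(s, "aws://") {
		// Bare volume ID: wrap it in the URL form with an empty zone.
		s = "aws:///" + s
	}
	u, err := url.Parse(s)
	if err != nil || u.Scheme != "aws" {
		return "", fmt.Errorf("invalid AWS volume id %q", name)
	}
	id := strings.Trim(u.Path, "/")
	if id == "" || strings.Contains(id, "/") || !strings.HasPrefix(id, "vol-") {
		return "", fmt.Errorf("invalid AWS volume id %q", name)
	}
	return id, nil
}

func main() {
	for _, name := range []string{
		"aws://us-east-1a/vol-12345678",
		"aws:///vol-12345678abcdef01",
		"vol-12345678",
	} {
		id, err := parseVolumeID(name)
		fmt.Println(name, "->", id, err)
	}
}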
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,73 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"azure.go",
|
||||
"azure_blob.go",
|
||||
"azure_instances.go",
|
||||
"azure_loadbalancer.go",
|
||||
"azure_routes.go",
|
||||
"azure_storage.go",
|
||||
"azure_storageaccount.go",
|
||||
"azure_util.go",
|
||||
"azure_wrap.go",
|
||||
"azure_zones.go",
|
||||
"vhd.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/arm/network",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/arm/storage",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/storage",
|
||||
"//vendor:github.com/Azure/go-autorest/autorest",
|
||||
"//vendor:github.com/Azure/go-autorest/autorest/azure",
|
||||
"//vendor:github.com/Azure/go-autorest/autorest/to",
|
||||
"//vendor:github.com/ghodss/yaml",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/rubiojr/go-vhd/vhd",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["azure_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/arm/compute",
|
||||
"//vendor:github.com/Azure/azure-sdk-for-go/arm/network",
|
||||
"//vendor:github.com/Azure/go-autorest/autorest/to",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
assignees:
- colemickens
- brendandburns
178
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/storage"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
|
||||
// CloudProviderName is the value used for the --cloud-provider flag
|
||||
const CloudProviderName = "azure"
|
||||
|
||||
// Config holds the configuration parsed from the --cloud-config flag
|
||||
type Config struct {
|
||||
Cloud string `json:"cloud" yaml:"cloud"`
|
||||
TenantID string `json:"tenantId" yaml:"tenantId"`
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
|
||||
Location string `json:"location" yaml:"location"`
|
||||
VnetName string `json:"vnetName" yaml:"vnetName"`
|
||||
SubnetName string `json:"subnetName" yaml:"subnetName"`
|
||||
SecurityGroupName string `json:"securityGroupName" yaml:"securityGroupName"`
|
||||
RouteTableName string `json:"routeTableName" yaml:"routeTableName"`
|
||||
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
|
||||
|
||||
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
|
||||
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
|
||||
AADTenantID string `json:"aadTenantId" yaml:"aadTenantId"`
|
||||
}
|
||||
|
||||
// Cloud holds the config and clients
|
||||
type Cloud struct {
|
||||
Config
|
||||
Environment azure.Environment
|
||||
RoutesClient network.RoutesClient
|
||||
SubnetsClient network.SubnetsClient
|
||||
InterfacesClient network.InterfacesClient
|
||||
RouteTablesClient network.RouteTablesClient
|
||||
LoadBalancerClient network.LoadBalancersClient
|
||||
PublicIPAddressesClient network.PublicIPAddressesClient
|
||||
SecurityGroupsClient network.SecurityGroupsClient
|
||||
VirtualMachinesClient compute.VirtualMachinesClient
|
||||
StorageAccountClient storage.AccountsClient
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
|
||||
}
|
||||
|
||||
// NewCloud returns a Cloud with initialized clients
|
||||
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
var az Cloud
|
||||
|
||||
configContents, err := ioutil.ReadAll(configReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(configContents, &az)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if az.Cloud == "" {
|
||||
az.Environment = azure.PublicCloud
|
||||
} else {
|
||||
az.Environment, err = azure.EnvironmentFromName(az.Cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
oauthConfig, err := az.Environment.OAuthConfigForTenant(az.TenantID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servicePrincipalToken, err := azure.NewServicePrincipalToken(
|
||||
*oauthConfig,
|
||||
az.AADClientID,
|
||||
az.AADClientSecret,
|
||||
az.Environment.ServiceManagementEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID)
|
||||
az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.SubnetsClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID)
|
||||
az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RouteTablesClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.RoutesClient = network.NewRoutesClient(az.SubscriptionID)
|
||||
az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.RoutesClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID)
|
||||
az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.InterfacesClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID)
|
||||
az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.LoadBalancerClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID)
|
||||
az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.VirtualMachinesClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID)
|
||||
az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.PublicIPAddressesClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID)
|
||||
az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint
|
||||
az.SecurityGroupsClient.Authorizer = servicePrincipalToken
|
||||
|
||||
az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
|
||||
az.StorageAccountClient.Authorizer = servicePrincipalToken
|
||||
return &az, nil
|
||||
}
|
||||
|
||||
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Routes returns a routes interface along with whether the interface is supported.
|
||||
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.
|
||||
func (az *Cloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (az *Cloud) ProviderName() string {
|
||||
return CloudProviderName
|
||||
}
|
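The --cloud-config file is simply unmarshalled into the Config struct above. The snippet below is a stand-alone illustration (trimmed struct and made-up placeholder values) of how those json tags map a config file onto fields; the real provider also accepts YAML via ghodss/yaml.

package main

import (
	"encoding/json"
	"fmt"
)

// config mirrors a few fields of the provider's Config struct.
type config struct {
	Cloud          string `json:"cloud"`
	TenantID       string `json:"tenantId"`
	SubscriptionID string `json:"subscriptionId"`
	ResourceGroup  string `json:"resourceGroup"`
	Location       string `json:"location"`
}

func main() {
	// Placeholder values only; real deployments use their own IDs.
	raw := []byte(`{
		"cloud": "AzurePublicCloud",
		"tenantId": "00000000-0000-0000-0000-000000000000",
		"subscriptionId": "11111111-1111-1111-1111-111111111111",
		"resourceGroup": "my-k8s-rg",
		"location": "westus"
	}`)

	var c config
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}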
111
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blob.go
generated
vendored
Normal file
|
@ -0,0 +1,111 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
azs "github.com/Azure/azure-sdk-for-go/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
vhdContainerName = "vhds"
|
||||
useHTTPS = true
|
||||
blobServiceName = "blob"
|
||||
)
|
||||
|
||||
// create page blob
|
||||
func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) {
|
||||
blobClient, err := az.getBlobClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
size := 1024 * 1024 * 1024 * sizeGB
|
||||
vhdSize := size + vhdHeaderSize /* header size */
|
||||
// Blob name in URL must end with '.vhd' extension.
|
||||
name = name + ".vhd"
|
||||
err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags)
|
||||
if err != nil {
|
||||
// if container doesn't exist, create one and retry PutPageBlob
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errContainerNotFound) {
|
||||
err = blobClient.CreateContainer(vhdContainerName, azs.ContainerAccessTypeContainer)
|
||||
if err == nil {
|
||||
err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to put page blob: %v", err)
|
||||
}
|
||||
|
||||
// add VHD signature to the blob
|
||||
h, err := createVHDHeader(uint64(size))
|
||||
if err != nil {
|
||||
az.deleteVhdBlob(accountName, accountKey, name)
|
||||
return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
|
||||
}
|
||||
if err = blobClient.PutPage(vhdContainerName, name, size, vhdSize-1, azs.PageWriteTypeUpdate, h[:vhdHeaderSize], nil); err != nil {
|
||||
az.deleteVhdBlob(accountName, accountKey, name)
|
||||
return "", "", fmt.Errorf("failed to update vhd header, err: %v", err)
|
||||
}
|
||||
|
||||
scheme := "http"
|
||||
if useHTTPS {
|
||||
scheme = "https"
|
||||
}
|
||||
host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix)
|
||||
uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name)
|
||||
return name, uri, nil
|
||||
|
||||
}
|
||||
|
||||
// delete a vhd blob
|
||||
func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error {
|
||||
blobClient, err := az.getBlobClient(accountName, accountKey)
|
||||
if err == nil {
|
||||
return blobClient.DeleteBlob(vhdContainerName, blobName, nil)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) {
|
||||
client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating azure client: %v", err)
|
||||
}
|
||||
b := client.GetBlobService()
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
|
||||
func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) {
|
||||
scheme := "http"
|
||||
if useHTTPS {
|
||||
scheme = "https"
|
||||
}
|
||||
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix)
|
||||
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
|
||||
re := regexp.MustCompile(reStr)
|
||||
res := re.FindSubmatch([]byte(uri))
|
||||
if len(res) < 3 {
|
||||
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri)
|
||||
}
|
||||
return string(res[1]), string(res[2]), nil
|
||||
}
|
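The account/blob extraction above is a regular-expression match over the page-blob URI. A stand-alone sketch with an example endpoint suffix (core.windows.net is assumed here; the real code takes the suffix from the configured Azure environment):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	const storageEndpointSuffix = "core.windows.net" // assumed public-cloud suffix
	uri := "https://foo.blob.core.windows.net/vhds/bar.vhd"

	// https://<account>.blob.<suffix>/vhds/<blobName>
	re := regexp.MustCompile(fmt.Sprintf("https://(.*).blob.%s/vhds/(.*)", storageEndpointSuffix))
	m := re.FindStringSubmatch(uri)
	if len(m) < 3 {
		panic("URI did not match")
	}
	fmt.Println("account:", m[1], "blob:", m[2]) // account: foo blob: bar.vhd
}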
139
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
Normal file
|
@ -0,0 +1,139 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
ip, err := az.getIPForMachine(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ip},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (az *Cloud) ExternalID(name types.NodeName) (string, error) {
|
||||
return az.InstanceID(name)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
func (az *Cloud) InstanceID(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return *machine.ID, nil
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
|
||||
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
|
||||
func (az *Cloud) InstanceType(name types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVirtualMachine(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return string(machine.HardwareProfile.VMSize), nil
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
|
||||
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
|
||||
func (az *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return fmt.Errorf("not supported")
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on
|
||||
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
|
||||
func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
func (az *Cloud) listAllNodesInResourceGroup() ([]compute.VirtualMachine, error) {
|
||||
allNodes := []compute.VirtualMachine{}
|
||||
|
||||
result, err := az.VirtualMachinesClient.List(az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
morePages := (result.Value != nil && len(*result.Value) > 1)
|
||||
|
||||
for morePages {
|
||||
allNodes = append(allNodes, *result.Value...)
|
||||
|
||||
result, err = az.VirtualMachinesClient.ListAllNextResults(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
morePages = (result.Value != nil && len(*result.Value) > 1)
|
||||
}
|
||||
|
||||
return allNodes, nil
|
||||
|
||||
}
|
||||
|
||||
func filterNodes(nodes []compute.VirtualMachine, filter string) ([]compute.VirtualMachine, error) {
|
||||
filteredNodes := []compute.VirtualMachine{}
|
||||
|
||||
re, err := regexp.Compile(filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
// search tags
|
||||
if re.MatchString(*node.Name) {
|
||||
filteredNodes = append(filteredNodes, node)
|
||||
}
|
||||
}
|
||||
|
||||
return filteredNodes, nil
|
||||
}
|
||||
|
||||
// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
|
||||
// This is a simple string cast.
|
||||
func mapNodeNameToVMName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// mapVMNameToNodeName maps an Azure VM Name to a k8s NodeName
|
||||
// This is a simple string cast.
|
||||
func mapVMNameToNodeName(vmName string) types.NodeName {
|
||||
return types.NodeName(vmName)
|
||||
}
|
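filterNodes above is a plain regexp filter over VM names. A stand-alone sketch of the same idea (example names only):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	names := []string{"k8s-agent-0", "k8s-agent-1", "k8s-master-0", "bastion"}

	// Keep only nodes whose name matches the filter, as filterNodes does for VMs.
	re, err := regexp.Compile("^k8s-agent-")
	if err != nil {
		panic(err)
	}
	var filtered []string
	for _, n := range names {
		if re.MatchString(n) {
			filtered = append(filtered, n)
		}
	}
	fmt.Println(filtered) // [k8s-agent-0 k8s-agent-1]
}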
668
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
Normal file
|
@ -0,0 +1,668 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and
|
||||
// if so, what its status is.
|
||||
func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
|
||||
lbName := getLoadBalancerName(clusterName)
|
||||
pipName := getPublicIPName(clusterName, service)
|
||||
serviceName := getServiceName(service)
|
||||
|
||||
_, existsLb, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !existsLb {
|
||||
glog.V(5).Infof("get(%s): lb(%s) - doesn't exist", serviceName, pipName)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
pip, existsPip, err := az.getPublicIPAddress(pipName)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !existsPip {
|
||||
glog.V(5).Infof("get(%s): pip(%s) - doesn't exist", serviceName, pipName)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
return &v1.LoadBalancerStatus{
|
||||
Ingress: []v1.LoadBalancerIngress{{IP: *pip.IPAddress}},
|
||||
}, true, nil
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
|
||||
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
lbName := getLoadBalancerName(clusterName)
|
||||
pipName := getPublicIPName(clusterName, service)
|
||||
serviceName := getServiceName(service)
|
||||
glog.V(2).Infof("ensure(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
|
||||
|
||||
pip, err := az.ensurePublicIPExists(serviceName, pipName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sg, sgNeedsUpdate, err := az.reconcileSecurityGroup(sg, clusterName, service)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sgNeedsUpdate {
|
||||
glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
|
||||
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
lb, existsLb, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !existsLb {
|
||||
lb = network.LoadBalancer{
|
||||
Name: &lbName,
|
||||
Location: &az.Location,
|
||||
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
|
||||
}
|
||||
}
|
||||
|
||||
lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !existsLb || lbNeedsUpdate {
|
||||
glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
|
||||
_, err = az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the machines to the backend pool if they're not already
|
||||
lbBackendName := getBackendPoolName(clusterName)
|
||||
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName)
|
||||
hostUpdates := make([]func() error, len(nodes))
|
||||
for i, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
f := func() error {
|
||||
err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates[i] = f
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
if errs != nil {
|
||||
return nil, utilerrors.Flatten(errs)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("ensure(%s): FINISH - %s", serviceName, *pip.IPAddress)
|
||||
return &v1.LoadBalancerStatus{
|
||||
Ingress: []v1.LoadBalancerIngress{{IP: *pip.IPAddress}},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
_, err := az.EnsureLoadBalancer(clusterName, service, nodes)
|
||||
return err
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted deletes the specified load balancer if it
|
||||
// exists, returning nil if the load balancer specified either didn't exist or
|
||||
// was successfully deleted.
|
||||
// This construction is useful because many cloud providers' load balancers
|
||||
// have multiple underlying components, meaning a Get could say that the LB
|
||||
// doesn't exist even if some part of it is still laying around.
|
||||
func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
lbName := getLoadBalancerName(clusterName)
|
||||
pipName := getPublicIPName(clusterName, service)
|
||||
serviceName := getServiceName(service)
|
||||
|
||||
glog.V(2).Infof("delete(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
|
||||
|
||||
// reconcile logic is capable of fully reconcile, so we can use this to delete
|
||||
service.Spec.Ports = []v1.ServicePort{}
|
||||
|
||||
lb, existsLb, err := az.getAzureLoadBalancer(lbName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if existsLb {
|
||||
lb, lbNeedsUpdate, reconcileErr := az.reconcileLoadBalancer(lb, nil, clusterName, service, []*v1.Node{})
|
||||
if reconcileErr != nil {
|
||||
return reconcileErr
|
||||
}
|
||||
if lbNeedsUpdate {
|
||||
if len(*lb.FrontendIPConfigurations) > 0 {
|
||||
glog.V(3).Infof("delete(%s): lb(%s) - updating", serviceName, lbName)
|
||||
_, err = az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
|
||||
|
||||
_, err = az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sg, existsSg, err := az.getSecurityGroup()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if existsSg {
|
||||
reconciledSg, sgNeedsUpdate, reconcileErr := az.reconcileSecurityGroup(sg, clusterName, service)
|
||||
if reconcileErr != nil {
|
||||
return reconcileErr
|
||||
}
|
||||
if sgNeedsUpdate {
|
||||
glog.V(3).Infof("delete(%s): sg(%s) - updating", serviceName, az.SecurityGroupName)
|
||||
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = az.ensurePublicIPDeleted(serviceName, pipName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("delete(%s): FINISH", serviceName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.PublicIPAddress, error) {
|
||||
pip, existsPip, err := az.getPublicIPAddress(pipName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if existsPip {
|
||||
return &pip, nil
|
||||
}
|
||||
|
||||
pip.Name = to.StringPtr(pipName)
|
||||
pip.Location = to.StringPtr(az.Location)
|
||||
pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{
|
||||
PublicIPAllocationMethod: network.Static,
|
||||
}
|
||||
pip.Tags = &map[string]*string{"service": &serviceName}
|
||||
|
||||
glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
|
||||
_, err = az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, *pip.Name, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pip, nil
|
||||
|
||||
}
|
||||
|
||||
func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error {
|
||||
_, deleteErr := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil)
|
||||
_, realErr := checkResourceExistsFromError(deleteErr)
|
||||
if realErr != nil {
|
||||
return realErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This ensures load balancer exists and the frontend ip config is setup.
|
||||
// This also reconciles the Service's Ports with the LoadBalancer config.
|
||||
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
|
||||
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodes []*v1.Node) (network.LoadBalancer, bool, error) {
|
||||
lbName := getLoadBalancerName(clusterName)
|
||||
serviceName := getServiceName(service)
|
||||
lbFrontendIPConfigName := getFrontendIPConfigName(service)
|
||||
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
|
||||
lbBackendPoolName := getBackendPoolName(clusterName)
|
||||
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)
|
||||
|
||||
wantLb := len(service.Spec.Ports) > 0
|
||||
dirtyLb := false
|
||||
|
||||
// Ensure LoadBalancer's Backend Pool Configuration
|
||||
if wantLb {
|
||||
newBackendPools := []network.BackendAddressPool{}
|
||||
if lb.BackendAddressPools != nil {
|
||||
newBackendPools = *lb.BackendAddressPools
|
||||
}
|
||||
|
||||
foundBackendPool := false
|
||||
for _, bp := range newBackendPools {
|
||||
if strings.EqualFold(*bp.Name, lbBackendPoolName) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
|
||||
foundBackendPool = true
|
||||
break
|
||||
} else {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
|
||||
}
|
||||
}
|
||||
if !foundBackendPool {
|
||||
newBackendPools = append(newBackendPools, network.BackendAddressPool{
|
||||
Name: to.StringPtr(lbBackendPoolName),
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
|
||||
|
||||
dirtyLb = true
|
||||
lb.BackendAddressPools = &newBackendPools
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure LoadBalancer's Frontend IP Configurations
|
||||
dirtyConfigs := false
|
||||
newConfigs := []network.FrontendIPConfiguration{}
|
||||
if lb.FrontendIPConfigurations != nil {
|
||||
newConfigs = *lb.FrontendIPConfigurations
|
||||
}
|
||||
if !wantLb {
|
||||
for i := len(newConfigs) - 1; i >= 0; i-- {
|
||||
config := newConfigs[i]
|
||||
if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
foundConfig := false
|
||||
for _, config := range newConfigs {
|
||||
if strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
|
||||
foundConfig = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundConfig {
|
||||
newConfigs = append(newConfigs,
|
||||
network.FrontendIPConfiguration{
|
||||
Name: to.StringPtr(lbFrontendIPConfigName),
|
||||
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
|
||||
PublicIPAddress: &network.PublicIPAddress{
|
||||
ID: pip.ID,
|
||||
},
|
||||
},
|
||||
})
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
|
||||
dirtyConfigs = true
|
||||
}
|
||||
}
|
||||
if dirtyConfigs {
|
||||
dirtyLb = true
|
||||
lb.FrontendIPConfigurations = &newConfigs
|
||||
}
|
||||
|
||||
// update probes/rules
|
||||
expectedProbes := make([]network.Probe, len(service.Spec.Ports))
|
||||
expectedRules := make([]network.LoadBalancingRule, len(service.Spec.Ports))
|
||||
for i, port := range service.Spec.Ports {
|
||||
lbRuleName := getRuleName(service, port)
|
||||
|
||||
transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
|
||||
if err != nil {
|
||||
return lb, false, err
|
||||
}
|
||||
|
||||
if serviceapi.NeedsHealthCheck(service) {
|
||||
podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)
|
||||
|
||||
expectedProbes[i] = network.Probe{
|
||||
Name: &lbRuleName,
|
||||
ProbePropertiesFormat: &network.ProbePropertiesFormat{
|
||||
RequestPath: to.StringPtr(podPresencePath),
|
||||
Protocol: network.ProbeProtocolHTTP,
|
||||
Port: to.Int32Ptr(podPresencePort),
|
||||
IntervalInSeconds: to.Int32Ptr(5),
|
||||
NumberOfProbes: to.Int32Ptr(2),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
expectedProbes[i] = network.Probe{
|
||||
Name: &lbRuleName,
|
||||
ProbePropertiesFormat: &network.ProbePropertiesFormat{
|
||||
Protocol: probeProto,
|
||||
Port: to.Int32Ptr(port.NodePort),
|
||||
IntervalInSeconds: to.Int32Ptr(5),
|
||||
NumberOfProbes: to.Int32Ptr(2),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
expectedRules[i] = network.LoadBalancingRule{
|
||||
Name: &lbRuleName,
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
Protocol: transportProto,
|
||||
FrontendIPConfiguration: &network.SubResource{
|
||||
ID: to.StringPtr(lbFrontendIPConfigID),
|
||||
},
|
||||
BackendAddressPool: &network.SubResource{
|
||||
ID: to.StringPtr(lbBackendPoolID),
|
||||
},
|
||||
Probe: &network.SubResource{
|
||||
ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
|
||||
},
|
||||
FrontendPort: to.Int32Ptr(port.Port),
|
||||
BackendPort: to.Int32Ptr(port.Port),
|
||||
EnableFloatingIP: to.BoolPtr(true),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// remove unwanted probes
|
||||
dirtyProbes := false
|
||||
var updatedProbes []network.Probe
|
||||
if lb.Probes != nil {
|
||||
updatedProbes = *lb.Probes
|
||||
}
|
||||
for i := len(updatedProbes) - 1; i >= 0; i-- {
|
||||
existingProbe := updatedProbes[i]
|
||||
if serviceOwnsRule(service, *existingProbe.Name) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe := false
|
||||
if findProbe(expectedProbes, existingProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
|
||||
keepProbe = true
|
||||
}
|
||||
if !keepProbe {
|
||||
updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
|
||||
dirtyProbes = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// add missing, wanted probes
|
||||
for _, expectedProbe := range expectedProbes {
|
||||
foundProbe := false
|
||||
if findProbe(updatedProbes, expectedProbe) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
|
||||
foundProbe = true
|
||||
}
|
||||
if !foundProbe {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
|
||||
updatedProbes = append(updatedProbes, expectedProbe)
|
||||
dirtyProbes = true
|
||||
}
|
||||
}
|
||||
if dirtyProbes {
|
||||
dirtyLb = true
|
||||
lb.Probes = &updatedProbes
|
||||
}
|
||||
|
||||
// update rules
|
||||
dirtyRules := false
|
||||
var updatedRules []network.LoadBalancingRule
|
||||
if lb.LoadBalancingRules != nil {
|
||||
updatedRules = *lb.LoadBalancingRules
|
||||
}
|
||||
// update rules: remove unwanted
|
||||
for i := len(updatedRules) - 1; i >= 0; i-- {
|
||||
existingRule := updatedRules[i]
|
||||
if serviceOwnsRule(service, *existingRule.Name) {
|
||||
keepRule := false
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
if findRule(expectedRules, existingRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
keepRule = true
|
||||
}
|
||||
if !keepRule {
|
||||
glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
|
||||
dirtyRules = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// update rules: add needed
|
||||
for _, expectedRule := range expectedRules {
|
||||
foundRule := false
|
||||
if findRule(updatedRules, expectedRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
foundRule = true
|
||||
}
|
||||
if !foundRule {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
|
||||
updatedRules = append(updatedRules, expectedRule)
|
||||
dirtyRules = true
|
||||
}
|
||||
}
|
||||
if dirtyRules {
|
||||
dirtyLb = true
|
||||
lb.LoadBalancingRules = &updatedRules
|
||||
}
|
||||
|
||||
return lb, dirtyLb, nil
|
||||
}
|
||||
|
||||
// This reconciles the Network Security Group similarly to how the LB is reconciled.
|
||||
// This entails adding required, missing SecurityRules and removing stale rules.
|
||||
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service) (network.SecurityGroup, bool, error) {
|
||||
serviceName := getServiceName(service)
|
||||
wantLb := len(service.Spec.Ports) > 0
|
||||
|
||||
sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service)
|
||||
if err != nil {
|
||||
return sg, false, err
|
||||
}
|
||||
var sourceAddressPrefixes []string
|
||||
if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
|
||||
sourceAddressPrefixes = []string{"Internet"}
|
||||
} else {
|
||||
for _, ip := range sourceRanges {
|
||||
sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
|
||||
}
|
||||
}
|
||||
expectedSecurityRules := make([]network.SecurityRule, len(service.Spec.Ports)*len(sourceAddressPrefixes))
|
||||
|
||||
for i, port := range service.Spec.Ports {
|
||||
securityRuleName := getRuleName(service, port)
|
||||
_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
|
||||
if err != nil {
|
||||
return sg, false, err
|
||||
}
|
||||
for j := range sourceAddressPrefixes {
|
||||
ix := i*len(sourceAddressPrefixes) + j
|
||||
expectedSecurityRules[ix] = network.SecurityRule{
|
||||
Name: to.StringPtr(securityRuleName),
|
||||
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
|
||||
Protocol: securityProto,
|
||||
SourcePortRange: to.StringPtr("*"),
|
||||
DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))),
|
||||
SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]),
|
||||
DestinationAddressPrefix: to.StringPtr("*"),
|
||||
Access: network.Allow,
|
||||
Direction: network.Inbound,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update security rules
|
||||
dirtySg := false
|
||||
var updatedRules []network.SecurityRule
|
||||
if sg.SecurityRules != nil {
|
||||
updatedRules = *sg.SecurityRules
|
||||
}
|
||||
// update security rules: remove unwanted
|
||||
for i := len(updatedRules) - 1; i >= 0; i-- {
|
||||
existingRule := updatedRules[i]
|
||||
if serviceOwnsRule(service, *existingRule.Name) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
|
||||
keepRule := false
|
||||
if findSecurityRule(expectedSecurityRules, existingRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
|
||||
keepRule = true
|
||||
}
|
||||
if !keepRule {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
|
||||
updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
|
||||
dirtySg = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// update security rules: add needed
|
||||
for _, expectedRule := range expectedSecurityRules {
|
||||
foundRule := false
|
||||
if findSecurityRule(updatedRules, expectedRule) {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
|
||||
foundRule = true
|
||||
}
|
||||
if !foundRule {
|
||||
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name)
|
||||
|
||||
nextAvailablePriority, err := getNextAvailablePriority(updatedRules)
|
||||
if err != nil {
|
||||
return sg, false, err
|
||||
}
|
||||
|
||||
expectedRule.Priority = to.Int32Ptr(nextAvailablePriority)
|
||||
updatedRules = append(updatedRules, expectedRule)
|
||||
dirtySg = true
|
||||
}
|
||||
}
|
||||
if dirtySg {
|
||||
sg.SecurityRules = &updatedRules
|
||||
}
|
||||
return sg, dirtySg, nil
|
||||
}
|
||||
|
||||
func findProbe(probes []network.Probe, probe network.Probe) bool {
|
||||
for _, existingProbe := range probes {
|
||||
if strings.EqualFold(*existingProbe.Name, *probe.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool {
|
||||
for _, existingRule := range rules {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool {
|
||||
for _, existingRule := range rules {
|
||||
if strings.EqualFold(*existingRule.Name, *rule.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
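// exampleFindRuleMatch is an editor-added, illustrative sketch (not upstream code).
// It shows the matching semantics shared by findProbe, findRule and findSecurityRule
// above: resources are compared purely by name, case-insensitively, so a rule whose
// name differs only in case is treated as already present and is not re-added during
// reconciliation. The rule name used here is a made-up example.
func exampleFindRuleMatch() bool {
	existing := []network.LoadBalancingRule{
		{Name: to.StringPtr("ASERVICE-TCP-80-30080")},
	}
	wanted := network.LoadBalancingRule{Name: to.StringPtr("aservice-tcp-80-30080")}
	// Only the (case-folded) names are compared, not the rule contents, so this is true.
	return findRule(existing, wanted)
}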
|
||||
|
||||
// This ensures the given VM's Primary NIC's Primary IP Configuration is
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string) error {
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
primaryNicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nicName, err := getLastSegment(primaryNicID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check availability set
|
||||
if az.PrimaryAvailabilitySetName != "" {
|
||||
expectedAvailabilitySetName := az.getAvailabilitySetID(az.PrimaryAvailabilitySetName)
|
||||
if !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
"nicupdate(%s): skipping nic (%s) since it is not in the primaryAvailabilitSet(%s)",
|
||||
serviceName, nicName, az.PrimaryAvailabilitySetName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var primaryIPConfig *network.InterfaceIPConfiguration
|
||||
primaryIPConfig, err = getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
foundPool := false
|
||||
newBackendPools := []network.BackendAddressPool{}
|
||||
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
|
||||
}
|
||||
for _, existingPool := range newBackendPools {
|
||||
if strings.EqualFold(backendPoolID, *existingPool.ID) {
|
||||
foundPool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
newBackendPools = append(newBackendPools,
|
||||
network.BackendAddressPool{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
|
||||
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
|
||||
_, err := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
141
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
|
||||
func (az *Cloud) ListRoutes(clusterName string) (routes []*cloudprovider.Route, err error) {
|
||||
glog.V(10).Infof("list: START clusterName=%q", clusterName)
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !existsRouteTable {
|
||||
return []*cloudprovider.Route{}, nil
|
||||
}
|
||||
|
||||
var kubeRoutes []*cloudprovider.Route
|
||||
if routeTable.Routes != nil {
|
||||
kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
|
||||
for i, route := range *routeTable.Routes {
|
||||
instance := mapRouteNameToNodeName(*route.Name)
|
||||
cidr := *route.AddressPrefix
|
||||
glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
|
||||
|
||||
kubeRoutes[i] = &cloudprovider.Route{
|
||||
Name: *route.Name,
|
||||
TargetNode: instance,
|
||||
DestinationCIDR: cidr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(10).Info("list: FINISH")
|
||||
return kubeRoutes, nil
|
||||
}
|
||||
|
||||
// CreateRoute creates the described managed route
|
||||
// route.Name will be ignored, although the cloud-provider may use nameHint
|
||||
// to create a more user-meaningful name.
|
||||
func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
routeTable, existsRouteTable, err := az.getRouteTable()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !existsRouteTable {
|
||||
routeTable = network.RouteTable{
|
||||
Name: to.StringPtr(az.RouteTableName),
|
||||
Location: to.StringPtr(az.Location),
|
||||
RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
|
||||
_, err = az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
targetIP, err := az.getIPForMachine(kubeRoute.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
route := network.Route{
|
||||
Name: to.StringPtr(routeName),
|
||||
RoutePropertiesFormat: &network.RoutePropertiesFormat{
|
||||
AddressPrefix: to.StringPtr(kubeRoute.DestinationCIDR),
|
||||
NextHopType: network.RouteNextHopTypeVirtualAppliance,
|
||||
NextHopIPAddress: to.StringPtr(targetIP),
|
||||
},
|
||||
}
|
||||
|
||||
glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
_, err = az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
|
||||
// Route should be as returned by ListRoutes
|
||||
func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
|
||||
routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
|
||||
_, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This must be kept in sync with mapRouteNameToNodeName.
|
||||
// These two functions enable stashing the instance name in the route
|
||||
// and then retrieving it later when listing. This is needed because
|
||||
// Azure does not let you put tags/descriptions on the Route itself.
|
||||
func mapNodeNameToRouteName(nodeName types.NodeName) string {
|
||||
return fmt.Sprintf("%s", nodeName)
|
||||
}
|
||||
|
||||
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
|
||||
func mapRouteNameToNodeName(routeName string) types.NodeName {
|
||||
return types.NodeName(fmt.Sprintf("%s", routeName))
|
||||
}
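// exampleRouteNameRoundTrip is an editor-added, illustrative sketch (not upstream
// code). It demonstrates the round-trip property the comments above rely on: the
// node name stashed in the route name by mapNodeNameToRouteName is recovered
// unchanged by mapRouteNameToNodeName when routes are listed back from Azure.
func exampleRouteNameRoundTrip(nodeName types.NodeName) bool {
	routeName := mapNodeNameToRouteName(nodeName)
	// With the current 1:1 mapping this always holds.
	return mapRouteNameToNodeName(routeName) == nodeName
}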
|
253
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
const (
|
||||
maxLUN = 64 // max number of LUNs per VM
|
||||
errLeaseFailed = "AcquireDiskLeaseFailed"
|
||||
errLeaseIDMissing = "LeaseIdMissing"
|
||||
errContainerNotFound = "ContainerNotFound"
|
||||
)
|
||||
|
||||
// AttachDisk attaches a vhd to vm
|
||||
// the vhd must exist; it is identified by diskName and diskURI, and is attached at the given lun with the given cachingMode.
|
||||
func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
vm, exists, err := az.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !exists {
|
||||
return cloudprovider.InstanceNotFound
|
||||
}
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
disks = append(disks,
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Vhd: &compute.VirtualHardDisk{
|
||||
URI: &diskURI,
|
||||
},
|
||||
Lun: &lun,
|
||||
Caching: cachingMode,
|
||||
CreateOption: "attach",
|
||||
})
|
||||
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("azure attach failed, err: %v", err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) {
|
||||
// if lease cannot be acquired, immediately detach the disk and return the original error
|
||||
glog.Infof("failed to acquire disk lease, try detach")
|
||||
az.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
} else {
|
||||
glog.V(4).Infof("azure attach succeeded")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
|
||||
func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
vm, exists, err := az.getVirtualMachine(nodeName)
|
||||
if !exists {
|
||||
// if the host doesn't exist, report all requested disks as not attached
|
||||
glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
} else if err != nil {
|
||||
return attached, err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
for _, diskName := range diskNames {
|
||||
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
// DetachDiskByName detaches a vhd from host
|
||||
// the vhd can be identified by diskName or diskURI
|
||||
func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
vm, exists, err := az.getVirtualMachine(nodeName)
|
||||
if err != nil || !exists {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName)
|
||||
return nil
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for i, disk := range disks {
|
||||
if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
_, err = az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("azure disk detach failed, err: %v", err)
|
||||
} else {
|
||||
glog.V(4).Infof("azure disk detach succeeded")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
|
||||
func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
vm, exists, err := az.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
} else if !exists {
|
||||
return -1, cloudprovider.InstanceNotFound
|
||||
}
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun
|
||||
// return -1 if all luns are used
|
||||
func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
vm, exists, err := az.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
} else if !exists {
|
||||
return -1, cloudprovider.InstanceNotFound
|
||||
}
|
||||
used := make([]bool, maxLUN)
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil {
|
||||
used[*disk.Lun] = true
|
||||
}
|
||||
}
|
||||
for k, v := range used {
|
||||
if !v {
|
||||
return int32(k), nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("All Luns are used")
|
||||
}
|
||||
|
||||
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
|
||||
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
|
||||
// fits storage type and location.
|
||||
func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
|
||||
var err error
|
||||
accounts := []accountWithLocation{}
|
||||
if len(storageAccount) > 0 {
|
||||
accounts = append(accounts, accountWithLocation{Name: storageAccount})
|
||||
} else {
|
||||
// find a storage account
|
||||
accounts, err = az.getStorageAccounts()
|
||||
if err != nil {
|
||||
// TODO: create a storage account and container
|
||||
return "", "", 0, err
|
||||
}
|
||||
}
|
||||
for _, account := range accounts {
|
||||
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
|
||||
if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
|
||||
// find the access key with this account
|
||||
key, err := az.getStorageAccesskey(account.Name)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("no key found for storage account %s", account.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// create a page blob in this account's vhd container
|
||||
name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("created vhd blob uri: %s", uri)
|
||||
return name, uri, requestGB, err
|
||||
}
|
||||
}
|
||||
return "", "", 0, fmt.Errorf("failed to find a matching storage account")
|
||||
}
|
||||
|
||||
// DeleteVolume deletes a VHD blob
|
||||
func (az *Cloud) DeleteVolume(name, uri string) error {
|
||||
accountName, blob, err := az.getBlobNameAndAccountFromURI(uri)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse vhd URI %v", err)
|
||||
}
|
||||
key, err := az.getStorageAccesskey(accountName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
|
||||
}
|
||||
err = az.deleteVhdBlob(accountName, key, blob)
|
||||
if err != nil {
|
||||
glog.Warningf("failed to delete blob %s err: %v", uri, err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseIDMissing) {
|
||||
// disk is still being used
|
||||
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
|
||||
return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name))
|
||||
}
|
||||
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err)
|
||||
}
|
||||
glog.V(4).Infof("blob %s deleted", uri)
|
||||
return nil
|
||||
|
||||
}
|
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type accountWithLocation struct {
|
||||
Name, StorageType, Location string
|
||||
}
|
||||
|
||||
// getStorageAccounts gets the storage accounts' name, type, location in a resource group
|
||||
func (az *Cloud) getStorageAccounts() ([]accountWithLocation, error) {
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Value == nil {
|
||||
return nil, fmt.Errorf("no storage accounts from resource group %s", az.ResourceGroup)
|
||||
}
|
||||
|
||||
accounts := []accountWithLocation{}
|
||||
for _, acct := range *result.Value {
|
||||
if acct.Name != nil {
|
||||
name := *acct.Name
|
||||
loc := ""
|
||||
if acct.Location != nil {
|
||||
loc = *acct.Location
|
||||
}
|
||||
storageType := ""
|
||||
if acct.Sku != nil {
|
||||
storageType = string((*acct.Sku).Name)
|
||||
}
|
||||
accounts = append(accounts, accountWithLocation{Name: name, StorageType: storageType, Location: loc})
|
||||
}
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
result, err := az.StorageAccountClient.ListKeys(az.ResourceGroup, account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if result.Keys == nil {
|
||||
return "", fmt.Errorf("empty keys")
|
||||
}
|
||||
|
||||
for _, k := range *result.Keys {
|
||||
if k.Value != nil && *k.Value != "" {
|
||||
v := *k.Value
|
||||
if ind := strings.LastIndex(v, " "); ind >= 0 {
|
||||
v = v[(ind + 1):]
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no valid keys")
|
||||
}
|
602
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
Normal file
@ -0,0 +1,602 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
var testClusterName = "testCluster"
|
||||
|
||||
// Test addition of a new service/port.
|
||||
func TestReconcileLoadBalancerAddPort(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80)
|
||||
pip := getTestPublicIP()
|
||||
lb := getTestLoadBalancer()
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
if !updated {
|
||||
t.Error("Expected the loadbalancer to need an update")
|
||||
}
|
||||
|
||||
// ensure we got a frontend ip configuration
|
||||
if len(*lb.FrontendIPConfigurations) != 1 {
|
||||
t.Error("Expected the loadbalancer to have a frontend ip configuration")
|
||||
}
|
||||
|
||||
validateLoadBalancer(t, lb, svc)
|
||||
}
|
||||
|
||||
func TestReconcileLoadBalancerNodeHealth(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80)
|
||||
svc.Annotations = map[string]string{
|
||||
serviceapi.BetaAnnotationExternalTraffic: serviceapi.AnnotationValueExternalTrafficLocal,
|
||||
serviceapi.BetaAnnotationHealthCheckNodePort: "32456",
|
||||
}
|
||||
pip := getTestPublicIP()
|
||||
lb := getTestLoadBalancer()
|
||||
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
if !updated {
|
||||
t.Error("Expected the loadbalancer to need an update")
|
||||
}
|
||||
|
||||
// ensure we got a frontend ip configuration
|
||||
if len(*lb.FrontendIPConfigurations) != 1 {
|
||||
t.Error("Expected the loadbalancer to have a frontend ip configuration")
|
||||
}
|
||||
|
||||
validateLoadBalancer(t, lb, svc)
|
||||
}
|
||||
|
||||
// Test removing all services results in removing the frontend ip configuration
|
||||
func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80)
|
||||
lb := getTestLoadBalancer()
|
||||
pip := getTestPublicIP()
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
svcUpdated := getTestService("servicea")
|
||||
lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svcUpdated, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
if !updated {
|
||||
t.Error("Expected the loadbalancer to need an update")
|
||||
}
|
||||
|
||||
// ensure we abandoned the frontend ip configuration
|
||||
if len(*lb.FrontendIPConfigurations) != 0 {
|
||||
t.Error("Expected the loadbalancer to have no frontend ip configuration")
|
||||
}
|
||||
|
||||
validateLoadBalancer(t, lb, svcUpdated)
|
||||
}
|
||||
|
||||
// Test removal of a port from an existing service.
|
||||
func TestReconcileLoadBalancerRemovesPort(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80, 443)
|
||||
pip := getTestPublicIP()
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
existingLoadBalancer := getTestLoadBalancer(svc)
|
||||
|
||||
svcUpdated := getTestService("servicea", 80)
|
||||
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svcUpdated, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
validateLoadBalancer(t, updatedLoadBalancer, svcUpdated)
|
||||
}
|
||||
|
||||
// Test reconciliation of multiple services on same port
|
||||
func TestReconcileLoadBalancerMultipleServices(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc1 := getTestService("servicea", 80, 443)
|
||||
svc2 := getTestService("serviceb", 80)
|
||||
pip := getTestPublicIP()
|
||||
nodes := []*v1.Node{}
|
||||
|
||||
existingLoadBalancer := getTestLoadBalancer()
|
||||
|
||||
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svc1, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
updatedLoadBalancer, _, err = az.reconcileLoadBalancer(updatedLoadBalancer, &pip, testClusterName, &svc2, nodes)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
validateLoadBalancer(t, updatedLoadBalancer, svc1, svc2)
|
||||
}
|
||||
|
||||
func TestReconcileSecurityGroupNewServiceAddsPort(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc1 := getTestService("serviceea", 80)
|
||||
|
||||
sg := getTestSecurityGroup()
|
||||
|
||||
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc1)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
validateSecurityGroup(t, sg, svc1)
|
||||
}
|
||||
|
||||
func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80, 443)
|
||||
|
||||
sg := getTestSecurityGroup(svc)
|
||||
|
||||
svcUpdated := getTestService("servicea", 80)
|
||||
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svcUpdated)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
validateSecurityGroup(t, sg, svcUpdated)
|
||||
}
|
||||
|
||||
func TestReconcileSecurityWithSourceRanges(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
svc := getTestService("servicea", 80, 443)
|
||||
svc.Spec.LoadBalancerSourceRanges = []string{
|
||||
"192.168.0.1/24",
|
||||
"10.0.0.1/32",
|
||||
}
|
||||
|
||||
sg := getTestSecurityGroup(svc)
|
||||
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %q", err)
|
||||
}
|
||||
|
||||
validateSecurityGroup(t, sg, svc)
|
||||
}
|
||||
|
||||
func getTestCloud() *Cloud {
|
||||
return &Cloud{
|
||||
Config: Config{
|
||||
TenantID: "tenant",
|
||||
SubscriptionID: "subscription",
|
||||
ResourceGroup: "rg",
|
||||
Location: "westus",
|
||||
VnetName: "vnet",
|
||||
SubnetName: "subnet",
|
||||
SecurityGroupName: "nsg",
|
||||
RouteTableName: "rt",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getBackendPort(port int32) int32 {
|
||||
return port + 10000
|
||||
}
|
||||
|
||||
func getTestPublicIP() network.PublicIPAddress {
|
||||
pip := network.PublicIPAddress{}
|
||||
pip.ID = to.StringPtr("/this/is/a/public/ip/address/id")
|
||||
return pip
|
||||
}
|
||||
|
||||
func getTestService(identifier string, requestedPorts ...int32) v1.Service {
|
||||
ports := []v1.ServicePort{}
|
||||
for _, port := range requestedPorts {
|
||||
ports = append(ports, v1.ServicePort{
|
||||
Name: fmt.Sprintf("port-%d", port),
|
||||
Protocol: v1.ProtocolTCP,
|
||||
Port: port,
|
||||
NodePort: getBackendPort(port),
|
||||
})
|
||||
}
|
||||
|
||||
svc := v1.Service{
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeLoadBalancer,
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
svc.Name = identifier
|
||||
svc.Namespace = "default"
|
||||
svc.UID = types.UID(identifier)
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer {
|
||||
rules := []network.LoadBalancingRule{}
|
||||
probes := []network.Probe{}
|
||||
|
||||
for _, service := range services {
|
||||
for _, port := range service.Spec.Ports {
|
||||
ruleName := getRuleName(&service, port)
|
||||
rules = append(rules, network.LoadBalancingRule{
|
||||
Name: to.StringPtr(ruleName),
|
||||
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
|
||||
FrontendPort: to.Int32Ptr(port.Port),
|
||||
BackendPort: to.Int32Ptr(port.Port),
|
||||
},
|
||||
})
|
||||
probes = append(probes, network.Probe{
|
||||
Name: to.StringPtr(ruleName),
|
||||
ProbePropertiesFormat: &network.ProbePropertiesFormat{
|
||||
Port: to.Int32Ptr(port.NodePort),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
lb := network.LoadBalancer{
|
||||
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
|
||||
LoadBalancingRules: &rules,
|
||||
Probes: &probes,
|
||||
},
|
||||
}
|
||||
|
||||
return lb
|
||||
}
|
||||
|
||||
func getServiceSourceRanges(service *v1.Service) []string {
|
||||
if len(service.Spec.LoadBalancerSourceRanges) == 0 {
|
||||
return []string{"Internet"}
|
||||
}
|
||||
return service.Spec.LoadBalancerSourceRanges
|
||||
}
|
||||
|
||||
func getTestSecurityGroup(services ...v1.Service) network.SecurityGroup {
|
||||
rules := []network.SecurityRule{}
|
||||
|
||||
for _, service := range services {
|
||||
for _, port := range service.Spec.Ports {
|
||||
ruleName := getRuleName(&service, port)
|
||||
|
||||
sources := getServiceSourceRanges(&service)
|
||||
for _, src := range sources {
|
||||
rules = append(rules, network.SecurityRule{
|
||||
Name: to.StringPtr(ruleName),
|
||||
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
|
||||
SourceAddressPrefix: to.StringPtr(src),
|
||||
DestinationPortRange: to.StringPtr(fmt.Sprintf("%d", port.Port)),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sg := network.SecurityGroup{
|
||||
SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{
|
||||
SecurityRules: &rules,
|
||||
},
|
||||
}
|
||||
|
||||
return sg
|
||||
}
|
||||
|
||||
func validateLoadBalancer(t *testing.T, loadBalancer network.LoadBalancer, services ...v1.Service) {
|
||||
expectedRuleCount := 0
|
||||
for _, svc := range services {
|
||||
for _, wantedRule := range svc.Spec.Ports {
|
||||
expectedRuleCount++
|
||||
wantedRuleName := getRuleName(&svc, wantedRule)
|
||||
foundRule := false
|
||||
for _, actualRule := range *loadBalancer.LoadBalancingRules {
|
||||
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
|
||||
*actualRule.FrontendPort == wantedRule.Port &&
|
||||
*actualRule.BackendPort == wantedRule.Port {
|
||||
foundRule = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundRule {
|
||||
t.Errorf("Expected load balancer rule but didn't find it: %q", wantedRuleName)
|
||||
}
|
||||
|
||||
foundProbe := false
|
||||
if serviceapi.NeedsHealthCheck(&svc) {
|
||||
path, port := serviceapi.GetServiceHealthCheckPathPort(&svc)
|
||||
for _, actualProbe := range *loadBalancer.Probes {
|
||||
if strings.EqualFold(*actualProbe.Name, wantedRuleName) &&
|
||||
*actualProbe.Port == port &&
|
||||
*actualProbe.RequestPath == path &&
|
||||
actualProbe.Protocol == network.ProbeProtocolHTTP {
|
||||
foundProbe = true
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, actualProbe := range *loadBalancer.Probes {
|
||||
if strings.EqualFold(*actualProbe.Name, wantedRuleName) &&
|
||||
*actualProbe.Port == wantedRule.NodePort {
|
||||
foundProbe = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !foundProbe {
|
||||
for _, actualProbe := range *loadBalancer.Probes {
|
||||
t.Logf("Probe: %s %d", *actualProbe.Name, *actualProbe.Port)
|
||||
}
|
||||
t.Errorf("Expected loadbalancer probe but didn't find it: %q", wantedRuleName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lenRules := len(*loadBalancer.LoadBalancingRules)
|
||||
if lenRules != expectedRuleCount {
|
||||
t.Errorf("Expected the loadbalancer to have %d rules. Found %d.\n%v", expectedRuleCount, lenRules, loadBalancer.LoadBalancingRules)
|
||||
}
|
||||
lenProbes := len(*loadBalancer.Probes)
|
||||
if lenProbes != expectedRuleCount {
|
||||
t.Errorf("Expected the loadbalancer to have %d probes. Found %d.", expectedRuleCount, lenProbes)
|
||||
}
|
||||
}
|
||||
|
||||
func validateSecurityGroup(t *testing.T, securityGroup network.SecurityGroup, services ...v1.Service) {
|
||||
expectedRuleCount := 0
|
||||
for _, svc := range services {
|
||||
for _, wantedRule := range svc.Spec.Ports {
|
||||
sources := getServiceSourceRanges(&svc)
|
||||
|
||||
for _, source := range sources {
|
||||
expectedRuleCount++
|
||||
wantedRuleName := getRuleName(&svc, wantedRule)
|
||||
foundRule := false
|
||||
for _, actualRule := range *securityGroup.SecurityRules {
|
||||
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
|
||||
*actualRule.SourceAddressPrefix == source &&
|
||||
*actualRule.DestinationPortRange == fmt.Sprintf("%d", wantedRule.Port) {
|
||||
foundRule = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundRule {
|
||||
t.Errorf("Expected security group rule but didn't find it: %q", wantedRuleName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lenRules := len(*securityGroup.SecurityRules)
|
||||
if lenRules != expectedRuleCount {
|
||||
t.Errorf("Expected the loadbalancer to have %d rules. Found %d.\n", expectedRuleCount, lenRules)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecurityRulePriorityPicksNextAvailablePriority(t *testing.T) {
|
||||
rules := []network.SecurityRule{}
|
||||
|
||||
var expectedPriority int32 = loadBalancerMinimumPriority + 50
|
||||
|
||||
var i int32
|
||||
for i = loadBalancerMinimumPriority; i < expectedPriority; i++ {
|
||||
rules = append(rules, network.SecurityRule{
|
||||
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
|
||||
Priority: to.Int32Ptr(i),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
priority, err := getNextAvailablePriority(rules)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpectected error: %q", err)
|
||||
}
|
||||
|
||||
if priority != expectedPriority {
|
||||
t.Errorf("Expected priority %d. Got priority %d.", expectedPriority, priority)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) {
|
||||
rules := []network.SecurityRule{}
|
||||
|
||||
var i int32
|
||||
for i = loadBalancerMinimumPriority; i < loadBalancerMaximumPriority; i++ {
|
||||
rules = append(rules, network.SecurityRule{
|
||||
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
|
||||
Priority: to.Int32Ptr(i),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
_, err := getNextAvailablePriority(rules)
|
||||
if err == nil {
|
||||
t.Error("Expectected an error. There are no priority levels left.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolTranslationTCP(t *testing.T) {
|
||||
proto := v1.ProtocolTCP
|
||||
transportProto, securityGroupProto, probeProto, err := getProtocolsFromKubernetesProtocol(proto)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if transportProto != network.TransportProtocolTCP {
|
||||
t.Errorf("Expected TCP LoadBalancer Rule Protocol. Got %v", transportProto)
|
||||
}
|
||||
if securityGroupProto != network.TCP {
|
||||
t.Errorf("Expected TCP SecurityGroup Protocol. Got %v", transportProto)
|
||||
}
|
||||
if probeProto != network.ProbeProtocolTCP {
|
||||
t.Errorf("Expected TCP LoadBalancer Probe Protocol. Got %v", transportProto)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProtocolTranslationUDP(t *testing.T) {
|
||||
proto := v1.ProtocolUDP
|
||||
_, _, _, err := getProtocolsFromKubernetesProtocol(proto)
|
||||
if err == nil {
|
||||
t.Error("Expected an error. UDP is unsupported.")
|
||||
}
|
||||
}
|
||||
|
||||
// Test Configuration deserialization (json)
|
||||
func TestNewCloudFromJSON(t *testing.T) {
|
||||
config := `{
|
||||
"tenantId": "--tenant-id--",
|
||||
"subscriptionId": "--subscription-id--",
|
||||
"aadClientId": "--aad-client-id--",
|
||||
"aadClientSecret": "--aad-client-secret--",
|
||||
"resourceGroup": "--resource-group--",
|
||||
"location": "--location--",
|
||||
"subnetName": "--subnet-name--",
|
||||
"securityGroupName": "--security-group-name--",
|
||||
"vnetName": "--vnet-name--",
|
||||
"routeTableName": "--route-table-name--",
|
||||
"primaryAvailabilitySetName": "--primary-availability-set-name--"
|
||||
}`
|
||||
validateConfig(t, config)
|
||||
}
|
||||
|
||||
// Test Configuration deserialization (yaml)
|
||||
func TestNewCloudFromYAML(t *testing.T) {
|
||||
config := `
|
||||
tenantId: --tenant-id--
|
||||
subscriptionId: --subscription-id--
|
||||
aadClientId: --aad-client-id--
|
||||
aadClientSecret: --aad-client-secret--
|
||||
resourceGroup: --resource-group--
|
||||
location: --location--
|
||||
subnetName: --subnet-name--
|
||||
securityGroupName: --security-group-name--
|
||||
vnetName: --vnet-name--
|
||||
routeTableName: --route-table-name--
|
||||
primaryAvailabilitySetName: --primary-availability-set-name--
|
||||
`
|
||||
validateConfig(t, config)
|
||||
}
|
||||
|
||||
func validateConfig(t *testing.T, config string) {
|
||||
configReader := strings.NewReader(config)
|
||||
cloud, err := NewCloud(configReader)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
azureCloud, ok := cloud.(*Cloud)
|
||||
if !ok {
|
||||
t.Error("NewCloud returned incorrect type")
|
||||
}
|
||||
|
||||
if azureCloud.TenantID != "--tenant-id--" {
|
||||
t.Errorf("got incorrect value for TenantID")
|
||||
}
|
||||
if azureCloud.SubscriptionID != "--subscription-id--" {
|
||||
t.Errorf("got incorrect value for SubscriptionID")
|
||||
}
|
||||
if azureCloud.AADClientID != "--aad-client-id--" {
|
||||
t.Errorf("got incorrect value for AADClientID")
|
||||
}
|
||||
if azureCloud.AADClientSecret != "--aad-client-secret--" {
|
||||
t.Errorf("got incorrect value for AADClientSecret")
|
||||
}
|
||||
if azureCloud.ResourceGroup != "--resource-group--" {
|
||||
t.Errorf("got incorrect value for ResourceGroup")
|
||||
}
|
||||
if azureCloud.Location != "--location--" {
|
||||
t.Errorf("got incorrect value for Location")
|
||||
}
|
||||
if azureCloud.SubnetName != "--subnet-name--" {
|
||||
t.Errorf("got incorrect value for SubnetName")
|
||||
}
|
||||
if azureCloud.SecurityGroupName != "--security-group-name--" {
|
||||
t.Errorf("got incorrect value for SecurityGroupName")
|
||||
}
|
||||
if azureCloud.VnetName != "--vnet-name--" {
|
||||
t.Errorf("got incorrect value for VnetName")
|
||||
}
|
||||
if azureCloud.RouteTableName != "--route-table-name--" {
|
||||
t.Errorf("got incorrect value for RouteTableName")
|
||||
}
|
||||
if azureCloud.PrimaryAvailabilitySetName != "--primary-availability-set-name--" {
|
||||
t.Errorf("got incorrect value for PrimaryAvailabilitySetName")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeInstanceInfo(t *testing.T) {
|
||||
response := `{"ID":"_azdev","UD":"0","FD":"99"}`
|
||||
|
||||
faultDomain, err := readFaultDomain(strings.NewReader(response))
|
||||
if err != nil {
|
||||
t.Error("Unexpected error in ReadFaultDomain")
|
||||
}
|
||||
|
||||
if faultDomain == nil {
|
||||
t.Error("Fault domain was unexpectedly nil")
|
||||
}
|
||||
|
||||
if *faultDomain != "99" {
|
||||
t.Error("got incorrect fault domain")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterNodes(t *testing.T) {
|
||||
nodes := []compute.VirtualMachine{
|
||||
{Name: to.StringPtr("test")},
|
||||
{Name: to.StringPtr("test2")},
|
||||
{Name: to.StringPtr("3test")},
|
||||
}
|
||||
|
||||
filteredNodes, err := filterNodes(nodes, "^test$")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpeted error when filtering: %q", err)
|
||||
}
|
||||
|
||||
if len(filteredNodes) != 1 {
|
||||
t.Error("Got too many nodes after filtering")
|
||||
}
|
||||
|
||||
if *filteredNodes[0].Name != "test" {
|
||||
t.Error("Get the wrong node after filtering")
|
||||
}
|
||||
}
|
250
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go
generated
vendored
Normal file
@ -0,0 +1,250 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
const (
|
||||
loadBalancerMinimumPriority = 500
|
||||
loadBalancerMaximumPriority = 4096
|
||||
|
||||
machineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
|
||||
availabilitySetIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
|
||||
frontendIPConfigIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
|
||||
backendPoolIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
|
||||
loadBalancerRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/%s"
|
||||
loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
|
||||
securityRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s/securityRules/%s"
|
||||
)
|
||||
|
||||
// returns the full identifier of a machine
|
||||
func (az *Cloud) getMachineID(machineName string) string {
|
||||
return fmt.Sprintf(
|
||||
machineIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
machineName)
|
||||
}
|
||||
|
||||
// returns the full identifier of an availabilitySet
|
||||
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
|
||||
return fmt.Sprintf(
|
||||
availabilitySetIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
availabilitySetName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer frontendipconfiguration.
|
||||
func (az *Cloud) getFrontendIPConfigID(lbName, backendPoolName string) string {
|
||||
return fmt.Sprintf(
|
||||
frontendIPConfigIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
backendPoolName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer backendpool.
|
||||
func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
|
||||
return fmt.Sprintf(
|
||||
backendPoolIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
backendPoolName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer rule.
|
||||
func (az *Cloud) getLoadBalancerRuleID(lbName, lbRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
loadBalancerRuleIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
lbRuleName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a loadbalancer probe.
|
||||
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
loadBalancerProbeIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
lbRuleName)
|
||||
}
|
||||
|
||||
// returns the full identifier of a network security group security rule.
|
||||
func (az *Cloud) getSecurityRuleID(securityRuleName string) string {
|
||||
return fmt.Sprintf(
|
||||
securityRuleIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
az.SecurityGroupName,
|
||||
securityRuleName)
|
||||
}
|
||||
|
||||
// returns the deepest child's identifier from a full identifier string.
|
||||
func getLastSegment(ID string) (string, error) {
|
||||
parts := strings.Split(ID, "/")
|
||||
name := parts[len(parts)-1]
|
||||
if len(name) == 0 {
|
||||
return "", fmt.Errorf("resource name was missing from identifier")
|
||||
}
|
||||
|
||||
return name, nil
|
||||
}
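The ID templates above and getLastSegment are inverses of a sort: one builds a full ARM identifier, the other recovers the trailing resource name. A small illustrative sketch (not part of the vendored file; the subscription, resource group, and VM names are made up):

func exampleMachineIDRoundTrip() string {
	// Format a full ARM identifier using the template defined above.
	id := fmt.Sprintf(
		machineIDTemplate,
		"00000000-0000-0000-0000-000000000000", // hypothetical subscription ID
		"my-resource-group",                    // hypothetical resource group
		"my-vm-0")                              // hypothetical VM name

	// getLastSegment recovers the deepest child name, here "my-vm-0".
	name, err := getLastSegment(id)
	if err != nil {
		return ""
	}
	return name
}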
|
||||
|
||||
// returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe
|
||||
// protocol types for the given Kubernetes protocol type.
|
||||
func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (network.TransportProtocol, network.SecurityRuleProtocol, network.ProbeProtocol, error) {
|
||||
switch protocol {
|
||||
case v1.ProtocolTCP:
|
||||
return network.TransportProtocolTCP, network.TCP, network.ProbeProtocolTCP, nil
|
||||
default:
|
||||
return "", "", "", fmt.Errorf("Only TCP is supported for Azure LoadBalancers")
|
||||
}
|
||||
}
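A short usage sketch of the mapping above, added for illustration only: TCP yields the SDK's three TCP enum values, while any other Kubernetes protocol (UDP here) is rejected.

func exampleProtocolMapping() {
	// The TCP case returns the transport, security-rule, and probe protocol enums.
	transport, security, probe, err := getProtocolsFromKubernetesProtocol(v1.ProtocolTCP)
	if err == nil {
		fmt.Println(transport, security, probe)
	}

	// UDP falls through to the default branch and produces an error.
	if _, _, _, err := getProtocolsFromKubernetesProtocol(v1.ProtocolUDP); err != nil {
		fmt.Println(err)
	}
}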
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
|
||||
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
|
||||
if *ref.Primary {
|
||||
return *ref.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
|
||||
}
|
||||
|
||||
func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
|
||||
if len(*nic.IPConfigurations) == 1 {
|
||||
return &((*nic.IPConfigurations)[0]), nil
|
||||
}
|
||||
|
||||
for _, ref := range *nic.IPConfigurations {
|
||||
if *ref.Primary {
|
||||
return &ref, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name)
|
||||
}
|
||||
|
||||
func getLoadBalancerName(clusterName string) string {
|
||||
return clusterName
|
||||
}
|
||||
|
||||
func getBackendPoolName(clusterName string) string {
|
||||
return clusterName
|
||||
}
|
||||
|
||||
func getRuleName(service *v1.Service, port v1.ServicePort) string {
|
||||
return fmt.Sprintf("%s-%s-%d-%d", getRulePrefix(service), port.Protocol, port.Port, port.NodePort)
|
||||
}
|
||||
|
||||
// This returns a human-readable version of the Service used to tag some resources.
|
||||
// This is only used for human-readable convenience, and not to filter.
|
||||
func getServiceName(service *v1.Service) string {
|
||||
return fmt.Sprintf("%s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
|
||||
// This returns a prefix for loadbalancer/security rules.
|
||||
func getRulePrefix(service *v1.Service) string {
|
||||
return cloudprovider.GetLoadBalancerName(service)
|
||||
}
|
||||
|
||||
func serviceOwnsRule(service *v1.Service, rule string) bool {
|
||||
prefix := getRulePrefix(service)
|
||||
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
|
||||
}
|
||||
|
||||
func getFrontendIPConfigName(service *v1.Service) string {
|
||||
return cloudprovider.GetLoadBalancerName(service)
|
||||
}
|
||||
|
||||
func getPublicIPName(clusterName string, service *v1.Service) string {
|
||||
return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
|
||||
}
|
||||
|
||||
// This returns the next available rule priority level for a given set of security rules.
|
||||
func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
|
||||
var smallest int32 = loadBalancerMinimumPriority
|
||||
var spread int32 = 1
|
||||
|
||||
outer:
|
||||
for smallest < loadBalancerMaximumPriority {
|
||||
for _, rule := range rules {
|
||||
if *rule.Priority == smallest {
|
||||
smallest += spread
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
// no one else had it
|
||||
return smallest, nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
|
||||
}
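An illustrative sketch of the priority scan above, assuming the same SecurityRule shape this file already reads (a *int32 Priority field); the two seeded rules occupy 500 and 501, so the next free priority is 502.

func exampleNextAvailablePriority() (int32, error) {
	p500 := int32(loadBalancerMinimumPriority)
	p501 := int32(loadBalancerMinimumPriority + 1)

	// Two existing rules already hold priorities 500 and 501.
	rules := []network.SecurityRule{
		{Priority: &p500},
		{Priority: &p501},
	}

	// The scan walks upward from loadBalancerMinimumPriority and returns 502 here.
	return getNextAvailablePriority(rules)
}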
|
||||
|
||||
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) {
|
||||
machine, exists, err := az.getVirtualMachine(nodeName)
|
||||
if !exists {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
nicName, err := getLastSegment(nicID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
nic, err := az.InterfacesClient.Get(az.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, nil
|
||||
}
|
126
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/arm/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// checkResourceExistsFromError inspects an error and returns true if err is nil,
|
||||
// false if the error is an autorest.DetailedError with StatusCode=404, and returns the
|
||||
// error back if it has another status code or is another type of error.
|
||||
func checkResourceExistsFromError(err error) (bool, error) {
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
v, ok := err.(autorest.DetailedError)
|
||||
if ok && v.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, v
|
||||
}
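A small sketch (not part of the vendored file) of the contract above; the DetailedError literal with only StatusCode set is a simplification for illustration.

func exampleCheckResourceExists() (bool, bool) {
	// A nil error means the Get call succeeded, so the resource exists.
	existsOnNil, _ := checkResourceExistsFromError(nil)

	// A DetailedError carrying a 404 is treated as "not found" rather than a failure.
	notFound := autorest.DetailedError{StatusCode: http.StatusNotFound}
	existsOn404, _ := checkResourceExistsFromError(notFound)

	return existsOnNil, existsOn404 // true, false
}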
|
||||
|
||||
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
vmName := string(nodeName)
|
||||
vm, err = az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "")
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return vm, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return vm, false, nil
|
||||
}
|
||||
|
||||
return vm, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
routeTable, err = az.RouteTablesClient.Get(az.ResourceGroup, az.RouteTableName, "")
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return routeTable, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return routeTable, false, nil
|
||||
}
|
||||
|
||||
return routeTable, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
sg, err = az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return sg, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return sg, false, nil
|
||||
}
|
||||
|
||||
return sg, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "")
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return lb, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return lb, false, nil
|
||||
}
|
||||
|
||||
return lb, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) {
|
||||
var realErr error
|
||||
|
||||
pip, err = az.PublicIPAddressesClient.Get(az.ResourceGroup, name, "")
|
||||
|
||||
exists, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return pip, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return pip, false, nil
|
||||
}
|
||||
|
||||
return pip, exists, err
|
||||
}
|
78
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
|
||||
|
||||
var faultMutex = &sync.Mutex{}
|
||||
var faultDomain *string
|
||||
|
||||
type instanceInfo struct {
|
||||
ID string `json:"ID"`
|
||||
UpdateDomain string `json:"UD"`
|
||||
FaultDomain string `json:"FD"`
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
|
||||
func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
|
||||
faultMutex.Lock()
|
||||
if faultDomain == nil {
|
||||
var err error
|
||||
faultDomain, err = fetchFaultDomain()
|
||||
if err != nil {
|
||||
faultMutex.Unlock()
return cloudprovider.Zone{}, err
|
||||
}
|
||||
}
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: *faultDomain,
|
||||
Region: az.Location,
|
||||
}
|
||||
faultMutex.Unlock()
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
func fetchFaultDomain() (*string, error) {
|
||||
resp, err := http.Get(instanceInfoURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return readFaultDomain(resp.Body)
|
||||
}
|
||||
|
||||
func readFaultDomain(reader io.Reader) (*string, error) {
|
||||
var instanceInfo instanceInfo
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(body, &instanceInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &instanceInfo.FaultDomain, nil
|
||||
}
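A test-style sketch of the parsing above, assuming a _test file with "strings" and "testing" imported; the JSON payload mirrors the instanceInfo struct tags (ID, UD, FD) and the values are made up.

func TestReadFaultDomain(t *testing.T) {
	payload := `{"ID":"machine-0","UD":"0","FD":"2"}`

	fd, err := readFaultDomain(strings.NewReader(payload))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if *fd != "2" {
		t.Errorf("expected fault domain %q, got %q", "2", *fd)
	}
}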
|
38
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/vhd.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/rubiojr/go-vhd/vhd"
|
||||
)
|
||||
|
||||
const (
|
||||
vhdHeaderSize = vhd.VHD_HEADER_SIZE
|
||||
)
|
||||
|
||||
func createVHDHeader(size uint64) ([]byte, error) {
|
||||
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
|
||||
b := new(bytes.Buffer)
|
||||
err := binary.Write(b, binary.BigEndian, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
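A brief usage sketch, added for illustration: the header bytes produced here are what would be written into the disk blob when creating a fixed-size VHD, and their length is expected to equal vhdHeaderSize; the 1 GiB size is arbitrary.

func exampleCreateVHDHeader() (int, error) {
	const oneGiB = uint64(1) << 30

	// Build the fixed-VHD header for a hypothetical 1 GiB disk.
	header, err := createVHDHeader(oneGiB)
	if err != nil {
		return 0, err
	}

	// The caller would write these bytes alongside the raw disk contents.
	return len(header), nil
}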
|
46
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cloudstack.go",
|
||||
"cloudstack_loadbalancer.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/xanzy/go-cloudstack/cloudstack",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["cloudstack_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = ["//pkg/api/v1:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/OWNERS
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
assignees:
|
||||
- runseb
|
||||
- ngtuna
|
||||
- svanharmelen
|
123
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/xanzy/go-cloudstack/cloudstack"
|
||||
"gopkg.in/gcfg.v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// ProviderName is the name of this cloud provider.
|
||||
const ProviderName = "cloudstack"
|
||||
|
||||
// CSConfig wraps the config for the CloudStack cloud provider.
|
||||
type CSConfig struct {
|
||||
Global struct {
|
||||
APIURL string `gcfg:"api-url"`
|
||||
APIKey string `gcfg:"api-key"`
|
||||
SecretKey string `gcfg:"secret-key"`
|
||||
SSLNoVerify bool `gcfg:"ssl-no-verify"`
|
||||
ProjectID string `gcfg:"project-id"`
|
||||
Zone string `gcfg:"zone"`
|
||||
}
|
||||
}
|
||||
|
||||
// CSCloud is an implementation of Interface for CloudStack.
|
||||
type CSCloud struct {
|
||||
client *cloudstack.CloudStackClient
|
||||
projectID string // If non-"", all resources will be created within this project
|
||||
zone string
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
|
||||
cfg, err := readConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newCSCloud(cfg)
|
||||
})
|
||||
}
|
||||
|
||||
func readConfig(config io.Reader) (*CSConfig, error) {
|
||||
if config == nil {
|
||||
err := fmt.Errorf("no cloud provider config given")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := &CSConfig{}
|
||||
if err := gcfg.ReadInto(cfg, config); err != nil {
|
||||
glog.Errorf("Couldn't parse config: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// newCSCloud creates a new instance of CSCloud.
|
||||
func newCSCloud(cfg *CSConfig) (*CSCloud, error) {
|
||||
client := cloudstack.NewAsyncClient(cfg.Global.APIURL, cfg.Global.APIKey, cfg.Global.SecretKey, !cfg.Global.SSLNoVerify)
|
||||
|
||||
return &CSCloud{client, cfg.Global.ProjectID, cfg.Global.Zone}, nil
|
||||
}
|
||||
|
||||
// LoadBalancer returns an implementation of LoadBalancer for CloudStack.
|
||||
func (cs *CSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return cs, true
|
||||
}
|
||||
|
||||
// Instances returns an implementation of Instances for CloudStack.
|
||||
func (cs *CSCloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Zones returns an implementation of Zones for CloudStack.
|
||||
func (cs *CSCloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return cs, true
|
||||
}
|
||||
|
||||
// Clusters returns an implementation of Clusters for CloudStack.
|
||||
func (cs *CSCloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Routes returns an implementation of Routes for CloudStack.
|
||||
func (cs *CSCloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (cs *CSCloud) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (cs *CSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// GetZone returns the Zone containing the region that the program is running in.
|
||||
func (cs *CSCloud) GetZone() (cloudprovider.Zone, error) {
|
||||
glog.V(2).Infof("Current zone is %v", cs.zone)
|
||||
return cloudprovider.Zone{Region: cs.zone}, nil
|
||||
}
|
542
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go
generated
vendored
Normal file
|
@ -0,0 +1,542 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/xanzy/go-cloudstack/cloudstack"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
type loadBalancer struct {
|
||||
*cloudstack.CloudStackClient
|
||||
|
||||
name string
|
||||
algorithm string
|
||||
hostIDs []string
|
||||
ipAddr string
|
||||
ipAddrID string
|
||||
networkID string
|
||||
projectID string
|
||||
rules map[string]*cloudstack.LoadBalancerRule
|
||||
}
|
||||
|
||||
// GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
|
||||
func (cs *CSCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
lb, err := cs.getLoadBalancer(service)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// If we don't have any rules, the load balancer does not exist.
|
||||
if len(lb.rules) == 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr)
|
||||
|
||||
status := &v1.LoadBalancerStatus{}
|
||||
status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: lb.ipAddr})
|
||||
|
||||
return status, true, nil
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer.
|
||||
func (cs *CSCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) {
|
||||
glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes)
|
||||
|
||||
if len(service.Spec.Ports) == 0 {
|
||||
return nil, fmt.Errorf("requested load balancer with no ports")
|
||||
}
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
lb, err := cs.getLoadBalancer(service)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the load balancer algorithm.
|
||||
switch service.Spec.SessionAffinity {
|
||||
case v1.ServiceAffinityNone:
|
||||
lb.algorithm = "roundrobin"
|
||||
case v1.ServiceAffinityClientIP:
|
||||
lb.algorithm = "source"
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported load balancer affinity: %v", service.Spec.SessionAffinity)
|
||||
}
|
||||
|
||||
// Verify that all the hosts belong to the same network, and retrieve their IDs.
|
||||
lb.hostIDs, lb.networkID, err = cs.verifyHosts(nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !lb.hasLoadBalancerIP() {
|
||||
// Create or retrieve the load balancer IP.
|
||||
if err := lb.getLoadBalancerIP(service.Spec.LoadBalancerIP); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
|
||||
defer func(lb *loadBalancer) {
|
||||
if err != nil {
|
||||
if err := lb.releaseLoadBalancerIP(); err != nil {
|
||||
glog.Errorf(err.Error())
|
||||
}
|
||||
}
|
||||
}(lb)
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr)
|
||||
|
||||
for _, port := range service.Spec.Ports {
|
||||
// All ports have their own load balancer rule, so add the port to lbName to keep the names unique.
|
||||
lbRuleName := fmt.Sprintf("%s-%d", lb.name, port.Port)
|
||||
|
||||
// If the load balancer rule exists and is up-to-date, we move on to the next rule.
|
||||
exists, needsUpdate, err := lb.checkLoadBalancerRule(lbRuleName, port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists && !needsUpdate {
|
||||
glog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName)
|
||||
// Remove the rule from the map, to prevent it from being deleted in the cleanup below.
|
||||
delete(lb.rules, lbRuleName)
|
||||
continue
|
||||
}
|
||||
|
||||
if needsUpdate {
|
||||
glog.V(4).Infof("Updating load balancer rule: %v", lbRuleName)
|
||||
if err := lb.updateLoadBalancerRule(lbRuleName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Remove the rule from the map, to prevent it from being deleted in the cleanup below.
|
||||
delete(lb.rules, lbRuleName)
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Creating load balancer rule: %v", lbRuleName)
|
||||
lbRule, err := lb.createLoadBalancerRule(lbRuleName, port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName)
|
||||
if err = lb.assignHostsToRule(lbRule, lb.hostIDs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Clean up any rules that are still left in the rules map, as they are no longer needed.
|
||||
for _, lbRule := range lb.rules {
|
||||
glog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name)
|
||||
if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
status = &v1.LoadBalancerStatus{}
|
||||
status.Ingress = []v1.LoadBalancerIngress{{IP: lb.ipAddr}}
|
||||
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// UpdateLoadBalancer updates hosts under the specified load balancer.
|
||||
func (cs *CSCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
lb, err := cs.getLoadBalancer(service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify that all the hosts belong to the same network, and retrieve their IDs.
|
||||
lb.hostIDs, _, err = cs.verifyHosts(nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, lbRule := range lb.rules {
|
||||
p := lb.LoadBalancer.NewListLoadBalancerRuleInstancesParams(lbRule.Id)
|
||||
|
||||
// Retrieve all VMs currently associated to this load balancer rule.
|
||||
l, err := lb.LoadBalancer.ListLoadBalancerRuleInstances(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving associated instances: %v", err)
|
||||
}
|
||||
|
||||
assign, remove := symmetricDifference(lb.hostIDs, l.LoadBalancerRuleInstances)
|
||||
|
||||
if len(assign) > 0 {
|
||||
glog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name)
|
||||
if err := lb.assignHostsToRule(lbRule, assign); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(remove) > 0 {
|
||||
glog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", assign, lbRule.Name)
|
||||
if err := lb.removeHostsFromRule(lbRule, remove); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning
|
||||
// nil if the load balancer specified either didn't exist or was successfully deleted.
|
||||
func (cs *CSCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
|
||||
|
||||
// Get the load balancer details and existing rules.
|
||||
lb, err := cs.getLoadBalancer(service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, lbRule := range lb.rules {
|
||||
glog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
|
||||
if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
|
||||
glog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
|
||||
if err := lb.releaseLoadBalancerIP(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getLoadBalancer retrieves the IP address and ID and all the existing rules it can find.
|
||||
func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) {
|
||||
lb := &loadBalancer{
|
||||
CloudStackClient: cs.client,
|
||||
name: cloudprovider.GetLoadBalancerName(service),
|
||||
projectID: cs.projectID,
|
||||
rules: make(map[string]*cloudstack.LoadBalancerRule),
|
||||
}
|
||||
|
||||
p := cs.client.LoadBalancer.NewListLoadBalancerRulesParams()
|
||||
p.SetKeyword(lb.name)
|
||||
p.SetListall(true)
|
||||
|
||||
if cs.projectID != "" {
|
||||
p.SetProjectid(cs.projectID)
|
||||
}
|
||||
|
||||
l, err := cs.client.LoadBalancer.ListLoadBalancerRules(p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error retrieving load balancer rules: %v", err)
|
||||
}
|
||||
|
||||
for _, lbRule := range l.LoadBalancerRules {
|
||||
lb.rules[lbRule.Name] = lbRule
|
||||
|
||||
if lb.ipAddr != "" && lb.ipAddr != lbRule.Publicip {
|
||||
glog.Warningf("Load balancer for service %v/%v has rules associated with different IP's: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
|
||||
}
|
||||
|
||||
lb.ipAddr = lbRule.Publicip
|
||||
lb.ipAddrID = lbRule.Publicipid
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))
|
||||
|
||||
return lb, nil
|
||||
}
|
||||
|
||||
// verifyHosts verifies if all hosts belong to the same network, and returns the host IDs and network ID.
|
||||
func (cs *CSCloud) verifyHosts(nodes []*v1.Node) ([]string, string, error) {
|
||||
hostNames := map[string]bool{}
|
||||
for _, node := range nodes {
|
||||
hostNames[node.Name] = true
|
||||
}
|
||||
|
||||
p := cs.client.VirtualMachine.NewListVirtualMachinesParams()
|
||||
p.SetListall(true)
|
||||
|
||||
if cs.projectID != "" {
|
||||
p.SetProjectid(cs.projectID)
|
||||
}
|
||||
|
||||
l, err := cs.client.VirtualMachine.ListVirtualMachines(p)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error retrieving list of hosts: %v", err)
|
||||
}
|
||||
|
||||
var hostIDs []string
|
||||
var networkID string
|
||||
|
||||
// Check if the virtual machine is in the hosts slice, then add the corresponding ID.
|
||||
for _, vm := range l.VirtualMachines {
|
||||
if hostNames[vm.Name] {
|
||||
if networkID != "" && networkID != vm.Nic[0].Networkid {
|
||||
return nil, "", fmt.Errorf("found hosts that belong to different networks")
|
||||
}
|
||||
|
||||
networkID = vm.Nic[0].Networkid
|
||||
hostIDs = append(hostIDs, vm.Id)
|
||||
}
|
||||
}
|
||||
|
||||
return hostIDs, networkID, nil
|
||||
}
|
||||
|
||||
// hasLoadBalancerIP returns true if we have a load balancer address and ID.
|
||||
func (lb *loadBalancer) hasLoadBalancerIP() bool {
|
||||
return lb.ipAddr != "" && lb.ipAddrID != ""
|
||||
}
|
||||
|
||||
// getLoadBalancerIP retrieves an existing IP or associates a new IP.
|
||||
func (lb *loadBalancer) getLoadBalancerIP(loadBalancerIP string) error {
|
||||
if loadBalancerIP != "" {
|
||||
return lb.getPublicIPAddress(loadBalancerIP)
|
||||
}
|
||||
|
||||
return lb.associatePublicIPAddress()
|
||||
}
|
||||
|
||||
// getPublicIPAddress retrieves the details of the given IP, and sets the address and its ID.
|
||||
func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error {
|
||||
glog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)
|
||||
|
||||
p := lb.Address.NewListPublicIpAddressesParams()
|
||||
p.SetIpaddress(loadBalancerIP)
|
||||
p.SetListall(true)
|
||||
|
||||
if lb.projectID != "" {
|
||||
p.SetProjectid(lb.projectID)
|
||||
}
|
||||
|
||||
l, err := lb.Address.ListPublicIpAddresses(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving IP address: %v", err)
|
||||
}
|
||||
|
||||
if l.Count != 1 {
|
||||
return fmt.Errorf("could not find IP address %v", loadBalancerIP)
|
||||
}
|
||||
|
||||
lb.ipAddr = l.PublicIpAddresses[0].Ipaddress
|
||||
lb.ipAddrID = l.PublicIpAddresses[0].Id
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// associatePublicIPAddress associates a new IP and sets the address and its ID.
|
||||
func (lb *loadBalancer) associatePublicIPAddress() error {
|
||||
glog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
|
||||
// If a network belongs to a VPC, the IP address needs to be associated with
|
||||
// the VPC instead of with the network.
|
||||
network, count, err := lb.Network.GetNetworkByID(lb.networkID, cloudstack.WithProject(lb.projectID))
|
||||
if err != nil {
|
||||
if count == 0 {
|
||||
return fmt.Errorf("could not find network %v", lb.networkID)
|
||||
}
|
||||
return fmt.Errorf("error retrieving network: %v", err)
|
||||
}
|
||||
|
||||
p := lb.Address.NewAssociateIpAddressParams()
|
||||
|
||||
if network.Vpcid != "" {
|
||||
p.SetVpcid(network.Vpcid)
|
||||
} else {
|
||||
p.SetNetworkid(lb.networkID)
|
||||
}
|
||||
|
||||
if lb.projectID != "" {
|
||||
p.SetProjectid(lb.projectID)
|
||||
}
|
||||
|
||||
// Associate a new IP address
|
||||
r, err := lb.Address.AssociateIpAddress(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error associating new IP address: %v", err)
|
||||
}
|
||||
|
||||
lb.ipAddr = r.Ipaddress
|
||||
lb.ipAddrID = r.Id
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// releaseLoadBalancerIP releases the associated IP.
|
||||
func (lb *loadBalancer) releaseLoadBalancerIP() error {
|
||||
p := lb.Address.NewDisassociateIpAddressParams(lb.ipAddrID)
|
||||
|
||||
if _, err := lb.Address.DisassociateIpAddress(p); err != nil {
|
||||
return fmt.Errorf("error releasing load balancer IP %v: %v", lb.ipAddr, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkLoadBalancerRule checks if the rule already exists and if it does, if it can be updated. If
|
||||
// it does exist but cannot be updated, it will delete the existing rule so it can be created again.
|
||||
func (lb *loadBalancer) checkLoadBalancerRule(lbRuleName string, port v1.ServicePort) (bool, bool, error) {
|
||||
lbRule, ok := lb.rules[lbRuleName]
|
||||
if !ok {
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// Check if any of the values we cannot update (those that require a new load balancer rule) are changed.
|
||||
if lbRule.Publicip == lb.ipAddr && lbRule.Privateport == strconv.Itoa(int(port.NodePort)) && lbRule.Publicport == strconv.Itoa(int(port.Port)) {
|
||||
return true, lbRule.Algorithm != lb.algorithm, nil
|
||||
}
|
||||
|
||||
// Delete the load balancer rule so we can create a new one using the new values.
|
||||
if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
}
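An illustrative sketch of the (exists, needsUpdate) contract above, using made-up names and addresses: the ports and public IP match the existing rule and only the algorithm differs, so the rule is kept but flagged for an update.

func exampleCheckLoadBalancerRule() (bool, bool, error) {
	lb := &loadBalancer{
		algorithm: "roundrobin",
		ipAddr:    "203.0.113.10",
		rules: map[string]*cloudstack.LoadBalancerRule{
			"svc-80": {
				Name:        "svc-80",
				Algorithm:   "source",
				Publicip:    "203.0.113.10",
				Publicport:  "80",
				Privateport: "30080",
			},
		},
	}

	port := v1.ServicePort{Port: 80, NodePort: 30080}

	// Returns (true, true, nil): the rule exists and only its algorithm needs updating.
	return lb.checkLoadBalancerRule("svc-80", port)
}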
|
||||
|
||||
// updateLoadBalancerRule updates a load balancer rule.
|
||||
func (lb *loadBalancer) updateLoadBalancerRule(lbRuleName string) error {
|
||||
lbRule := lb.rules[lbRuleName]
|
||||
|
||||
p := lb.LoadBalancer.NewUpdateLoadBalancerRuleParams(lbRule.Id)
|
||||
p.SetAlgorithm(lb.algorithm)
|
||||
|
||||
_, err := lb.LoadBalancer.UpdateLoadBalancerRule(p)
|
||||
return err
|
||||
}
|
||||
|
||||
// createLoadBalancerRule creates a new load balancer rule and returns it.
|
||||
func (lb *loadBalancer) createLoadBalancerRule(lbRuleName string, port v1.ServicePort) (*cloudstack.LoadBalancerRule, error) {
|
||||
p := lb.LoadBalancer.NewCreateLoadBalancerRuleParams(
|
||||
lb.algorithm,
|
||||
lbRuleName,
|
||||
int(port.NodePort),
|
||||
int(port.Port),
|
||||
)
|
||||
|
||||
p.SetNetworkid(lb.networkID)
|
||||
p.SetPublicipid(lb.ipAddrID)
|
||||
|
||||
switch port.Protocol {
|
||||
case v1.ProtocolTCP:
|
||||
p.SetProtocol("TCP")
|
||||
case v1.ProtocolUDP:
|
||||
p.SetProtocol("UDP")
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported load balancer protocol: %v", port.Protocol)
|
||||
}
|
||||
|
||||
// Do not create corresponding firewall rule.
|
||||
p.SetOpenfirewall(false)
|
||||
|
||||
// Create a new load balancer rule.
|
||||
r, err := lb.LoadBalancer.CreateLoadBalancerRule(p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating load balancer rule %v: %v", lbRuleName, err)
|
||||
}
|
||||
|
||||
lbRule := &cloudstack.LoadBalancerRule{
|
||||
Id: r.Id,
|
||||
Algorithm: r.Algorithm,
|
||||
Cidrlist: r.Cidrlist,
|
||||
Name: r.Name,
|
||||
Networkid: r.Networkid,
|
||||
Privateport: r.Privateport,
|
||||
Publicport: r.Publicport,
|
||||
Publicip: r.Publicip,
|
||||
Publicipid: r.Publicipid,
|
||||
}
|
||||
|
||||
return lbRule, nil
|
||||
}
|
||||
|
||||
// deleteLoadBalancerRule deletes a load balancer rule.
|
||||
func (lb *loadBalancer) deleteLoadBalancerRule(lbRule *cloudstack.LoadBalancerRule) error {
|
||||
p := lb.LoadBalancer.NewDeleteLoadBalancerRuleParams(lbRule.Id)
|
||||
|
||||
if _, err := lb.LoadBalancer.DeleteLoadBalancerRule(p); err != nil {
|
||||
return fmt.Errorf("error deleting load balancer rule %v: %v", lbRule.Name, err)
|
||||
}
|
||||
|
||||
// Delete the rule from the map as it no longer exists
|
||||
delete(lb.rules, lbRule.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// assignHostsToRule assigns hosts to a load balancer rule.
|
||||
func (lb *loadBalancer) assignHostsToRule(lbRule *cloudstack.LoadBalancerRule, hostIDs []string) error {
|
||||
p := lb.LoadBalancer.NewAssignToLoadBalancerRuleParams(lbRule.Id)
|
||||
p.SetVirtualmachineids(hostIDs)
|
||||
|
||||
if _, err := lb.LoadBalancer.AssignToLoadBalancerRule(p); err != nil {
|
||||
return fmt.Errorf("error assigning hosts to load balancer rule %v: %v", lbRule.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeHostsFromRule removes hosts from a load balancer rule.
|
||||
func (lb *loadBalancer) removeHostsFromRule(lbRule *cloudstack.LoadBalancerRule, hostIDs []string) error {
|
||||
p := lb.LoadBalancer.NewRemoveFromLoadBalancerRuleParams(lbRule.Id)
|
||||
p.SetVirtualmachineids(hostIDs)
|
||||
|
||||
if _, err := lb.LoadBalancer.RemoveFromLoadBalancerRule(p); err != nil {
|
||||
return fmt.Errorf("error removing hosts from load balancer rule %v: %v", lbRule.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// symmetricDifference returns the symmetric difference between the old (existing) and new (wanted) host IDs.
|
||||
func symmetricDifference(hostIDs []string, lbInstances []*cloudstack.VirtualMachine) ([]string, []string) {
|
||||
new := make(map[string]bool)
|
||||
for _, hostID := range hostIDs {
|
||||
new[hostID] = true
|
||||
}
|
||||
|
||||
var remove []string
|
||||
for _, instance := range lbInstances {
|
||||
if new[instance.Id] {
|
||||
delete(new, instance.Id)
|
||||
continue
|
||||
}
|
||||
|
||||
remove = append(remove, instance.Id)
|
||||
}
|
||||
|
||||
var assign []string
|
||||
for hostID := range new {
|
||||
assign = append(assign, hostID)
|
||||
}
|
||||
|
||||
return assign, remove
|
||||
}
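A small illustration of the assign/remove split computed above, using made-up VM IDs: vm-b is already assigned and stays, vm-a still has to be assigned, and vm-c has to be removed.

func exampleSymmetricDifference() ([]string, []string) {
	// Hosts that should be behind the rule.
	wanted := []string{"vm-a", "vm-b"}

	// Hosts currently assigned to the rule.
	current := []*cloudstack.VirtualMachine{
		{Id: "vm-b"},
		{Id: "vm-c"},
	}

	// Returns ([vm-a], [vm-c]).
	return symmetricDifference(wanted, current)
}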
|
141
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_test.go
generated
vendored
Normal file
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudstack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
const testClusterName = "testCluster"
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %v", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
api-url = https://cloudstack.url
|
||||
api-key = a-valid-api-key
|
||||
secret-key = a-valid-secret-key
|
||||
ssl-no-verify = true
|
||||
project-id = a-valid-project-id
|
||||
zone = a-valid-zone
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %v", err)
|
||||
}
|
||||
|
||||
if cfg.Global.APIURL != "https://cloudstack.url" {
|
||||
t.Errorf("incorrect api-url: %s", cfg.Global.APIURL)
|
||||
}
|
||||
if cfg.Global.APIKey != "a-valid-api-key" {
|
||||
t.Errorf("incorrect api-key: %s", cfg.Global.APIKey)
|
||||
}
|
||||
if cfg.Global.SecretKey != "a-valid-secret-key" {
|
||||
t.Errorf("incorrect secret-key: %s", cfg.Global.SecretKey)
|
||||
}
|
||||
if !cfg.Global.SSLNoVerify {
|
||||
t.Errorf("incorrect ssl-no-verify: %t", cfg.Global.SSLNoVerify)
|
||||
}
|
||||
if cfg.Global.Zone != "a-valid-zone" {
|
||||
t.Errorf("incorrect zone: %s", cfg.Global.Zone)
|
||||
}
|
||||
}
|
||||
|
||||
// This allows acceptance testing against an existing CloudStack environment.
|
||||
func configFromEnv() (*CSConfig, bool) {
|
||||
cfg := &CSConfig{}
|
||||
|
||||
cfg.Global.APIURL = os.Getenv("CS_API_URL")
|
||||
cfg.Global.APIKey = os.Getenv("CS_API_KEY")
|
||||
cfg.Global.SecretKey = os.Getenv("CS_SECRET_KEY")
|
||||
cfg.Global.ProjectID = os.Getenv("CS_PROJECT_ID")
|
||||
cfg.Global.Zone = os.Getenv("CS_ZONE")
|
||||
|
||||
// It is safe to ignore the error here. If the input cannot be parsed, SSLNoVerify
|
||||
// will still be a bool with its zero value (false) which is the expected default.
|
||||
cfg.Global.SSLNoVerify, _ = strconv.ParseBool(os.Getenv("CS_SSL_NO_VERIFY"))
|
||||
|
||||
// Check if we have the minimum required info to be able to connect to CloudStack.
|
||||
ok := cfg.Global.APIURL != "" && cfg.Global.APIKey != "" && cfg.Global.SecretKey != ""
|
||||
|
||||
return cfg, ok
|
||||
}
|
||||
|
||||
func TestNewCSCloud(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
_, err := newCSCloud(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate CloudStack: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalancer(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
cs, err := newCSCloud(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate CloudStack: %v", err)
|
||||
}
|
||||
|
||||
lb, ok := cs.LoadBalancer()
|
||||
if !ok {
|
||||
t.Fatalf("LoadBalancer() returned false")
|
||||
}
|
||||
|
||||
_, exists, err := lb.GetLoadBalancer(testClusterName, &v1.Service{ObjectMeta: v1.ObjectMeta{Name: "noexist"}})
|
||||
if err != nil {
|
||||
t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatalf("GetLoadBalancer(\"noexist\") returned exists")
|
||||
}
|
||||
}
|
||||
|
||||
func TestZones(t *testing.T) {
|
||||
cs := &CSCloud{
|
||||
zone: "myRegion",
|
||||
}
|
||||
|
||||
z, ok := cs.Zones()
|
||||
if !ok {
|
||||
t.Fatalf("Zones() returned false")
|
||||
}
|
||||
|
||||
zone, err := z.GetZone()
|
||||
if err != nil {
|
||||
t.Fatalf("GetZone() returned error: %s", err)
|
||||
}
|
||||
|
||||
if zone.Region != "myRegion" {
|
||||
t.Fatalf("GetZone() returned wrong region (%s)", zone.Region)
|
||||
}
|
||||
}
|
35
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"fake.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
19
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package fake is a test-double implementation of cloudprovider
|
||||
// Interface, LoadBalancer and Instances. It is useful for testing.
|
||||
package fake // import "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
268
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/fake/fake.go
generated
vendored
Normal file
|
@ -0,0 +1,268 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const ProviderName = "fake"
|
||||
|
||||
// FakeBalancer is a fake storage of balancer information
|
||||
type FakeBalancer struct {
|
||||
Name string
|
||||
Region string
|
||||
LoadBalancerIP string
|
||||
Ports []v1.ServicePort
|
||||
Hosts []*v1.Node
|
||||
}
|
||||
|
||||
type FakeUpdateBalancerCall struct {
|
||||
Service *v1.Service
|
||||
Hosts []*v1.Node
|
||||
}
|
||||
|
||||
// FakeCloud is a test-double implementation of Interface, LoadBalancer, Instances, and Routes. It is useful for testing.
|
||||
type FakeCloud struct {
|
||||
Exists bool
|
||||
Err error
|
||||
Calls []string
|
||||
Addresses []v1.NodeAddress
|
||||
ExtID map[types.NodeName]string
|
||||
InstanceTypes map[types.NodeName]string
|
||||
Machines []types.NodeName
|
||||
NodeResources *v1.NodeResources
|
||||
ClusterList []string
|
||||
MasterName string
|
||||
ExternalIP net.IP
|
||||
Balancers map[string]FakeBalancer
|
||||
UpdateCalls []FakeUpdateBalancerCall
|
||||
RouteMap map[string]*FakeRoute
|
||||
Lock sync.Mutex
|
||||
cloudprovider.Zone
|
||||
}
|
||||
|
||||
type FakeRoute struct {
|
||||
ClusterName string
|
||||
Route cloudprovider.Route
|
||||
}
|
||||
|
||||
func (f *FakeCloud) addCall(desc string) {
|
||||
f.Calls = append(f.Calls, desc)
|
||||
}
|
||||
|
||||
// ClearCalls clears internal record of method calls to this FakeCloud.
|
||||
func (f *FakeCloud) ClearCalls() {
|
||||
f.Calls = []string{}
|
||||
}
|
||||
|
||||
func (f *FakeCloud) ListClusters() ([]string, error) {
|
||||
return f.ClusterList, f.Err
|
||||
}
|
||||
|
||||
func (f *FakeCloud) Master(name string) (string, error) {
|
||||
return f.MasterName, f.Err
|
||||
}
|
||||
|
||||
func (f *FakeCloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return f, true
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (f *FakeCloud) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (f *FakeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// LoadBalancer returns a fake implementation of LoadBalancer.
|
||||
// Actually it just returns f itself.
|
||||
func (f *FakeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return f, true
|
||||
}
|
||||
|
||||
// Instances returns a fake implementation of Instances.
|
||||
//
|
||||
// Actually it just returns f itself.
|
||||
func (f *FakeCloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return f, true
|
||||
}
|
||||
|
||||
func (f *FakeCloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return f, true
|
||||
}
|
||||
|
||||
func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return f, true
|
||||
}
|
||||
|
||||
// GetLoadBalancer is a stub implementation of LoadBalancer.GetLoadBalancer.
|
||||
func (f *FakeCloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
status := &v1.LoadBalancerStatus{}
|
||||
status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}}
|
||||
|
||||
return status, f.Exists, f.Err
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
|
||||
// It adds an entry "create" into the internal method call record.
|
||||
func (f *FakeCloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
f.addCall("create")
|
||||
if f.Balancers == nil {
|
||||
f.Balancers = make(map[string]FakeBalancer)
|
||||
}
|
||||
|
||||
name := cloudprovider.GetLoadBalancerName(service)
|
||||
spec := service.Spec
|
||||
|
||||
zone, err := f.GetZone()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
region := zone.Region
|
||||
|
||||
f.Balancers[name] = FakeBalancer{name, region, spec.LoadBalancerIP, spec.Ports, nodes}
|
||||
|
||||
status := &v1.LoadBalancerStatus{}
|
||||
status.Ingress = []v1.LoadBalancerIngress{{IP: f.ExternalIP.String()}}
|
||||
|
||||
return status, f.Err
|
||||
}
|
||||
|
||||
// UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer.
|
||||
// It adds an entry "update" into the internal method call record.
|
||||
func (f *FakeCloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error {
|
||||
f.addCall("update")
|
||||
f.UpdateCalls = append(f.UpdateCalls, FakeUpdateBalancerCall{service, nodes})
|
||||
return f.Err
|
||||
}
|
||||
|
||||
// EnsureLoadBalancerDeleted is a test-spy implementation of LoadBalancer.EnsureLoadBalancerDeleted.
|
||||
// It adds an entry "delete" into the internal method call record.
|
||||
func (f *FakeCloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
|
||||
f.addCall("delete")
|
||||
return f.Err
|
||||
}
|
||||
|
||||
func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
func (f *FakeCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
// NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
|
||||
// It adds an entry "node-addresses" into the internal method call record.
|
||||
func (f *FakeCloud) NodeAddresses(instance types.NodeName) ([]v1.NodeAddress, error) {
|
||||
f.addCall("node-addresses")
|
||||
return f.Addresses, f.Err
|
||||
}
|
||||
|
||||
// ExternalID is a test-spy implementation of Instances.ExternalID.
|
||||
// It adds an entry "external-id" into the internal method call record.
|
||||
// It returns the external ID mapped to the given instance name; if no mapping exists, an empty string is returned.
|
||||
func (f *FakeCloud) ExternalID(nodeName types.NodeName) (string, error) {
|
||||
f.addCall("external-id")
|
||||
return f.ExtID[nodeName], f.Err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified Name.
|
||||
func (f *FakeCloud) InstanceID(nodeName types.NodeName) (string, error) {
|
||||
f.addCall("instance-id")
|
||||
return f.ExtID[nodeName], nil
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (f *FakeCloud) InstanceType(instance types.NodeName) (string, error) {
|
||||
f.addCall("instance-type")
|
||||
return f.InstanceTypes[instance], nil
|
||||
}
|
||||
|
||||
// List is a test-spy implementation of Instances.List.
|
||||
// It adds an entry "list" into the internal method call record.
|
||||
func (f *FakeCloud) List(filter string) ([]types.NodeName, error) {
|
||||
f.addCall("list")
|
||||
result := []types.NodeName{}
|
||||
for _, machine := range f.Machines {
|
||||
if match, _ := regexp.MatchString(filter, string(machine)); match {
|
||||
result = append(result, machine)
|
||||
}
|
||||
}
|
||||
return result, f.Err
|
||||
}
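A sketch of how the fake is typically driven from a test: seed the fields, call the method under test, and inspect the recorded calls. The node names are made up.

func exampleFakeCloudList() ([]types.NodeName, []string, error) {
	f := &FakeCloud{
		Machines: []types.NodeName{"node-a", "node-b", "other-1"},
	}

	// List filters the seeded machine names with a regular expression.
	nodes, err := f.List("^node-.*")
	if err != nil {
		return nil, nil, err
	}

	// Each call is also recorded, so a test can assert on f.Calls (here: ["list"]).
	return nodes, f.Calls, nil
}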
|
||||
|
||||
func (f *FakeCloud) GetZone() (cloudprovider.Zone, error) {
|
||||
f.addCall("get-zone")
|
||||
return f.Zone, f.Err
|
||||
}
|
||||
|
||||
func (f *FakeCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.addCall("list-routes")
|
||||
var routes []*cloudprovider.Route
|
||||
for _, fakeRoute := range f.RouteMap {
|
||||
if clusterName == fakeRoute.ClusterName {
|
||||
routeCopy := fakeRoute.Route
|
||||
routes = append(routes, &routeCopy)
|
||||
}
|
||||
}
|
||||
return routes, f.Err
|
||||
}
|
||||
|
||||
func (f *FakeCloud) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.addCall("create-route")
|
||||
name := clusterName + "-" + nameHint
|
||||
if _, exists := f.RouteMap[name]; exists {
|
||||
f.Err = fmt.Errorf("route %q already exists", name)
|
||||
return f.Err
|
||||
}
|
||||
fakeRoute := FakeRoute{}
|
||||
fakeRoute.Route = *route
|
||||
fakeRoute.Route.Name = name
|
||||
fakeRoute.ClusterName = clusterName
|
||||
f.RouteMap[name] = &fakeRoute
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeCloud) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.addCall("delete-route")
|
||||
name := route.Name
|
||||
if _, exists := f.RouteMap[name]; !exists {
|
||||
f.Err = fmt.Errorf("no route found with name %q", name)
|
||||
return f.Err
|
||||
}
|
||||
delete(f.RouteMap, name)
|
||||
return nil
|
||||
}
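A similar sketch for the route methods, with made-up node and CIDR values; note that RouteMap must be initialized by the caller before CreateRoute is used, since the fake assigns into it directly. The TargetNode and DestinationCIDR field names are assumed from the cloudprovider.Route definition.

func exampleFakeCloudRoutes() error {
	f := &FakeCloud{
		RouteMap: make(map[string]*FakeRoute),
	}

	// CreateRoute stores the route under "<clusterName>-<nameHint>".
	route := &cloudprovider.Route{TargetNode: "node-a", DestinationCIDR: "10.244.0.0/24"}
	if err := f.CreateRoute("testCluster", "hint", route); err != nil {
		return err
	}

	// ListRoutes returns only the routes recorded for the given cluster.
	routes, err := f.ListRoutes("testCluster")
	if err != nil {
		return err
	}

	// Remove the route again; f.Calls now holds "create-route", "list-routes", "delete-route".
	return f.DeleteRoute("testCluster", routes[0])
}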
|
61
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,61 @@
|
|||
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "gce.go",
        "token_source.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/api/v1/service:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/util/flowcontrol:go_default_library",
        "//pkg/util/net/sets:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor:cloud.google.com/go/compute/metadata",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/prometheus/client_golang/prometheus",
        "//vendor:golang.org/x/oauth2",
        "//vendor:golang.org/x/oauth2/google",
        "//vendor:google.golang.org/api/compute/v1",
        "//vendor:google.golang.org/api/container/v1",
        "//vendor:google.golang.org/api/googleapi",
        "//vendor:gopkg.in/gcfg.v1",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["gce_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/doc.go (generated, vendored, new file, 19 lines added)
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package gce is an implementation of Interface, LoadBalancer
// and Instances for Google Compute Engine.
package gce // import "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go (generated, vendored, new file, 2932 lines added)
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go (generated, vendored, new file, 160 lines added)
@@ -0,0 +1,160 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce

import (
	"reflect"
	"testing"
)

func TestGetRegion(t *testing.T) {
	zoneName := "us-central1-b"
	regionName, err := GetGCERegion(zoneName)
	if err != nil {
		t.Fatalf("unexpected error from GetGCERegion: %v", err)
	}
	if regionName != "us-central1" {
		t.Errorf("Unexpected region from GetGCERegion: %s", regionName)
	}
	gce := &GCECloud{
		localZone: zoneName,
		region:    regionName,
	}
	zones, ok := gce.Zones()
	if !ok {
		t.Fatalf("Unexpected missing zones impl")
	}
	zone, err := zones.GetZone()
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if zone.Region != "us-central1" {
		t.Errorf("Unexpected region: %s", zone.Region)
	}
}

func TestComparingHostURLs(t *testing.T) {
	tests := []struct {
		host1       string
		zone        string
		name        string
		expectEqual bool
	}{
		{
			host1:       "https://www.googleapis.com/compute/v1/projects/1234567/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx1",
			expectEqual: true,
		},
		{
			host1:       "https://www.googleapis.com/compute/v1/projects/cool-project/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx1",
			expectEqual: true,
		},
		{
			host1:       "https://www.googleapis.com/compute/v23/projects/1234567/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx1",
			expectEqual: true,
		},
		{
			host1:       "https://www.googleapis.com/compute/v24/projects/1234567/regions/us-central1/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx1",
			expectEqual: true,
		},
		{
			host1:       "https://www.googleapis.com/compute/v1/projects/1234567/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-c",
			name:        "kubernetes-node-fhx1",
			expectEqual: false,
		},
		{
			host1:       "https://www.googleapis.com/compute/v1/projects/1234567/zones/us-central1-f/instances/kubernetes-node-fhx",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx1",
			expectEqual: false,
		},
		{
			host1:       "https://www.googleapis.com/compute/v1/projects/1234567/zones/us-central1-f/instances/kubernetes-node-fhx1",
			zone:        "us-central1-f",
			name:        "kubernetes-node-fhx",
			expectEqual: false,
		},
	}

	for _, test := range tests {
		link1 := hostURLToComparablePath(test.host1)
		testInstance := &gceInstance{
			Name: canonicalizeInstanceName(test.name),
			Zone: test.zone,
		}
		link2 := testInstance.makeComparableHostPath()
		if test.expectEqual && link1 != link2 {
			t.Errorf("expected link1 and link2 to be equal, got %s and %s", link1, link2)
		} else if !test.expectEqual && link1 == link2 {
			t.Errorf("expected link1 and link2 not to be equal, got %s and %s", link1, link2)
		}
	}
}

func TestScrubDNS(t *testing.T) {
	tcs := []struct {
		nameserversIn  []string
		searchesIn     []string
		nameserversOut []string
		searchesOut    []string
	}{
		{
			nameserversIn:  []string{"1.2.3.4", "5.6.7.8"},
			nameserversOut: []string{"1.2.3.4", "5.6.7.8"},
		},
		{
			searchesIn:  []string{"c.prj.internal.", "12345678910.google.internal.", "google.internal."},
			searchesOut: []string{"c.prj.internal.", "google.internal."},
		},
		{
			searchesIn:  []string{"c.prj.internal.", "12345678910.google.internal.", "zone.c.prj.internal.", "google.internal."},
			searchesOut: []string{"c.prj.internal.", "zone.c.prj.internal.", "google.internal."},
		},
		{
			searchesIn:  []string{"c.prj.internal.", "12345678910.google.internal.", "zone.c.prj.internal.", "google.internal.", "unexpected"},
			searchesOut: []string{"c.prj.internal.", "zone.c.prj.internal.", "google.internal.", "unexpected"},
		},
	}
	gce := &GCECloud{}
	for i := range tcs {
		n, s := gce.ScrubDNS(tcs[i].nameserversIn, tcs[i].searchesIn)
		if !reflect.DeepEqual(n, tcs[i].nameserversOut) {
			t.Errorf("Expected %v, got %v", tcs[i].nameserversOut, n)
		}
		if !reflect.DeepEqual(s, tcs[i].searchesOut) {
			t.Errorf("Expected %v, got %v", tcs[i].searchesOut, s)
		}
	}
}

func TestCreateFirewallFails(t *testing.T) {
	name := "loadbalancer"
	region := "us-central1"
	desc := "description"
	gce := &GCECloud{}
	if err := gce.createFirewall(name, region, desc, nil, nil, nil); err == nil {
		t.Errorf("error expected when creating firewall without any tags found")
	}
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go (generated, vendored, new file, 112 lines added)
@@ -0,0 +1,112 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce

import (
	"encoding/json"
	"net/http"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"

	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
)

const (
	// Max QPS to allow through to the token URL.
	tokenURLQPS = .05 // back off to once every 20 seconds when failing
	// Maximum burst of requests to token URL before limiting.
	tokenURLBurst = 3
)

var (
	getTokenCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "get_token_count",
			Help: "Counter of total Token() requests to the alternate token source",
		},
	)
	getTokenFailCounter = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "get_token_fail_count",
			Help: "Counter of failed Token() requests to the alternate token source",
		},
	)
)

func init() {
	prometheus.MustRegister(getTokenCounter)
	prometheus.MustRegister(getTokenFailCounter)
}

type AltTokenSource struct {
	oauthClient *http.Client
	tokenURL    string
	tokenBody   string
	throttle    flowcontrol.RateLimiter
}

func (a *AltTokenSource) Token() (*oauth2.Token, error) {
	a.throttle.Accept()
	getTokenCounter.Inc()
	t, err := a.token()
	if err != nil {
		getTokenFailCounter.Inc()
	}
	return t, err
}

func (a *AltTokenSource) token() (*oauth2.Token, error) {
	req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody))
	if err != nil {
		return nil, err
	}
	res, err := a.oauthClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var tok struct {
		AccessToken string    `json:"accessToken"`
		ExpireTime  time.Time `json:"expireTime"`
	}
	if err := json.NewDecoder(res.Body).Decode(&tok); err != nil {
		return nil, err
	}
	return &oauth2.Token{
		AccessToken: tok.AccessToken,
		Expiry:      tok.ExpireTime,
	}, nil
}

func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource {
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	a := &AltTokenSource{
		oauthClient: client,
		tokenURL:    tokenURL,
		tokenBody:   tokenBody,
		throttle:    flowcontrol.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst),
	}
	return oauth2.ReuseTokenSource(nil, a)
}
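For orientation, a minimal sketch (not part of this commit) of how the alternate token source could back any oauth2-aware HTTP client. It reuses only the imports already present in this file; the token URL and body are placeholder values.

// newAltHTTPClient is a hypothetical helper inside package gce: requests made
// through the returned client fetch access tokens via the rate-limited,
// metrics-counted AltTokenSource defined above.
func newAltHTTPClient() *http.Client {
	src := NewAltTokenSource("https://master.example.invalid/token", `{"audience":"placeholder"}`)
	return oauth2.NewClient(oauth2.NoContext, src)
}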
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/BUILD (generated, vendored, new file, 66 lines added)
@@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "client.go",
        "config.go",
        "mesos.go",
        "plugins.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/resource:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/mesos/mesos-go/detector",
        "//vendor:github.com/mesos/mesos-go/detector/zoo",
        "//vendor:github.com/mesos/mesos-go/mesosproto",
        "//vendor:golang.org/x/net/context",
        "//vendor:gopkg.in/gcfg.v1",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/net",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "client_test.go",
        "config_test.go",
        "mesos_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/cloudprovider:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/mesos/mesos-go/detector",
        "//vendor:github.com/mesos/mesos-go/mesosutil",
        "//vendor:golang.org/x/net/context",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/net",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/client.go (generated, vendored, new file, 376 lines added)
@@ -0,0 +1,376 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mesos

import (
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"sync"
	"time"

	log "github.com/golang/glog"
	"github.com/mesos/mesos-go/detector"
	mesos "github.com/mesos/mesos-go/mesosproto"
	"golang.org/x/net/context"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

const defaultClusterName = "mesos"

var noLeadingMasterError = errors.New("there is no current leading master available to query")

type mesosClient struct {
	masterLock    sync.RWMutex
	master        string // host:port formatted address
	httpClient    *http.Client
	tr            *http.Transport
	initialMaster <-chan struct{} // signal chan, closes once an initial, non-nil master is found
	state         *stateCache
}

type slaveNode struct {
	hostname       string
	kubeletRunning bool
	resources      *v1.NodeResources
}

type mesosState struct {
	clusterName string
	nodes       map[string]*slaveNode // by hostname
}

type stateCache struct {
	sync.Mutex
	expiresAt time.Time
	cached    *mesosState
	err       error
	ttl       time.Duration
	refill    func(context.Context) (*mesosState, error)
}

// reloadCache reloads the state cache if it has expired.
func (c *stateCache) reloadCache(ctx context.Context) {
	now := time.Now()
	c.Lock()
	defer c.Unlock()
	if c.expiresAt.Before(now) {
		log.V(4).Infof("Reloading cached Mesos state")
		c.cached, c.err = c.refill(ctx)
		c.expiresAt = now.Add(c.ttl)
	} else {
		log.V(4).Infof("Using cached Mesos state")
	}
}

// cachedState returns the cached Mesos state.
func (c *stateCache) cachedState(ctx context.Context) (*mesosState, error) {
	c.reloadCache(ctx)
	return c.cached, c.err
}

// clusterName returns the cached Mesos cluster name.
func (c *stateCache) clusterName(ctx context.Context) (string, error) {
	cached, err := c.cachedState(ctx)
	if err != nil {
		return "", err
	}
	return cached.clusterName, nil
}

// nodes returns the cached list of slave nodes.
func (c *stateCache) nodes(ctx context.Context) (map[string]*slaveNode, error) {
	cached, err := c.cachedState(ctx)
	if err != nil {
		return nil, err
	}
	return cached.nodes, nil
}

func newMesosClient(
	md detector.Master,
	mesosHttpClientTimeout, stateCacheTTL time.Duration) (*mesosClient, error) {

	tr := utilnet.SetTransportDefaults(&http.Transport{})
	httpClient := &http.Client{
		Transport: tr,
		Timeout:   mesosHttpClientTimeout,
	}
	return createMesosClient(md, httpClient, tr, stateCacheTTL)
}

func createMesosClient(
	md detector.Master,
	httpClient *http.Client,
	tr *http.Transport,
	stateCacheTTL time.Duration) (*mesosClient, error) {

	initialMaster := make(chan struct{})
	client := &mesosClient{
		httpClient:    httpClient,
		tr:            tr,
		initialMaster: initialMaster,
		state: &stateCache{
			ttl: stateCacheTTL,
		},
	}
	client.state.refill = client.pollMasterForState
	first := true
	if err := md.Detect(detector.OnMasterChanged(func(info *mesos.MasterInfo) {
		host, port := extractMasterAddress(info)
		if len(host) > 0 {
			client.masterLock.Lock()
			defer client.masterLock.Unlock()
			client.master = fmt.Sprintf("%s:%d", host, port)
			if first {
				first = false
				close(initialMaster)
			}
		}
		log.Infof("cloud master changed to '%v'", client.master)
	})); err != nil {
		log.V(1).Infof("detector initialization failed: %v", err)
		return nil, err
	}
	return client, nil
}

func extractMasterAddress(info *mesos.MasterInfo) (host string, port int) {
	if info != nil {
		host = info.GetAddress().GetHostname()
		if host == "" {
			host = info.GetAddress().GetIp()
		}

		if host != "" {
			// use port from Address
			port = int(info.GetAddress().GetPort())
		} else {
			// deprecated: get host and port directly from MasterInfo (and not Address)
			host = info.GetHostname()
			if host == "" {
				host = unpackIPv4(info.GetIp())
			}
			port = int(info.GetPort())
		}
	}
	return
}

func unpackIPv4(ip uint32) string {
	octets := make([]byte, 4, 4)
	binary.BigEndian.PutUint32(octets, ip)
	ipv4 := net.IP(octets)
	return ipv4.String()
}

// listSlaves returns a (possibly cached) map of slave nodes by hostname.
// Callers must not mutate the contents of the returned slice.
func (c *mesosClient) listSlaves(ctx context.Context) (map[string]*slaveNode, error) {
	return c.state.nodes(ctx)
}

// clusterName returns a (possibly cached) cluster name.
func (c *mesosClient) clusterName(ctx context.Context) (string, error) {
	return c.state.clusterName(ctx)
}

// pollMasterForState returns an array of slave nodes
func (c *mesosClient) pollMasterForState(ctx context.Context) (*mesosState, error) {
	// wait for initial master detection
	select {
	case <-c.initialMaster: // noop
	case <-ctx.Done():
		return nil, ctx.Err()
	}

	master := func() string {
		c.masterLock.RLock()
		defer c.masterLock.RUnlock()
		return c.master
	}()
	if master == "" {
		return nil, noLeadingMasterError
	}

	//TODO(jdef) should not assume master uses http (what about https?)

	var state *mesosState
	successHandler := func(res *http.Response) error {
		blob, err1 := ioutil.ReadAll(res.Body)
		if err1 != nil {
			return err1
		}
		log.V(3).Infof("Got mesos state, content length %v", len(blob))
		state, err1 = parseMesosState(blob)
		return err1
	}
	// thinking here is that we may get some other status codes from mesos at some point:
	// - authentication
	// - redirection (possibly from http to https)
	// ...
	for _, tt := range []struct {
		uri      string
		handlers map[int]func(*http.Response) error
	}{
		{
			uri: fmt.Sprintf("http://%s/state", master),
			handlers: map[int]func(*http.Response) error{
				200: successHandler,
			},
		},
		{
			uri: fmt.Sprintf("http://%s/state.json", master),
			handlers: map[int]func(*http.Response) error{
				200: successHandler,
			},
		},
	} {
		req, err := http.NewRequest("GET", tt.uri, nil)
		if err != nil {
			return nil, err
		}
		err = c.httpDo(ctx, req, func(res *http.Response, err error) error {
			if err != nil {
				return err
			}
			defer res.Body.Close()
			if handler, ok := tt.handlers[res.StatusCode]; ok {
				err1 := handler(res)
				if err1 != nil {
					return err1
				}
			}
			// no handler for this error code, proceed to the next connection type
			return nil
		})
		if state != nil || err != nil {
			return state, err
		}
	}
	return nil, errors.New("failed to sync with Mesos master")
}

func parseMesosState(blob []byte) (*mesosState, error) {
	type State struct {
		ClusterName string `json:"cluster"`
		Slaves      []*struct {
			Id        string                 `json:"id"`        // ex: 20150106-162714-3815890698-5050-2453-S2
			Pid       string                 `json:"pid"`       // ex: slave(1)@10.22.211.18:5051
			Hostname  string                 `json:"hostname"`  // ex: 10.22.211.18, or slave-123.nowhere.com
			Resources map[string]interface{} `json:"resources"` // ex: {"mem": 123, "ports": "[31000-3200]"}
		} `json:"slaves"`
		Frameworks []*struct {
			Id        string `json:"id"`  // ex: 20151105-093752-3745622208-5050-1-0000
			Pid       string `json:"pid"` // ex: scheduler(1)@192.168.65.228:57124
			Executors []*struct {
				SlaveId    string `json:"slave_id"`    // ex: 20151105-093752-3745622208-5050-1-S1
				ExecutorId string `json:"executor_id"` // ex: 6704d375c68fee1e_k8sm-executor
				Name       string `json:"name"`        // ex: Kubelet-Executor
			} `json:"executors"`
		} `json:"frameworks"`
	}

	state := &State{ClusterName: defaultClusterName}
	if err := json.Unmarshal(blob, state); err != nil {
		return nil, err
	}

	executorSlaveIds := map[string]struct{}{}
	for _, f := range state.Frameworks {
		for _, e := range f.Executors {
			// Note that this simple comparison breaks when we support more than one
			// k8s instance in a cluster. At the moment this is not possible for
			// a number of reasons.
			// TODO(sttts): find way to detect executors of this k8s instance
			if e.Name == KubernetesExecutorName {
				executorSlaveIds[e.SlaveId] = struct{}{}
			}
		}
	}

	nodes := map[string]*slaveNode{} // by hostname
	for _, slave := range state.Slaves {
		if slave.Hostname == "" {
			continue
		}
		node := &slaveNode{hostname: slave.Hostname}
		cap := v1.ResourceList{}
		if slave.Resources != nil && len(slave.Resources) > 0 {
			// attempt to translate CPU (cores) and memory (MB) resources
			if cpu, found := slave.Resources["cpus"]; found {
				if cpuNum, ok := cpu.(float64); ok {
					cap[v1.ResourceCPU] = *resource.NewQuantity(int64(cpuNum), resource.DecimalSI)
				} else {
					log.Warningf("unexpected slave cpu resource type %T: %v", cpu, cpu)
				}
			} else {
				log.Warningf("slave failed to report cpu resource")
			}
			if mem, found := slave.Resources["mem"]; found {
				if memNum, ok := mem.(float64); ok {
					cap[v1.ResourceMemory] = *resource.NewQuantity(int64(memNum), resource.BinarySI)
				} else {
					log.Warningf("unexpected slave mem resource type %T: %v", mem, mem)
				}
			} else {
				log.Warningf("slave failed to report mem resource")
			}
		}
		if len(cap) > 0 {
			node.resources = &v1.NodeResources{
				Capacity: cap,
			}
			log.V(4).Infof("node %q reporting capacity %v", node.hostname, cap)
		}
		if _, ok := executorSlaveIds[slave.Id]; ok {
			node.kubeletRunning = true
		}
		nodes[node.hostname] = node
	}

	result := &mesosState{
		clusterName: state.ClusterName,
		nodes:       nodes,
	}

	return result, nil
}

type responseHandler func(*http.Response, error) error

// httpDo executes an HTTP request in the given context, canceling an ongoing request if the context
// is canceled prior to completion of the request. hacked from https://blog.golang.org/context
func (c *mesosClient) httpDo(ctx context.Context, req *http.Request, f responseHandler) error {
	// Run the HTTP request in a goroutine and pass the response to f.
	ch := make(chan error, 1)
	go func() { ch <- f(c.httpClient.Do(req)) }()
	select {
	case <-ctx.Done():
		c.tr.CancelRequest(req)
		<-ch // Wait for f to return.
		return ctx.Err()
	case err := <-ch:
		return err
	}
}
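As a rough illustration (not part of this commit) of the cancellation pattern httpDo implements, a hypothetical caller inside this package could bound a one-off request with a deadline, as sketched below; the master address is a placeholder and the helper name is invented for the example. All imports used here are already present in client.go.

// fetchStateWithDeadline is a hypothetical helper: if the deadline fires before
// the response handler finishes, httpDo cancels the in-flight request and
// returns ctx.Err().
func fetchStateWithDeadline(c *mesosClient) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "http://mesos-master.example.invalid:5050/state", nil)
	if err != nil {
		return nil, err
	}
	var blob []byte
	err = c.httpDo(ctx, req, func(res *http.Response, err error) error {
		if err != nil {
			return err
		}
		defer res.Body.Close()
		blob, err = ioutil.ReadAll(res.Body)
		return err
	})
	return blob, err
}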
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/client_test.go (generated, vendored, new file, 269 lines added)
@@ -0,0 +1,269 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mesos

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"testing"
	"time"

	log "github.com/golang/glog"
	"github.com/mesos/mesos-go/detector"
	"github.com/mesos/mesos-go/mesosutil"
	"golang.org/x/net/context"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// Test data

const (
	TEST_MASTER_ID   = "master-12345"
	TEST_MASTER_IP   = 177048842 // 10.141.141.10
	TEST_MASTER_PORT = 5050

	TEST_STATE_JSON = `
	{
		"version": "0.22.0",
		"unregistered_frameworks": [],
		"started_tasks": 0,
		"start_time": 1429456501.61141,
		"staged_tasks": 0,
		"slaves": [
			{
				"resources": {
					"ports": "[31000-32000]",
					"mem": 15360,
					"disk": 470842,
					"cpus": 8
				},
				"registered_time": 1429456502.46999,
				"pid": "slave(1)@mesos1.internal.example.org.fail:5050",
				"id": "20150419-081501-16777343-5050-16383-S2",
				"hostname": "mesos1.internal.example.org.fail",
				"attributes": {},
				"active": true
			},
			{
				"resources": {
					"ports": "[31000-32000]",
					"mem": 15360,
					"disk": 470842,
					"cpus": 8
				},
				"registered_time": 1429456502.4144,
				"pid": "slave(1)@mesos2.internal.example.org.fail:5050",
				"id": "20150419-081501-16777343-5050-16383-S1",
				"hostname": "mesos2.internal.example.org.fail",
				"attributes": {},
				"active": true
			},
			{
				"resources": {
					"ports": "[31000-32000]",
					"mem": 15360,
					"disk": 470842,
					"cpus": 8
				},
				"registered_time": 1429456502.02879,
				"pid": "slave(1)@mesos3.internal.example.org.fail:5050",
				"id": "20150419-081501-16777343-5050-16383-S0",
				"hostname": "mesos3.internal.example.org.fail",
				"attributes": {},
				"active": true
			}
		],
		"pid": "master@mesos-master0.internal.example.org.fail:5050",
		"orphan_tasks": [],
		"lost_tasks": 0,
		"leader": "master@mesos-master0.internal.example.org.fail:5050",
		"killed_tasks": 0,
		"failed_tasks": 0,
		"elected_time": 1429456501.61638,
		"deactivated_slaves": 0,
		"completed_frameworks": [],
		"build_user": "buildbot",
		"build_time": 1425085311,
		"build_date": "2015-02-27 17:01:51",
		"activated_slaves": 3,
		"finished_tasks": 0,
		"flags": {
			"zk_session_timeout": "10secs",
			"work_dir": "/somepath/mesos/local/Lc9arz",
			"webui_dir": "/usr/local/share/mesos/webui",
			"version": "false",
			"user_sorter": "drf",
			"slave_reregister_timeout": "10mins",
			"logbufsecs": "0",
			"log_auto_initialize": "true",
			"initialize_driver_logging": "true",
			"framework_sorter": "drf",
			"authenticators": "crammd5",
			"authenticate_slaves": "false",
			"authenticate": "false",
			"allocation_interval": "1secs",
			"logging_level": "INFO",
			"quiet": "false",
			"recovery_slave_removal_limit": "100%",
			"registry": "replicated_log",
			"registry_fetch_timeout": "1mins",
			"registry_store_timeout": "5secs",
			"registry_strict": "false",
			"root_submissions": "true"
		},
		"frameworks": [],
		"git_branch": "refs/heads/0.22.0-rc1",
		"git_sha": "46834faca67f877631e1beb7d61be5c080ec3dc2",
		"git_tag": "0.22.0-rc1",
		"hostname": "localhost",
		"id": "20150419-081501-16777343-5050-16383"
	}`
)

// Mocks

type FakeMasterDetector struct {
	callback detector.MasterChanged
	done     chan struct{}
}

func newFakeMasterDetector() *FakeMasterDetector {
	return &FakeMasterDetector{
		done: make(chan struct{}),
	}
}

func (md FakeMasterDetector) Cancel() {
	close(md.done)
}

func (md FakeMasterDetector) Detect(cb detector.MasterChanged) error {
	md.callback = cb
	leadingMaster := mesosutil.NewMasterInfo(TEST_MASTER_ID, TEST_MASTER_IP, TEST_MASTER_PORT)
	cb.OnMasterChanged(leadingMaster)
	return nil
}

func (md FakeMasterDetector) Done() <-chan struct{} {
	return md.done
}

// Auxiliary functions

func makeHttpMocks() (*httptest.Server, *http.Client, *http.Transport) {
	httpServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.V(4).Infof("Mocking response for HTTP request: %#v", r)
		if r.URL.Path == "/state.json" {
			w.WriteHeader(200) // OK
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintln(w, TEST_STATE_JSON)
		} else {
			w.WriteHeader(400)
			fmt.Fprintln(w, "Bad Request")
		}
	}))

	// Intercept all client requests and feed them to the test server
	transport := utilnet.SetTransportDefaults(&http.Transport{
		Proxy: func(req *http.Request) (*url.URL, error) {
			return url.Parse(httpServer.URL)
		},
	})

	httpClient := &http.Client{Transport: transport}

	return httpServer, httpClient, transport
}

// Tests

// test mesos.parseMesosState
func Test_parseMesosState(t *testing.T) {
	state, err := parseMesosState([]byte(TEST_STATE_JSON))

	if err != nil {
		t.Fatalf("parseMesosState does not yield an error")
	}
	if state == nil {
		t.Fatalf("parseMesosState yields a non-nil state")
	}
	if len(state.nodes) != 3 {
		t.Fatalf("parseMesosState yields a state with 3 nodes")
	}
}

// test mesos.listSlaves
func Test_listSlaves(t *testing.T) {
	defer log.Flush()
	md := FakeMasterDetector{}
	httpServer, httpClient, httpTransport := makeHttpMocks()
	defer httpServer.Close()

	cacheTTL := 500 * time.Millisecond
	mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)

	if err != nil {
		t.Fatalf("createMesosClient does not yield an error")
	}

	slaveNodes, err := mesosClient.listSlaves(context.TODO())

	if err != nil {
		t.Fatalf("listSlaves does not yield an error")
	}
	if len(slaveNodes) != 3 {
		t.Fatalf("listSlaves yields a collection of size 3")
	}

	expectedHostnames := map[string]struct{}{
		"mesos1.internal.example.org.fail": {},
		"mesos2.internal.example.org.fail": {},
		"mesos3.internal.example.org.fail": {},
	}

	actualHostnames := make(map[string]struct{})
	for _, node := range slaveNodes {
		actualHostnames[node.hostname] = struct{}{}
	}

	if !reflect.DeepEqual(expectedHostnames, actualHostnames) {
		t.Fatalf("listSlaves yields a collection with the expected hostnames")
	}
}

// test mesos.clusterName
func Test_clusterName(t *testing.T) {
	defer log.Flush()
	md := FakeMasterDetector{}
	httpServer, httpClient, httpTransport := makeHttpMocks()
	defer httpServer.Close()
	cacheTTL := 500 * time.Millisecond
	mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)

	name, err := mesosClient.clusterName(context.TODO())

	if err != nil {
		t.Fatalf("clusterName does not yield an error")
	}
	if name != defaultClusterName {
		t.Fatalf("clusterName yields the expected (default) value")
	}
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/config.go (generated, vendored, new file, 79 lines added)
@@ -0,0 +1,79 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mesos

import (
	"io"
	"time"

	"gopkg.in/gcfg.v1"
)

const (
	DefaultMesosMaster       = "localhost:5050"
	DefaultHttpClientTimeout = time.Duration(10) * time.Second
	DefaultStateCacheTTL     = time.Duration(5) * time.Second
)

// Example Mesos cloud provider configuration file:
//
// [mesos-cloud]
//	mesos-master = leader.mesos:5050
//	http-client-timeout = 500ms
//	state-cache-ttl = 1h

type ConfigWrapper struct {
	Mesos_Cloud Config
}

type Config struct {
	MesosMaster            string   `gcfg:"mesos-master"`
	MesosHttpClientTimeout Duration `gcfg:"http-client-timeout"`
	StateCacheTTL          Duration `gcfg:"state-cache-ttl"`
}

type Duration struct {
	Duration time.Duration `gcfg:"duration"`
}

func (d *Duration) UnmarshalText(data []byte) error {
	underlying, err := time.ParseDuration(string(data))
	if err == nil {
		d.Duration = underlying
	}
	return err
}

func createDefaultConfig() *Config {
	return &Config{
		MesosMaster:            DefaultMesosMaster,
		MesosHttpClientTimeout: Duration{Duration: DefaultHttpClientTimeout},
		StateCacheTTL:          Duration{Duration: DefaultStateCacheTTL},
	}
}

func readConfig(configReader io.Reader) (*Config, error) {
	config := createDefaultConfig()
	wrapper := &ConfigWrapper{Mesos_Cloud: *config}
	if configReader != nil {
		if err := gcfg.ReadInto(wrapper, configReader); err != nil {
			return nil, err
		}
		config = &(wrapper.Mesos_Cloud)
	}
	return config, nil
}
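A quick illustration (not part of this commit): the Duration wrapper's UnmarshalText is what lets gcfg accept human-readable values such as "500ms" or "1h" for the timeout fields above. The snippet below assumes it runs inside a function in package mesos, where "time" is already imported.

	// Hypothetical snippet: parse a config-style duration string directly.
	var d Duration
	if err := d.UnmarshalText([]byte("500ms")); err != nil {
		// a malformed value surfaces here as a time.ParseDuration error
	}
	ok := d.Duration == 500*time.Millisecond // true
	_ = ok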
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/config_test.go (generated, vendored, new file, 75 lines added)
@@ -0,0 +1,75 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mesos

import (
	"bytes"
	"testing"
	"time"

	log "github.com/golang/glog"
)

// test mesos.createDefaultConfig
func Test_createDefaultConfig(t *testing.T) {
	defer log.Flush()

	config := createDefaultConfig()

	if config.MesosMaster != DefaultMesosMaster {
		t.Fatalf("Default config has the expected MesosMaster value")
	}

	if config.MesosHttpClientTimeout.Duration != DefaultHttpClientTimeout {
		t.Fatalf("Default config has the expected MesosHttpClientTimeout value")
	}

	if config.StateCacheTTL.Duration != DefaultStateCacheTTL {
		t.Fatalf("Default config has the expected StateCacheTTL value")
	}
}

// test mesos.readConfig
func Test_readConfig(t *testing.T) {
	defer log.Flush()

	configString := `
[mesos-cloud]
	mesos-master = leader.mesos:5050
	http-client-timeout = 500ms
	state-cache-ttl = 1h`

	reader := bytes.NewBufferString(configString)

	config, err := readConfig(reader)

	if err != nil {
		t.Fatalf("Reading configuration does not yield an error: %#v", err)
	}

	if config.MesosMaster != "leader.mesos:5050" {
		t.Fatalf("Parsed config has the expected MesosMaster value")
	}

	if config.MesosHttpClientTimeout.Duration != time.Duration(500)*time.Millisecond {
		t.Fatalf("Parsed config has the expected MesosHttpClientTimeout value")
	}

	if config.StateCacheTTL.Duration != time.Duration(1)*time.Hour {
		t.Fatalf("Parsed config has the expected StateCacheTTL value")
	}
}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/mesos.go (generated, vendored, new file, 298 lines added)
@@ -0,0 +1,298 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mesos

import (
	"errors"
	"fmt"
	"io"
	"net"
	"regexp"

	"golang.org/x/net/context"

	log "github.com/golang/glog"
	"github.com/mesos/mesos-go/detector"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/cloudprovider"
)

const (
	ProviderName = "mesos"

	// KubernetesExecutorName is shared between contrib/mesos and Mesos cloud provider.
	// Because cloud provider -> contrib dependencies are forbidden, this constant
	// is defined here, not in contrib.
	KubernetesExecutorName = "Kubelet-Executor"
)

var (
	CloudProvider *MesosCloud

	noHostNameSpecified = errors.New("No hostname specified")
)

func init() {
	cloudprovider.RegisterCloudProvider(
		ProviderName,
		func(configReader io.Reader) (cloudprovider.Interface, error) {
			provider, err := newMesosCloud(configReader)
			if err == nil {
				CloudProvider = provider
			}
			return provider, err
		})
}

type MesosCloud struct {
	client *mesosClient
	config *Config
}

func (c *MesosCloud) MasterURI() string {
	return c.config.MesosMaster
}

func newMesosCloud(configReader io.Reader) (*MesosCloud, error) {
	config, err := readConfig(configReader)
	if err != nil {
		return nil, err
	}

	log.V(1).Infof("new mesos cloud, master='%v'", config.MesosMaster)
	if d, err := detector.New(config.MesosMaster); err != nil {
		log.V(1).Infof("failed to create master detector: %v", err)
		return nil, err
	} else if cl, err := newMesosClient(d,
		config.MesosHttpClientTimeout.Duration,
		config.StateCacheTTL.Duration); err != nil {
		log.V(1).Infof("failed to create mesos cloud client: %v", err)
		return nil, err
	} else {
		return &MesosCloud{client: cl, config: config}, nil
	}
}

// Implementation of Instances.CurrentNodeName
func (c *MesosCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}

func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return errors.New("unimplemented")
}

// Instances returns a copy of the Mesos cloud Instances implementation.
// Mesos natively provides minimal cloud-type resources. More robust cloud
// support requires a combination of Mesos and cloud-specific knowledge.
func (c *MesosCloud) Instances() (cloudprovider.Instances, bool) {
	return c, true
}

// LoadBalancer always returns nil, false in this implementation.
// Mesos does not provide any type of native load balancing by default,
// so this implementation always returns (nil, false).
func (c *MesosCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return nil, false
}

// Zones always returns nil, false in this implementation.
// Mesos does not provide any type of native region or zone awareness,
// so this implementation always returns (nil, false).
func (c *MesosCloud) Zones() (cloudprovider.Zones, bool) {
	return nil, false
}

// Clusters returns a copy of the Mesos cloud Clusters implementation.
// Mesos does not provide support for multiple clusters.
func (c *MesosCloud) Clusters() (cloudprovider.Clusters, bool) {
	return c, true
}

// Routes always returns nil, false in this implementation.
func (c *MesosCloud) Routes() (cloudprovider.Routes, bool) {
	return nil, false
}

// ProviderName returns the cloud provider ID.
func (c *MesosCloud) ProviderName() string {
	return ProviderName
}

// ScrubDNS filters DNS settings for pods.
func (c *MesosCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}

// ListClusters lists the names of the available Mesos clusters.
func (c *MesosCloud) ListClusters() ([]string, error) {
	// Always returns a single cluster (this one!)
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	name, err := c.client.clusterName(ctx)
	return []string{name}, err
}

// Master gets back the address (either DNS name or IP address) of the leading Mesos master node for the cluster.
func (c *MesosCloud) Master(clusterName string) (string, error) {
	clusters, err := c.ListClusters()
	if err != nil {
		return "", err
	}
	for _, name := range clusters {
		if name == clusterName {
			if c.client.master == "" {
				return "", errors.New("The currently leading master is unknown.")
			}

			host, _, err := net.SplitHostPort(c.client.master)
			if err != nil {
				return "", err
			}

			return host, nil
		}
	}
	return "", errors.New(fmt.Sprintf("The supplied cluster '%v' does not exist", clusterName))
}

// ipAddress returns an IP address of the specified instance.
func ipAddress(name string) (net.IP, error) {
	if name == "" {
		return nil, noHostNameSpecified
	}
	ipaddr := net.ParseIP(name)
	if ipaddr != nil {
		return ipaddr, nil
	}
	iplist, err := net.LookupIP(name)
	if err != nil {
		log.V(2).Infof("failed to resolve IP from host name '%v': %v", name, err)
		return nil, err
	}
	ipaddr = iplist[0]
	log.V(2).Infof("resolved host '%v' to '%v'", name, ipaddr)
	return ipaddr, nil
}

// mapNodeNameToPrivateDNSName maps a k8s NodeName to an mesos hostname.
// This is a simple string cast
func mapNodeNameToHostname(nodeName types.NodeName) string {
	return string(nodeName)
}

// ExternalID returns the cloud provider ID of the instance with the specified nodeName (deprecated).
func (c *MesosCloud) ExternalID(nodeName types.NodeName) (string, error) {
	hostname := mapNodeNameToHostname(nodeName)
	//TODO(jdef) use a timeout here? 15s?
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	nodes, err := c.client.listSlaves(ctx)
	if err != nil {
		return "", err
	}

	node := nodes[hostname]
	if node == nil {
		return "", cloudprovider.InstanceNotFound
	}

	ip, err := ipAddress(node.hostname)
	if err != nil {
		return "", err
	}
	return ip.String(), nil
}

// InstanceID returns the cloud provider ID of the instance with the specified nodeName.
func (c *MesosCloud) InstanceID(nodeName types.NodeName) (string, error) {
	return "", nil
}

// InstanceType returns the type of the instance with the specified nodeName.
func (c *MesosCloud) InstanceType(nodeName types.NodeName) (string, error) {
	return "", nil
}

func (c *MesosCloud) listNodes() (map[string]*slaveNode, error) {
	//TODO(jdef) use a timeout here? 15s?
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	nodes, err := c.client.listSlaves(ctx)
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		log.V(2).Info("no slaves found, are any running?")
		return nil, nil
	}
	return nodes, nil
}

// List lists instances that match 'filter' which is a regular expression
// which must match the entire instance name (fqdn).
func (c *MesosCloud) List(filter string) ([]types.NodeName, error) {
	nodes, err := c.listNodes()
	if err != nil {
		return nil, err
	}
	filterRegex, err := regexp.Compile(filter)
	if err != nil {
		return nil, err
	}
	names := []types.NodeName{}
	for _, node := range nodes {
		if filterRegex.MatchString(node.hostname) {
			names = append(names, types.NodeName(node.hostname))
		}
	}
	return names, nil
}

// ListWithKubelet list those instance which have no running kubelet, i.e. the
// Kubernetes executor.
func (c *MesosCloud) ListWithoutKubelet() ([]string, error) {
	nodes, err := c.listNodes()
	if err != nil {
		return nil, err
	}
	addr := make([]string, 0, len(nodes))
	for _, n := range nodes {
		if !n.kubeletRunning {
			addr = append(addr, n.hostname)
		}
	}
	return addr, nil
}

// NodeAddresses returns the addresses of the instance with the specified nodeName.
func (c *MesosCloud) NodeAddresses(nodeName types.NodeName) ([]v1.NodeAddress, error) {
	name := mapNodeNameToHostname(nodeName)
	ip, err := ipAddress(name)
	if err != nil {
		return nil, err
	}
	return []v1.NodeAddress{
		{Type: v1.NodeLegacyHostIP, Address: ip.String()},
		{Type: v1.NodeInternalIP, Address: ip.String()},
		{Type: v1.NodeExternalIP, Address: ip.String()},
	}, nil
}
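The two TODO(jdef) comments in the file above ask whether the slave lookups should be bounded by a timeout; a rough sketch of what that could look like (not part of this commit) is shown below, using the golang.org/x/net/context API this file already imports. The helper name is invented, and the variant would additionally need "time" added to the file's imports.

// listNodesWithTimeout is a hypothetical variant of listNodes that lets the
// deadline cancel a slow master poll; 15 seconds mirrors the value floated in
// the TODO and is otherwise arbitrary.
func (c *MesosCloud) listNodesWithTimeout(timeout time.Duration) (map[string]*slaveNode, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return c.client.listSlaves(ctx)
}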
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/mesos_test.go (generated, vendored, new file, 280 lines added)
@@ -0,0 +1,280 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mesos
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
log "github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
func TestIPAddress(t *testing.T) {
|
||||
expected4 := net.IPv4(127, 0, 0, 1)
|
||||
ip, err := ipAddress("127.0.0.1")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(ip, expected4) {
|
||||
t.Fatalf("expected %#v instead of %#v", expected4, ip)
|
||||
}
|
||||
|
||||
expected6 := net.ParseIP("::1")
|
||||
if expected6 == nil {
|
||||
t.Fatalf("failed to parse ipv6 ::1")
|
||||
}
|
||||
ip, err = ipAddress("::1")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(ip, expected6) {
|
||||
t.Fatalf("expected %#v instead of %#v", expected6, ip)
|
||||
}
|
||||
|
||||
ip, err = ipAddress("localhost")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(ip, expected4) && !reflect.DeepEqual(ip, expected6) {
|
||||
t.Fatalf("expected %#v or %#v instead of %#v", expected4, expected6, ip)
|
||||
}
|
||||
|
||||
_, err = ipAddress("")
|
||||
if err != noHostNameSpecified {
|
||||
t.Fatalf("expected error noHostNameSpecified but got none")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.newMesosCloud with no config
|
||||
func Test_newMesosCloud_NoConfig(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, err := newMesosCloud(nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Creating a new Mesos cloud provider without config does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if mesosCloud.client.httpClient.Timeout != DefaultHttpClientTimeout {
|
||||
t.Fatalf("Creating a new Mesos cloud provider without config does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if mesosCloud.client.state.ttl != DefaultStateCacheTTL {
|
||||
t.Fatalf("Mesos client with default config has the expected state cache TTL value")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.newMesosCloud with custom config
|
||||
func Test_newMesosCloud_WithConfig(t *testing.T) {
|
||||
defer log.Flush()
|
||||
|
||||
configString := `
|
||||
[mesos-cloud]
|
||||
http-client-timeout = 500ms
|
||||
state-cache-ttl = 1h`
|
||||
|
||||
reader := bytes.NewBufferString(configString)
|
||||
|
||||
mesosCloud, err := newMesosCloud(reader)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Creating a new Mesos cloud provider with a custom config does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if mesosCloud.client.httpClient.Timeout != time.Duration(500)*time.Millisecond {
|
||||
t.Fatalf("Mesos client with a custom config has the expected HTTP client timeout value")
|
||||
}
|
||||
|
||||
if mesosCloud.client.state.ttl != time.Duration(1)*time.Hour {
|
||||
t.Fatalf("Mesos client with a custom config has the expected state cache TTL value")
|
||||
}
|
||||
}
|
||||
|
||||
// tests for capability reporting functions
|
||||
|
||||
// test mesos.Instances
|
||||
func Test_Instances(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, _ := newMesosCloud(nil)
|
||||
|
||||
instances, supports_instances := mesosCloud.Instances()
|
||||
|
||||
if !supports_instances || instances == nil {
|
||||
t.Fatalf("MesosCloud provides an implementation of Instances")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.LoadBalancer
|
||||
func Test_TcpLoadBalancer(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, _ := newMesosCloud(nil)
|
||||
|
||||
lb, supports_lb := mesosCloud.LoadBalancer()
|
||||
|
||||
if supports_lb || lb != nil {
|
||||
t.Fatalf("MesosCloud does not provide an implementation of LoadBalancer")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.Zones
|
||||
func Test_Zones(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, _ := newMesosCloud(nil)
|
||||
|
||||
zones, supports_zones := mesosCloud.Zones()
|
||||
|
||||
if supports_zones || zones != nil {
|
||||
t.Fatalf("MesosCloud does not provide an implementation of Zones")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.Clusters
|
||||
func Test_Clusters(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, _ := newMesosCloud(nil)
|
||||
|
||||
clusters, supports_clusters := mesosCloud.Clusters()
|
||||
|
||||
if !supports_clusters || clusters == nil {
|
||||
t.Fatalf("MesosCloud does not provide an implementation of Clusters")
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.MasterURI
|
||||
func Test_MasterURI(t *testing.T) {
|
||||
defer log.Flush()
|
||||
mesosCloud, _ := newMesosCloud(nil)
|
||||
|
||||
uri := mesosCloud.MasterURI()
|
||||
|
||||
if uri != DefaultMesosMaster {
|
||||
t.Fatalf("MasterURI returns the expected master URI (expected \"localhost\", actual \"%s\"", uri)
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.ListClusters
|
||||
func Test_ListClusters(t *testing.T) {
|
||||
defer log.Flush()
|
||||
md := FakeMasterDetector{}
|
||||
httpServer, httpClient, httpTransport := makeHttpMocks()
|
||||
defer httpServer.Close()
|
||||
cacheTTL := 500 * time.Millisecond
|
||||
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
|
||||
mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}
|
||||
|
||||
clusters, err := mesosCloud.ListClusters()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("ListClusters does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if len(clusters) != 1 {
|
||||
t.Fatalf("ListClusters should return a list of size 1: (actual: %#v)", clusters)
|
||||
}
|
||||
|
||||
expectedClusterNames := []string{"mesos"}
|
||||
|
||||
if !reflect.DeepEqual(clusters, expectedClusterNames) {
|
||||
t.Fatalf("ListClusters should return the expected list of names: (expected: %#v, actual: %#v)",
|
||||
expectedClusterNames,
|
||||
clusters)
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.Master
|
||||
func Test_Master(t *testing.T) {
|
||||
defer log.Flush()
|
||||
md := FakeMasterDetector{}
|
||||
httpServer, httpClient, httpTransport := makeHttpMocks()
|
||||
defer httpServer.Close()
|
||||
cacheTTL := 500 * time.Millisecond
|
||||
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
|
||||
mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}
|
||||
|
||||
clusters, err := mesosCloud.ListClusters()
|
||||
clusterName := clusters[0]
|
||||
master, err := mesosCloud.Master(clusterName)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Master does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
expectedMaster := unpackIPv4(TEST_MASTER_IP)
|
||||
|
||||
if master != expectedMaster {
|
||||
t.Fatalf("Master returns the unexpected value: (expected: %#v, actual: %#v", expectedMaster, master)
|
||||
}
|
||||
}
|
||||
|
||||
// test mesos.List
|
||||
func Test_List(t *testing.T) {
|
||||
defer log.Flush()
|
||||
md := FakeMasterDetector{}
|
||||
httpServer, httpClient, httpTransport := makeHttpMocks()
|
||||
defer httpServer.Close()
|
||||
cacheTTL := 500 * time.Millisecond
|
||||
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
|
||||
mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}
|
||||
|
||||
clusters, err := mesosCloud.List(".*") // recognizes the language of all strings
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("List does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if len(clusters) != 3 {
|
||||
t.Fatalf("List with a catch-all filter should return a list of size 3: (actual: %#v)", clusters)
|
||||
}
|
||||
|
||||
clusters, err = mesosCloud.List("$^") // end-of-string followed by start-of-string: recognizes the empty language
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("List does not yield an error: %#v", err)
|
||||
}
|
||||
|
||||
if len(clusters) != 0 {
|
||||
t.Fatalf("List with a reject-all filter should return a list of size 0: (actual: %#v)", clusters)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ExternalID(t *testing.T) {
|
||||
defer log.Flush()
|
||||
md := FakeMasterDetector{}
|
||||
httpServer, httpClient, httpTransport := makeHttpMocks()
|
||||
defer httpServer.Close()
|
||||
cacheTTL := 500 * time.Millisecond
|
||||
mesosClient, err := createMesosClient(md, httpClient, httpTransport, cacheTTL)
|
||||
mesosCloud := &MesosCloud{client: mesosClient, config: createDefaultConfig()}
|
||||
|
||||
_, err = mesosCloud.ExternalID("unknown")
|
||||
if err != cloudprovider.InstanceNotFound {
|
||||
t.Fatalf("ExternalID did not return InstanceNotFound on an unknown instance")
|
||||
}
|
||||
|
||||
slaveName := types.NodeName("mesos3.internal.example.org.fail")
|
||||
id, err := mesosCloud.ExternalID(slaveName)
|
||||
if id != "" {
|
||||
t.Fatalf("ExternalID should not be able to resolve %q", slaveName)
|
||||
}
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
t.Fatalf("ExternalID should find %q", slaveName)
|
||||
}
|
||||
}
|
21
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/mesos/plugins.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mesos
|
||||
|
||||
import (
|
||||
_ "github.com/mesos/mesos-go/detector/zoo"
|
||||
)
|
90
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD
generated
vendored
Normal file
@ -0,0 +1,90 @@
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"metadata.go",
|
||||
"openstack.go",
|
||||
"openstack_instances.go",
|
||||
"openstack_loadbalancer.go",
|
||||
"openstack_routes.go",
|
||||
"openstack_volumes.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/resource:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/mount:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/mitchellh/mapstructure",
|
||||
"//vendor:github.com/rackspace/gophercloud",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/flavors",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/servers",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/identity/v3/extensions/trust",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/identity/v3/tokens",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/networking/v2/ports",
|
||||
"//vendor:github.com/rackspace/gophercloud/pagination",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"metadata_test.go",
|
||||
"openstack_routes_test.go",
|
||||
"openstack_test.go",
|
||||
],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:github.com/rackspace/gophercloud",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/servers",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/rand",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
# Maintainers
|
||||
|
||||
* [Angus Lees](https://github.com/anguslees)
|
||||
|
||||
|
||||
[]()
|
3
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS
generated
vendored
Normal file
@ -0,0 +1,3 @@
assignees:
|
||||
- anguslees
|
||||
- dagnello
|
156
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
)
|
||||
|
||||
// metadataUrl is the URL of the OpenStack metadata server. It is a hardcoded IPv4
|
||||
// link-local address as documented in "OpenStack Cloud Administrator Guide",
|
||||
// chapter Compute - Networking with nova-network.
|
||||
// http://docs.openstack.org/admin-guide-cloud/compute-networking-nova.html#metadata-service
|
||||
const metadataUrl = "http://169.254.169.254/openstack/2012-08-10/meta_data.json"
|
||||
|
||||
// Config drive is defined as an iso9660 or vfat (deprecated) drive
|
||||
// with the "config-2" label.
|
||||
// http://docs.openstack.org/user-guide/cli-config-drive.html
|
||||
const configDriveLabel = "config-2"
|
||||
const configDrivePath = "openstack/2012-08-10/meta_data.json"
|
||||
|
||||
var ErrBadMetadata = errors.New("Invalid OpenStack metadata, got empty uuid")
|
||||
|
||||
// Assumes the "2012-08-10" meta_data.json format.
|
||||
// See http://docs.openstack.org/user-guide/cli_config_drive.html
|
||||
type Metadata struct {
|
||||
Uuid string `json:"uuid"`
|
||||
Name string `json:"name"`
|
||||
AvailabilityZone string `json:"availability_zone"`
|
||||
// .. and other fields we don't care about. Expand as necessary.
|
||||
}
|
||||
|
||||
// parseMetadata decodes an OpenStack meta_data.json document from r and parses
|
||||
// the instance Metadata out of it; the uuid field must be non-empty.
|
||||
func parseMetadata(r io.Reader) (*Metadata, error) {
|
||||
var metadata Metadata
|
||||
json := json.NewDecoder(r)
|
||||
if err := json.Decode(&metadata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if metadata.Uuid == "" {
|
||||
return nil, ErrBadMetadata
|
||||
}
|
||||
|
||||
return &metadata, nil
|
||||
}
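// A minimal sketch of the input parseMetadata accepts (the values mirror the
// fixture in metadata_test.go; only the fields declared on Metadata above are
// decoded, everything else in meta_data.json is ignored):
//
//	md, err := parseMetadata(strings.NewReader(
//		`{"uuid": "83679162-1378-4288-a2d4-70e13ec132aa", "name": "test", "availability_zone": "nova"}`))
//	// md.Uuid, md.Name and md.AvailabilityZone are populated on success;
//	// an empty "uuid" yields ErrBadMetadata instead.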
|
||||
|
||||
func getMetadataFromConfigDrive() (*Metadata, error) {
|
||||
// Try to read instance UUID from config drive.
|
||||
dev := "/dev/disk/by-label/" + configDriveLabel
|
||||
if _, err := os.Stat(dev); os.IsNotExist(err) {
|
||||
out, err := exec.New().Command(
|
||||
"blkid", "-l",
|
||||
"-t", "LABEL="+configDriveLabel,
|
||||
"-o", "device",
|
||||
).CombinedOutput()
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Unable to run blkid: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
dev = strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
mntdir, err := ioutil.TempDir("", "configdrive")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(mntdir)
|
||||
|
||||
glog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir)
|
||||
|
||||
mounter := mount.New("" /* default mount path */)
|
||||
err = mounter.Mount(dev, mntdir, "iso9660", []string{"ro"})
|
||||
if err != nil {
|
||||
err = mounter.Mount(dev, mntdir, "vfat", []string{"ro"})
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("Error mounting configdrive %s: %v", dev, err)
|
||||
return nil, err
|
||||
}
|
||||
defer mounter.Unmount(mntdir)
|
||||
|
||||
glog.V(4).Infof("Configdrive mounted on %s", mntdir)
|
||||
|
||||
f, err := os.Open(
|
||||
filepath.Join(mntdir, configDrivePath))
|
||||
if err != nil {
|
||||
glog.Errorf("Error reading %s on config drive: %v", configDrivePath, err)
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseMetadata(f)
|
||||
}
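// In shell terms the above is roughly: locate the device with
// `blkid -l -t LABEL=config-2 -o device` (unless /dev/disk/by-label/config-2
// already exists), mount it read-only as iso9660 (falling back to vfat), and
// read openstack/2012-08-10/meta_data.json from the mount point.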
|
||||
|
||||
func getMetadataFromMetadataService() (*Metadata, error) {
|
||||
// Try to get JSON from the metadata server.
|
||||
glog.V(4).Infof("Attempting to fetch metadata from %s", metadataUrl)
|
||||
resp, err := http.Get(metadataUrl)
|
||||
if err != nil {
|
||||
glog.V(3).Infof("Cannot read %s: %v", metadataUrl, err)
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = fmt.Errorf("Unexpected status code when reading metadata from %s: %s", metadataUrl, resp.Status)
|
||||
glog.V(3).Infof("%v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseMetadata(resp.Body)
|
||||
}
|
||||
|
||||
// Metadata is fixed for the current host, so cache the value process-wide
|
||||
var metadataCache *Metadata
|
||||
|
||||
func getMetadata() (*Metadata, error) {
|
||||
if metadataCache == nil {
|
||||
md, err := getMetadataFromConfigDrive()
|
||||
if err != nil {
|
||||
md, err = getMetadataFromMetadataService()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadataCache = md
|
||||
}
|
||||
return metadataCache, nil
|
||||
}
|
86
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata_test.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var FakeMetadata = Metadata{
|
||||
Uuid: "83679162-1378-4288-a2d4-70e13ec132aa",
|
||||
Name: "test",
|
||||
AvailabilityZone: "nova",
|
||||
}
|
||||
|
||||
func SetMetadataFixture(value *Metadata) {
|
||||
metadataCache = value
|
||||
}
|
||||
|
||||
func ClearMetadata() {
|
||||
metadataCache = nil
|
||||
}
|
||||
|
||||
func TestParseMetadata(t *testing.T) {
|
||||
_, err := parseMetadata(strings.NewReader("bogus"))
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when bad data is provided: %s", err)
|
||||
}
|
||||
|
||||
data := strings.NewReader(`
|
||||
{
|
||||
"availability_zone": "nova",
|
||||
"files": [
|
||||
{
|
||||
"content_path": "/content/0000",
|
||||
"path": "/etc/network/interfaces"
|
||||
},
|
||||
{
|
||||
"content_path": "/content/0001",
|
||||
"path": "known_hosts"
|
||||
}
|
||||
],
|
||||
"hostname": "test.novalocal",
|
||||
"launch_index": 0,
|
||||
"name": "test",
|
||||
"meta": {
|
||||
"role": "webservers",
|
||||
"essential": "false"
|
||||
},
|
||||
"public_keys": {
|
||||
"mykey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDBqUfVvCSez0/Wfpd8dLLgZXV9GtXQ7hnMN+Z0OWQUyebVEHey1CXuin0uY1cAJMhUq8j98SiW+cU0sU4J3x5l2+xi1bodDm1BtFWVeLIOQINpfV1n8fKjHB+ynPpe1F6tMDvrFGUlJs44t30BrujMXBe8Rq44cCk6wqyjATA3rQ== Generated by Nova\n"
|
||||
},
|
||||
"uuid": "83679162-1378-4288-a2d4-70e13ec132aa"
|
||||
}
|
||||
`)
|
||||
md, err := parseMetadata(data)
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when provided with valid data: %s", err)
|
||||
}
|
||||
|
||||
if md.Name != "test" {
|
||||
t.Errorf("incorrect name: %s", md.Name)
|
||||
}
|
||||
|
||||
if md.Uuid != "83679162-1378-4288-a2d4-70e13ec132aa" {
|
||||
t.Errorf("incorrect uuid: %s", md.Uuid)
|
||||
}
|
||||
|
||||
if md.AvailabilityZone != "nova" {
|
||||
t.Errorf("incorrect az: %s", md.AvailabilityZone)
|
||||
}
|
||||
}
|
515
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go
generated
vendored
Normal file
@ -0,0 +1,515 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/rackspace/gophercloud"
|
||||
"github.com/rackspace/gophercloud/openstack"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/rackspace/gophercloud/openstack/identity/v3/extensions/trust"
|
||||
token3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
|
||||
"github.com/rackspace/gophercloud/pagination"
|
||||
"gopkg.in/gcfg.v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const ProviderName = "openstack"
|
||||
|
||||
var ErrNotFound = errors.New("Failed to find object")
|
||||
var ErrMultipleResults = errors.New("Multiple results where only one expected")
|
||||
var ErrNoAddressFound = errors.New("No address found for host")
|
||||
var ErrAttrNotFound = errors.New("Expected attribute not found")
|
||||
|
||||
const (
|
||||
MiB = 1024 * 1024
|
||||
GB = 1000 * 1000 * 1000
|
||||
)
|
||||
|
||||
// encoding.TextUnmarshaler interface for time.Duration
|
||||
type MyDuration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *MyDuration) UnmarshalText(text []byte) error {
|
||||
res, err := time.ParseDuration(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Duration = res
|
||||
return nil
|
||||
}
|
||||
|
||||
type LoadBalancer struct {
|
||||
network *gophercloud.ServiceClient
|
||||
compute *gophercloud.ServiceClient
|
||||
opts LoadBalancerOpts
|
||||
}
|
||||
|
||||
type LoadBalancerOpts struct {
|
||||
LBVersion string `gcfg:"lb-version"` // overrides autodetection. v1 or v2
|
||||
SubnetId string `gcfg:"subnet-id"` // required
|
||||
FloatingNetworkId string `gcfg:"floating-network-id"`
|
||||
LBMethod string `gcfg:"lb-method"`
|
||||
CreateMonitor bool `gcfg:"create-monitor"`
|
||||
MonitorDelay MyDuration `gcfg:"monitor-delay"`
|
||||
MonitorTimeout MyDuration `gcfg:"monitor-timeout"`
|
||||
MonitorMaxRetries uint `gcfg:"monitor-max-retries"`
|
||||
ManageSecurityGroups bool `gcfg:"manage-security-groups"`
|
||||
NodeSecurityGroupID string `gcfg:"node-security-group"`
|
||||
}
|
||||
|
||||
type BlockStorageOpts struct {
|
||||
TrustDevicePath bool `gcfg:"trust-device-path"` // See Issue #33128
|
||||
}
|
||||
|
||||
type RouterOpts struct {
|
||||
RouterId string `gcfg:"router-id"` // required
|
||||
}
|
||||
|
||||
// OpenStack is an implementation of cloud provider Interface for OpenStack.
|
||||
type OpenStack struct {
|
||||
provider *gophercloud.ProviderClient
|
||||
region string
|
||||
lbOpts LoadBalancerOpts
|
||||
bsOpts BlockStorageOpts
|
||||
routeOpts RouterOpts
|
||||
// InstanceID of the server where this OpenStack object is instantiated.
|
||||
localInstanceID string
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Global struct {
|
||||
AuthUrl string `gcfg:"auth-url"`
|
||||
Username string
|
||||
UserId string `gcfg:"user-id"`
|
||||
Password string
|
||||
ApiKey string `gcfg:"api-key"`
|
||||
TenantId string `gcfg:"tenant-id"`
|
||||
TenantName string `gcfg:"tenant-name"`
|
||||
TrustId string `gcfg:"trust-id"`
|
||||
DomainId string `gcfg:"domain-id"`
|
||||
DomainName string `gcfg:"domain-name"`
|
||||
Region string
|
||||
}
|
||||
LoadBalancer LoadBalancerOpts
|
||||
BlockStorage BlockStorageOpts
|
||||
Route RouterOpts
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
|
||||
cfg, err := readConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newOpenStack(cfg)
|
||||
})
|
||||
}
|
||||
|
||||
func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
|
||||
return gophercloud.AuthOptions{
|
||||
IdentityEndpoint: cfg.Global.AuthUrl,
|
||||
Username: cfg.Global.Username,
|
||||
UserID: cfg.Global.UserId,
|
||||
Password: cfg.Global.Password,
|
||||
APIKey: cfg.Global.ApiKey,
|
||||
TenantID: cfg.Global.TenantId,
|
||||
TenantName: cfg.Global.TenantName,
|
||||
DomainID: cfg.Global.DomainId,
|
||||
DomainName: cfg.Global.DomainName,
|
||||
|
||||
// Persistent service, so we need to be able to renew tokens.
|
||||
AllowReauth: true,
|
||||
}
|
||||
}
|
||||
|
||||
func readConfig(config io.Reader) (Config, error) {
|
||||
if config == nil {
|
||||
err := fmt.Errorf("no OpenStack cloud provider config file given")
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
|
||||
// Set default values for config params
|
||||
cfg.BlockStorage.TrustDevicePath = false
|
||||
|
||||
err := gcfg.ReadInto(&cfg, config)
|
||||
return cfg, err
|
||||
}
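// A minimal sketch of the INI-style file readConfig expects (section and key
// names follow the gcfg tags on Config above; the values are illustrative and
// match the ones exercised in openstack_test.go):
//
//	[Global]
//	auth-url = http://auth.url
//	username = user
//
//	[LoadBalancer]
//	create-monitor = yes
//	monitor-delay = 1m
//	monitor-timeout = 30s
//	monitor-max-retries = 3
//
//	[BlockStorage]
//	trust-device-path = yes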
|
||||
|
||||
// Tiny helper for conditional unwind logic
|
||||
type Caller bool
|
||||
|
||||
func NewCaller() Caller { return Caller(true) }
|
||||
func (c *Caller) Disarm() { *c = false }
|
||||
|
||||
func (c *Caller) Call(f func()) {
|
||||
if *c {
|
||||
f()
|
||||
}
|
||||
}
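// Typical use (see CreateRoute in openstack_routes.go): arm a Caller, defer
// the unwind of each reversible step as it succeeds, and disarm it once the
// whole operation has committed so the deferred unwinds become no-ops:
//
//	onFailure := NewCaller()
//	unwind, err := updateRoutes(network, router, routes)
//	if err != nil {
//		return err
//	}
//	defer onFailure.Call(unwind)
//	// ... further reversible steps ...
//	onFailure.Disarm() // success: skip all deferred unwinds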
|
||||
|
||||
func readInstanceID() (string, error) {
|
||||
// Try to find instance ID on the local filesystem (created by cloud-init)
|
||||
const instanceIDFile = "/var/lib/cloud/data/instance-id"
|
||||
idBytes, err := ioutil.ReadFile(instanceIDFile)
|
||||
if err == nil {
|
||||
instanceID := string(idBytes)
|
||||
instanceID = strings.TrimSpace(instanceID)
|
||||
glog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID)
|
||||
if instanceID != "" {
|
||||
return instanceID, nil
|
||||
}
|
||||
// Fall through to metadata server lookup
|
||||
}
|
||||
|
||||
md, err := getMetadata()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return md.Uuid, nil
|
||||
}
|
||||
|
||||
func newOpenStack(cfg Config) (*OpenStack, error) {
|
||||
provider, err := openstack.NewClient(cfg.Global.AuthUrl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cfg.Global.TrustId != "" {
|
||||
authOptionsExt := trust.AuthOptionsExt{
|
||||
TrustID: cfg.Global.TrustId,
|
||||
AuthOptions: token3.AuthOptions{AuthOptions: cfg.toAuthOptions()},
|
||||
}
|
||||
err = trust.AuthenticateV3Trust(provider, authOptionsExt)
|
||||
} else {
|
||||
err = openstack.Authenticate(provider, cfg.toAuthOptions())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, err := readInstanceID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
os := OpenStack{
|
||||
provider: provider,
|
||||
region: cfg.Global.Region,
|
||||
lbOpts: cfg.LoadBalancer,
|
||||
bsOpts: cfg.BlockStorage,
|
||||
routeOpts: cfg.Route,
|
||||
localInstanceID: id,
|
||||
}
|
||||
|
||||
return &os, nil
|
||||
}
|
||||
|
||||
// mapNodeNameToServerName maps a k8s NodeName to an OpenStack Server Name
|
||||
// This is a simple string cast.
|
||||
func mapNodeNameToServerName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// mapServerToNodeName maps an OpenStack Server to a k8s NodeName
|
||||
func mapServerToNodeName(server *servers.Server) types.NodeName {
|
||||
// Node names are always lowercase, and (at least)
|
||||
// routecontroller does case-sensitive string comparisons
|
||||
// assuming this
|
||||
return types.NodeName(strings.ToLower(server.Name))
|
||||
}
|
||||
|
||||
func foreachServer(client *gophercloud.ServiceClient, opts servers.ListOptsBuilder, handler func(*servers.Server) (bool, error)) error {
|
||||
pager := servers.List(client, opts)
|
||||
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
s, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, server := range s {
|
||||
ok, err := handler(&server)
|
||||
if !ok || err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return err
|
||||
}
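// Usage sketch (mirrors ListRoutes in openstack_routes.go): the handler runs
// once per server on every page; returning false stops the iteration early:
//
//	err := foreachServer(client, servers.ListOpts{Status: "ACTIVE"},
//		func(srv *servers.Server) (bool, error) {
//			glog.V(4).Infof("saw server %s", srv.Name)
//			return true, nil // keep going
//		})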
|
||||
|
||||
func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*servers.Server, error) {
|
||||
opts := servers.ListOpts{
|
||||
Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(mapNodeNameToServerName(name))),
|
||||
Status: "ACTIVE",
|
||||
}
|
||||
pager := servers.List(client, opts)
|
||||
|
||||
serverList := make([]servers.Server, 0, 1)
|
||||
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
s, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
serverList = append(serverList, s...)
|
||||
if len(serverList) > 1 {
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(serverList) == 0 {
|
||||
return nil, ErrNotFound
|
||||
} else if len(serverList) > 1 {
|
||||
return nil, ErrMultipleResults
|
||||
}
|
||||
|
||||
return &serverList[0], nil
|
||||
}
|
||||
|
||||
func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
|
||||
addrs := []v1.NodeAddress{}
|
||||
|
||||
type Address struct {
|
||||
IpType string `mapstructure:"OS-EXT-IPS:type"`
|
||||
Addr string
|
||||
}
|
||||
|
||||
var addresses map[string][]Address
|
||||
err := mapstructure.Decode(srv.Addresses, &addresses)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for network, addrlist := range addresses {
|
||||
for _, props := range addrlist {
|
||||
var addressType v1.NodeAddressType
|
||||
if props.IpType == "floating" || network == "public" {
|
||||
addressType = v1.NodeExternalIP
|
||||
} else {
|
||||
addressType = v1.NodeInternalIP
|
||||
}
|
||||
|
||||
v1.AddToNodeAddresses(&addrs,
|
||||
v1.NodeAddress{
|
||||
Type: addressType,
|
||||
Address: props.Addr,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// AccessIPs are usually duplicates of "public" addresses.
|
||||
if srv.AccessIPv4 != "" {
|
||||
v1.AddToNodeAddresses(&addrs,
|
||||
v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: srv.AccessIPv4,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if srv.AccessIPv6 != "" {
|
||||
v1.AddToNodeAddresses(&addrs,
|
||||
v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: srv.AccessIPv6,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return addrs, nil
|
||||
}
|
||||
|
||||
func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
srv, err := getServerByName(client, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nodeAddresses(srv)
|
||||
}
|
||||
|
||||
func getAddressByName(client *gophercloud.ServiceClient, name types.NodeName) (string, error) {
|
||||
addrs, err := getAddressesByName(client, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if len(addrs) == 0 {
|
||||
return "", ErrNoAddressFound
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
if addr.Type == v1.NodeInternalIP {
|
||||
return addr.Address, nil
|
||||
}
|
||||
}
|
||||
|
||||
return addrs[0].Address, nil
|
||||
}
|
||||
|
||||
func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (os *OpenStack) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (os *OpenStack) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
glog.V(4).Info("openstack.LoadBalancer() called")
|
||||
|
||||
// TODO: Search for and support Rackspace loadbalancer API, and others.
|
||||
network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find network endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find compute endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
lbversion := os.lbOpts.LBVersion
|
||||
if lbversion == "" {
|
||||
// No version specified, try newest supported by server
|
||||
netExts, err := networkExtensions(network)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to list neutron extensions: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if netExts["lbaasv2"] {
|
||||
lbversion = "v2"
|
||||
} else if netExts["lbaas"] {
|
||||
lbversion = "v1"
|
||||
} else {
|
||||
glog.Warningf("Failed to find neutron LBaaS extension (v1 or v2)")
|
||||
return nil, false
|
||||
}
|
||||
glog.V(3).Infof("Using LBaaS extension %v", lbversion)
|
||||
}
|
||||
|
||||
glog.V(1).Info("Claiming to support LoadBalancer")
|
||||
|
||||
if lbversion == "v2" {
|
||||
return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true
|
||||
} else if lbversion == "v1" {
|
||||
return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true
|
||||
} else {
|
||||
glog.Warningf("Config error: unrecognised lb-version \"%v\"", lbversion)
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func isNotFound(err error) bool {
|
||||
e, ok := err.(*gophercloud.UnexpectedResponseCodeError)
|
||||
return ok && e.Actual == http.StatusNotFound
|
||||
}
|
||||
|
||||
func (os *OpenStack) Zones() (cloudprovider.Zones, bool) {
|
||||
glog.V(1).Info("Claiming to support Zones")
|
||||
|
||||
return os, true
|
||||
}
|
||||
func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
|
||||
md, err := getMetadata()
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: md.AvailabilityZone,
|
||||
Region: os.region,
|
||||
}
|
||||
glog.V(1).Infof("Current zone is %v", zone)
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
|
||||
glog.V(4).Info("openstack.Routes() called")
|
||||
|
||||
network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find network endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
netExts, err := networkExtensions(network)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to list neutron extensions: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if !netExts["extraroute"] {
|
||||
glog.V(3).Infof("Neutron extraroute extension not found, required for Routes support")
|
||||
return nil, false
|
||||
}
|
||||
|
||||
compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find compute endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
r, err := NewRoutes(compute, network, os.routeOpts)
|
||||
if err != nil {
|
||||
glog.Warningf("Error initialising Routes support: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
glog.V(1).Info("Claiming to support Routes")
|
||||
|
||||
return r, true
|
||||
}
|
172
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/rackspace/gophercloud"
|
||||
"github.com/rackspace/gophercloud/openstack"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/rackspace/gophercloud/pagination"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
type Instances struct {
|
||||
compute *gophercloud.ServiceClient
|
||||
flavor_to_resource map[string]*v1.NodeResources // keyed by flavor id
|
||||
}
|
||||
|
||||
// Instances returns an implementation of Instances for OpenStack.
|
||||
func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
|
||||
glog.V(4).Info("openstack.Instances() called")
|
||||
|
||||
compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find compute endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
pager := flavors.ListDetail(compute, nil)
|
||||
|
||||
flavor_to_resource := make(map[string]*v1.NodeResources)
|
||||
err = pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
flavorList, err := flavors.ExtractFlavors(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, flavor := range flavorList {
|
||||
rsrc := v1.NodeResources{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewQuantity(int64(flavor.VCPUs), resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(int64(flavor.RAM)*MiB, resource.BinarySI),
|
||||
"openstack.org/disk": *resource.NewQuantity(int64(flavor.Disk)*GB, resource.DecimalSI),
|
||||
"openstack.org/rxTxFactor": *resource.NewMilliQuantity(int64(flavor.RxTxFactor)*1000, resource.DecimalSI),
|
||||
"openstack.org/swap": *resource.NewQuantity(int64(flavor.Swap)*MiB, resource.BinarySI),
|
||||
},
|
||||
}
|
||||
flavor_to_resource[flavor.ID] = &rsrc
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find compute flavors: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Found %v compute flavors", len(flavor_to_resource))
|
||||
glog.V(1).Info("Claiming to support Instances")
|
||||
|
||||
return &Instances{compute, flavor_to_resource}, true
|
||||
}
|
||||
|
||||
func (i *Instances) List(name_filter string) ([]types.NodeName, error) {
|
||||
glog.V(4).Infof("openstack List(%v) called", name_filter)
|
||||
|
||||
opts := servers.ListOpts{
|
||||
Name: name_filter,
|
||||
Status: "ACTIVE",
|
||||
}
|
||||
pager := servers.List(i.compute, opts)
|
||||
|
||||
ret := make([]types.NodeName, 0)
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
sList, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for i := range sList {
|
||||
ret = append(ret, mapServerToNodeName(&sList[i]))
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Found %v instances matching %v: %v",
|
||||
len(ret), name_filter, ret)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
// Note this is *not* necessarily the same as hostname.
|
||||
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
md, err := getMetadata()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return types.NodeName(md.Name), nil
|
||||
}
|
||||
|
||||
func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
glog.V(4).Infof("NodeAddresses(%v) called", name)
|
||||
|
||||
addrs, err := getAddressesByName(i.compute, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs)
|
||||
return addrs, nil
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (i *Instances) ExternalID(name types.NodeName) (string, error) {
|
||||
srv, err := getServerByName(i.compute, name)
|
||||
if err != nil {
|
||||
if err == ErrNotFound {
|
||||
return "", cloudprovider.InstanceNotFound
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return srv.ID, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the kubelet's cloud provider ID.
|
||||
func (os *OpenStack) InstanceID() (string, error) {
|
||||
return os.localInstanceID, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (i *Instances) InstanceID(name types.NodeName) (string, error) {
|
||||
srv, err := getServerByName(i.compute, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// In the future it is possible to also return an endpoint as:
|
||||
// <endpoint>/<instanceid>
|
||||
return "/" + srv.ID, nil
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (i *Instances) InstanceType(name types.NodeName) (string, error) {
|
||||
return "", nil
|
||||
}
|
1574
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go
generated
vendored
Normal file
File diff suppressed because it is too large
278
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go
generated
vendored
Normal file
@ -0,0 +1,278 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/rackspace/gophercloud"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers"
|
||||
neutronports "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
var ErrNoRouterId = errors.New("router-id not set in cloud provider config")
|
||||
|
||||
type Routes struct {
|
||||
compute *gophercloud.ServiceClient
|
||||
network *gophercloud.ServiceClient
|
||||
opts RouterOpts
|
||||
}
|
||||
|
||||
func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, opts RouterOpts) (cloudprovider.Routes, error) {
|
||||
if opts.RouterId == "" {
|
||||
return nil, ErrNoRouterId
|
||||
}
|
||||
|
||||
return &Routes{
|
||||
compute: compute,
|
||||
network: network,
|
||||
opts: opts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *Routes) ListRoutes(clusterName string) ([]*cloudprovider.Route, error) {
|
||||
glog.V(4).Infof("ListRoutes(%v)", clusterName)
|
||||
|
||||
nodeNamesByAddr := make(map[string]types.NodeName)
|
||||
err := foreachServer(r.compute, servers.ListOpts{Status: "ACTIVE"}, func(srv *servers.Server) (bool, error) {
|
||||
addrs, err := nodeAddresses(srv)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
name := mapServerToNodeName(srv)
|
||||
for _, addr := range addrs {
|
||||
nodeNamesByAddr[addr.Address] = name
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
router, err := routers.Get(r.network, r.opts.RouterId).Extract()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var routes []*cloudprovider.Route
|
||||
for _, item := range router.Routes {
|
||||
nodeName, ok := nodeNamesByAddr[item.NextHop]
|
||||
if !ok {
|
||||
// Not one of our routes?
|
||||
glog.V(4).Infof("Skipping route with unknown nexthop %v", item.NextHop)
|
||||
continue
|
||||
}
|
||||
route := cloudprovider.Route{
|
||||
Name: item.DestinationCIDR,
|
||||
TargetNode: nodeName,
|
||||
DestinationCIDR: item.DestinationCIDR,
|
||||
}
|
||||
routes = append(routes, &route)
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func updateRoutes(network *gophercloud.ServiceClient, router *routers.Router, newRoutes []routers.Route) (func(), error) {
|
||||
origRoutes := router.Routes // shallow copy
|
||||
|
||||
_, err := routers.Update(network, router.ID, routers.UpdateOpts{
|
||||
Routes: newRoutes,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unwinder := func() {
|
||||
glog.V(4).Info("Reverting routes change to router ", router.ID)
|
||||
_, err := routers.Update(network, router.ID, routers.UpdateOpts{
|
||||
Routes: origRoutes,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Warning("Unable to reset routes during error unwind: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
return unwinder, nil
|
||||
}
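// The returned unwinder restores the router's previous static routes; callers
// register it with `defer onFailure.Call(unwind)` and skip it on success via
// onFailure.Disarm() (see CreateRoute and DeleteRoute below).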
|
||||
|
||||
func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutronports.Port, newPairs []neutronports.AddressPair) (func(), error) {
|
||||
origPairs := port.AllowedAddressPairs // shallow copy
|
||||
|
||||
_, err := neutronports.Update(network, port.ID, neutronports.UpdateOpts{
|
||||
AllowedAddressPairs: newPairs,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unwinder := func() {
|
||||
glog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID)
|
||||
_, err := neutronports.Update(network, port.ID, neutronports.UpdateOpts{
|
||||
AllowedAddressPairs: origPairs,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
return unwinder, nil
|
||||
}
|
||||
|
||||
func (r *Routes) CreateRoute(clusterName string, nameHint string, route *cloudprovider.Route) error {
|
||||
glog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route)
|
||||
|
||||
onFailure := NewCaller()
|
||||
|
||||
addr, err := getAddressByName(r.compute, route.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode)
|
||||
|
||||
router, err := routers.Get(r.network, r.opts.RouterId).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routes := router.Routes
|
||||
|
||||
for _, item := range routes {
|
||||
if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr {
|
||||
glog.V(4).Infof("Skipping existing route: %v", route)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
routes = append(routes, routers.Route{
|
||||
DestinationCIDR: route.DestinationCIDR,
|
||||
NextHop: addr,
|
||||
})
|
||||
|
||||
unwind, err := updateRoutes(r.network, router, routes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer onFailure.Call(unwind)
|
||||
|
||||
port, err := getPortByIP(r.network, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, item := range port.AllowedAddressPairs {
|
||||
if item.IPAddress == route.DestinationCIDR {
|
||||
glog.V(4).Info("Found existing allowed-address-pair: ", item)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
newPairs := append(port.AllowedAddressPairs, neutronports.AddressPair{
|
||||
IPAddress: route.DestinationCIDR,
|
||||
})
|
||||
unwind, err := updateAllowedAddressPairs(r.network, &port, newPairs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer onFailure.Call(unwind)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Route created: %v", route)
|
||||
onFailure.Disarm()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Routes) DeleteRoute(clusterName string, route *cloudprovider.Route) error {
|
||||
glog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route)
|
||||
|
||||
onFailure := NewCaller()
|
||||
|
||||
addr, err := getAddressByName(r.compute, route.TargetNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
router, err := routers.Get(r.network, r.opts.RouterId).Extract()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routes := router.Routes
|
||||
index := -1
|
||||
for i, item := range routes {
|
||||
if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if index == -1 {
|
||||
glog.V(4).Infof("Skipping non-existent route: %v", route)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete element `index`
|
||||
routes[index] = routes[len(routes)-1]
|
||||
routes = routes[:len(routes)-1]
|
||||
|
||||
unwind, err := updateRoutes(r.network, router, routes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer onFailure.Call(unwind)
|
||||
|
||||
port, err := getPortByIP(r.network, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
addr_pairs := port.AllowedAddressPairs
|
||||
index = -1
|
||||
for i, item := range addr_pairs {
|
||||
if item.IPAddress == route.DestinationCIDR {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if index != -1 {
|
||||
// Delete element `index`
|
||||
addr_pairs[index] = addr_pairs[len(addr_pairs)-1]
|
||||
addr_pairs = addr_pairs[:len(addr_pairs)-1]
|
||||
|
||||
unwind, err := updateAllowedAddressPairs(r.network, &port, addr_pairs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer onFailure.Call(unwind)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Route deleted: %v", route)
|
||||
onFailure.Disarm()
|
||||
return nil
|
||||
}
|
71
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes_test.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
func TestRoutes(t *testing.T) {
|
||||
const clusterName = "ignored"
|
||||
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
os, err := newOpenStack(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
|
||||
}
|
||||
|
||||
r, ok := os.Routes()
|
||||
if !ok {
|
||||
t.Fatalf("Routes() returned false - perhaps your stack doens't support Neutron?")
|
||||
}
|
||||
|
||||
newroute := cloudprovider.Route{
|
||||
DestinationCIDR: "10.164.2.0/24",
|
||||
TargetNode: types.NodeName("testinstance"),
|
||||
}
|
||||
err = r.CreateRoute(clusterName, "myhint", &newroute)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateRoute error: %v", err)
|
||||
}
|
||||
|
||||
routelist, err := r.ListRoutes(clusterName)
|
||||
if err != nil {
|
||||
t.Fatalf("ListRoutes() error: %v", err)
|
||||
}
|
||||
for _, route := range routelist {
|
||||
_, cidr, err := net.ParseCIDR(route.DestinationCIDR)
|
||||
if err != nil {
|
||||
t.Logf("Ignoring route %s, unparsable CIDR: %v", route.Name, err)
|
||||
continue
|
||||
}
|
||||
t.Logf("%s via %s", cidr, route.TargetNode)
|
||||
}
|
||||
|
||||
err = r.DeleteRoute(clusterName, &newroute)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteRoute error: %v", err)
|
||||
}
|
||||
}
|
387
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go
generated
vendored
Normal file
@ -0,0 +1,387 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rackspace/gophercloud"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
)
|
||||
|
||||
const volumeAvailableStatus = "available"
|
||||
const volumeInUseStatus = "in-use"
|
||||
const volumeCreateTimeoutSeconds = 30
|
||||
const testClusterName = "testCluster"
|
||||
|
||||
func WaitForVolumeStatus(t *testing.T, os *OpenStack, volumeName string, status string, timeoutSeconds int) {
|
||||
timeout := timeoutSeconds
|
||||
start := time.Now().Second()
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
if timeout >= 0 && time.Now().Second()-start >= timeout {
|
||||
t.Logf("Volume (%s) status did not change to %s after %v seconds\n",
|
||||
volumeName,
|
||||
status,
|
||||
timeout)
|
||||
return
|
||||
}
|
||||
|
||||
getVol, err := os.getVolume(volumeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot get existing Cinder volume (%s): %v", volumeName, err)
|
||||
}
|
||||
if getVol.Status == status {
|
||||
t.Logf("Volume (%s) status changed to %s after %v seconds\n",
|
||||
volumeName,
|
||||
status,
|
||||
timeout)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
auth-url = http://auth.url
|
||||
username = user
|
||||
[LoadBalancer]
|
||||
create-monitor = yes
|
||||
monitor-delay = 1m
|
||||
monitor-timeout = 30s
|
||||
monitor-max-retries = 3
|
||||
[BlockStorage]
|
||||
trust-device-path = yes
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %s", err)
|
||||
}
|
||||
if cfg.Global.AuthUrl != "http://auth.url" {
|
||||
t.Errorf("incorrect authurl: %s", cfg.Global.AuthUrl)
|
||||
}
|
||||
|
||||
if !cfg.LoadBalancer.CreateMonitor {
|
||||
t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorDelay.Duration != 1*time.Minute {
|
||||
t.Errorf("incorrect lb.monitordelay: %s", cfg.LoadBalancer.MonitorDelay)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorTimeout.Duration != 30*time.Second {
|
||||
t.Errorf("incorrect lb.monitortimeout: %s", cfg.LoadBalancer.MonitorTimeout)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorMaxRetries != 3 {
|
||||
t.Errorf("incorrect lb.monitormaxretries: %d", cfg.LoadBalancer.MonitorMaxRetries)
|
||||
}
|
||||
if cfg.BlockStorage.TrustDevicePath != true {
|
||||
t.Errorf("incorrect bs.trustdevicepath: %v", cfg.BlockStorage.TrustDevicePath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAuthOptions(t *testing.T) {
|
||||
cfg := Config{}
|
||||
cfg.Global.Username = "user"
|
||||
// etc.
|
||||
|
||||
ao := cfg.toAuthOptions()
|
||||
|
||||
if !ao.AllowReauth {
|
||||
t.Errorf("Will need to be able to reauthenticate")
|
||||
}
|
||||
if ao.Username != cfg.Global.Username {
|
||||
t.Errorf("Username %s != %s", ao.Username, cfg.Global.Username)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCaller(t *testing.T) {
|
||||
called := false
|
||||
myFunc := func() { called = true }
|
||||
|
||||
c := NewCaller()
|
||||
c.Call(myFunc)
|
||||
|
||||
if !called {
|
||||
t.Errorf("Caller failed to call function in default case")
|
||||
}
|
||||
|
||||
c.Disarm()
|
||||
called = false
|
||||
c.Call(myFunc)
|
||||
|
||||
if called {
|
||||
t.Error("Caller still called function when disarmed")
|
||||
}
|
||||
|
||||
// Confirm the "usual" deferred Caller pattern works as expected
|
||||
|
||||
called = false
|
||||
success_case := func() {
|
||||
c := NewCaller()
|
||||
defer c.Call(func() { called = true })
|
||||
c.Disarm()
|
||||
}
|
||||
if success_case(); called {
|
||||
t.Error("Deferred success case still invoked unwind")
|
||||
}
|
||||
|
||||
called = false
|
||||
failure_case := func() {
|
||||
c := NewCaller()
|
||||
defer c.Call(func() { called = true })
|
||||
}
|
||||
if failure_case(); !called {
|
||||
t.Error("Deferred failure case failed to invoke unwind")
|
||||
}
|
||||
}
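The deferred unwind pattern exercised above is the intended use of Caller: register the cleanup up front, then disarm it once the happy path succeeds. A minimal sketch, assuming only the NewCaller/Call/Disarm API shown in this test; createThing, configureThing and deleteThing are hypothetical placeholders, not functions from this package:

func createWithCleanup() (string, error) {
	id, err := createThing() // hypothetical resource creation
	if err != nil {
		return "", err
	}
	// While armed, the deferred Call removes the resource on any early return.
	unwind := NewCaller()
	defer unwind.Call(func() { deleteThing(id) }) // hypothetical cleanup
	if err := configureThing(id); err != nil { // hypothetical follow-up step
		return "", err // unwind still armed, so deleteThing runs
	}
	unwind.Disarm() // success: skip the deferred cleanup
	return id, nil
}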
|
||||
|
||||
// An arbitrary sort.Interface, just for easier comparison
|
||||
type AddressSlice []v1.NodeAddress
|
||||
|
||||
func (a AddressSlice) Len() int { return len(a) }
|
||||
func (a AddressSlice) Less(i, j int) bool { return a[i].Address < a[j].Address }
|
||||
func (a AddressSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
func TestNodeAddresses(t *testing.T) {
|
||||
srv := servers.Server{
|
||||
Status: "ACTIVE",
|
||||
HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
|
||||
AccessIPv4: "50.56.176.99",
|
||||
AccessIPv6: "2001:4800:790e:510:be76:4eff:fe04:82a8",
|
||||
Addresses: map[string]interface{}{
|
||||
"private": []interface{}{
|
||||
map[string]interface{}{
|
||||
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b",
|
||||
"version": float64(4),
|
||||
"addr": "10.0.0.32",
|
||||
"OS-EXT-IPS:type": "fixed",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"version": float64(4),
|
||||
"addr": "50.56.176.36",
|
||||
"OS-EXT-IPS:type": "floating",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"version": float64(4),
|
||||
"addr": "10.0.0.31",
|
||||
// No OS-EXT-IPS:type
|
||||
},
|
||||
},
|
||||
"public": []interface{}{
|
||||
map[string]interface{}{
|
||||
"version": float64(4),
|
||||
"addr": "50.56.176.35",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"version": float64(6),
|
||||
"addr": "2001:4800:780e:510:be76:4eff:fe04:84a8",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
addrs, err := nodeAddresses(&srv)
|
||||
if err != nil {
|
||||
t.Fatalf("nodeAddresses returned error: %v", err)
|
||||
}
|
||||
|
||||
sort.Sort(AddressSlice(addrs))
|
||||
t.Logf("addresses is %v", addrs)
|
||||
|
||||
want := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: "10.0.0.31"},
|
||||
{Type: v1.NodeInternalIP, Address: "10.0.0.32"},
|
||||
{Type: v1.NodeExternalIP, Address: "2001:4800:780e:510:be76:4eff:fe04:84a8"},
|
||||
{Type: v1.NodeExternalIP, Address: "2001:4800:790e:510:be76:4eff:fe04:82a8"},
|
||||
{Type: v1.NodeExternalIP, Address: "50.56.176.35"},
|
||||
{Type: v1.NodeExternalIP, Address: "50.56.176.36"},
|
||||
{Type: v1.NodeExternalIP, Address: "50.56.176.99"},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(want, addrs) {
|
||||
t.Errorf("nodeAddresses returned incorrect value %v", addrs)
|
||||
}
|
||||
}
|
||||
|
||||
// This allows acceptance testing against an existing OpenStack
|
||||
// install, using the standard OS_* OpenStack client environment
|
||||
// variables.
|
||||
// FIXME: it would be better to hermetically test against canned JSON
|
||||
// requests/responses.
|
||||
func configFromEnv() (cfg Config, ok bool) {
|
||||
cfg.Global.AuthUrl = os.Getenv("OS_AUTH_URL")
|
||||
|
||||
cfg.Global.TenantId = os.Getenv("OS_TENANT_ID")
|
||||
// Rax/nova _insists_ that we don't specify both tenant ID and name
|
||||
if cfg.Global.TenantId == "" {
|
||||
cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME")
|
||||
}
|
||||
|
||||
cfg.Global.Username = os.Getenv("OS_USERNAME")
|
||||
cfg.Global.Password = os.Getenv("OS_PASSWORD")
|
||||
cfg.Global.ApiKey = os.Getenv("OS_API_KEY")
|
||||
cfg.Global.Region = os.Getenv("OS_REGION_NAME")
|
||||
cfg.Global.DomainId = os.Getenv("OS_DOMAIN_ID")
|
||||
cfg.Global.DomainName = os.Getenv("OS_DOMAIN_NAME")
|
||||
|
||||
ok = (cfg.Global.AuthUrl != "" &&
|
||||
cfg.Global.Username != "" &&
|
||||
(cfg.Global.Password != "" || cfg.Global.ApiKey != "") &&
|
||||
(cfg.Global.TenantId != "" || cfg.Global.TenantName != "" ||
|
||||
cfg.Global.DomainId != "" || cfg.Global.DomainName != ""))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func TestNewOpenStack(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
_, err := newOpenStack(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalancer(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
versions := []string{"v1", "v2", ""}
|
||||
|
||||
for _, v := range versions {
|
||||
t.Logf("Trying LBVersion = '%s'\n", v)
|
||||
cfg.LoadBalancer.LBVersion = v
|
||||
|
||||
os, err := newOpenStack(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
|
||||
}
|
||||
|
||||
lb, ok := os.LoadBalancer()
|
||||
if !ok {
|
||||
t.Fatalf("LoadBalancer() returned false - perhaps your stack doesn't support Neutron?")
|
||||
}
|
||||
|
||||
_, exists, err := lb.GetLoadBalancer(testClusterName, &v1.Service{ObjectMeta: v1.ObjectMeta{Name: "noexist"}})
|
||||
if err != nil {
|
||||
t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatalf("GetLoadBalancer(\"noexist\") returned exists")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestZones(t *testing.T) {
|
||||
SetMetadataFixture(&FakeMetadata)
|
||||
defer ClearMetadata()
|
||||
|
||||
os := OpenStack{
|
||||
provider: &gophercloud.ProviderClient{
|
||||
IdentityBase: "http://auth.url/",
|
||||
},
|
||||
region: "myRegion",
|
||||
}
|
||||
|
||||
z, ok := os.Zones()
|
||||
if !ok {
|
||||
t.Fatalf("Zones() returned false")
|
||||
}
|
||||
|
||||
zone, err := z.GetZone()
|
||||
if err != nil {
|
||||
t.Fatalf("GetZone() returned error: %s", err)
|
||||
}
|
||||
|
||||
if zone.Region != "myRegion" {
|
||||
t.Fatalf("GetZone() returned wrong region (%s)", zone.Region)
|
||||
}
|
||||
|
||||
if zone.FailureDomain != "nova" {
|
||||
t.Fatalf("GetZone() returned wrong failure domain (%s)", zone.FailureDomain)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVolumes(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
os, err := newOpenStack(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate OpenStack: %s", err)
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"test": "value",
|
||||
}
|
||||
vol, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create a new Cinder volume: %v", err)
|
||||
}
|
||||
t.Logf("Volume (%s) created\n", vol)
|
||||
|
||||
WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)
|
||||
|
||||
diskId, err := os.AttachDisk(os.localInstanceID, vol)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot AttachDisk Cinder volume %s: %v", vol, err)
|
||||
}
|
||||
t.Logf("Volume (%s) attached, disk ID: %s\n", vol, diskId)
|
||||
|
||||
WaitForVolumeStatus(t, os, vol, volumeInUseStatus, volumeCreateTimeoutSeconds)
|
||||
|
||||
devicePath := os.GetDevicePath(diskId)
|
||||
if !strings.HasPrefix(devicePath, "/dev/disk/by-id/") {
|
||||
t.Fatalf("GetDevicePath returned and unexpected path for Cinder volume %s, returned %s", vol, devicePath)
|
||||
}
|
||||
t.Logf("Volume (%s) found at path: %s\n", vol, devicePath)
|
||||
|
||||
err = os.DetachDisk(os.localInstanceID, vol)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot DetachDisk Cinder volume %s: %v", vol, err)
|
||||
}
|
||||
t.Logf("Volume (%s) detached\n", vol)
|
||||
|
||||
WaitForVolumeStatus(t, os, vol, volumeAvailableStatus, volumeCreateTimeoutSeconds)
|
||||
|
||||
err = os.DeleteVolume(vol)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot delete Cinder volume %s: %v", vol, err)
|
||||
}
|
||||
t.Logf("Volume (%s) deleted\n", vol)
|
||||
|
||||
}
|
290
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go
generated
vendored
Normal file
|
@@ -0,0 +1,290 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
|
||||
"github.com/rackspace/gophercloud"
|
||||
"github.com/rackspace/gophercloud/openstack"
|
||||
"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
|
||||
"github.com/rackspace/gophercloud/pagination"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Attaches given cinder volume to the compute running kubelet
|
||||
func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, error) {
|
||||
disk, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil || cClient == nil {
|
||||
glog.Errorf("Unable to initialize nova client for region: %s", os.region)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
|
||||
if instanceID == disk.Attachments[0]["server_id"] {
|
||||
glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
|
||||
return disk.ID, nil
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Disk %q is attached to a different compute (%q), detaching", diskName, disk.Attachments[0]["server_id"])
|
||||
err = os.DetachDisk(fmt.Sprintf("%s", disk.Attachments[0]["server_id"]), diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// add read only flag here if possible spothanis
|
||||
_, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
|
||||
VolumeID: disk.ID,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to attach %s volume to %s compute: %v", diskName, instanceID, err)
|
||||
return "", err
|
||||
}
|
||||
glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
|
||||
return disk.ID, nil
|
||||
}
|
||||
|
||||
// Detaches given cinder volume from the compute running kubelet
|
||||
func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
|
||||
disk, err := os.getVolume(partialDiskId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
if err != nil || cClient == nil {
|
||||
glog.Errorf("Unable to initialize nova client for region: %s", os.region)
|
||||
return err
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
|
||||
// This is a blocking call and affects kubelet's performance directly.
|
||||
// We should consider moving it into a separate goroutine if it proves to be a problem.
|
||||
err = volumeattach.Delete(cClient, instanceID, disk.ID).ExtractErr()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, instanceID, err)
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, instanceID)
|
||||
} else {
|
||||
errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, instanceID)
|
||||
glog.Errorf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Takes a partial/full disk id or diskname
|
||||
func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
|
||||
sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
|
||||
var volume volumes.Volume
|
||||
if err != nil || sClient == nil {
|
||||
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
|
||||
return volume, err
|
||||
}
|
||||
|
||||
err = volumes.List(sClient, nil).EachPage(func(page pagination.Page) (bool, error) {
|
||||
vols, err := volumes.ExtractVolumes(page)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to extract volumes: %v", err)
|
||||
return false, err
|
||||
} else {
|
||||
for _, v := range vols {
|
||||
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
|
||||
if v.Name == diskName || strings.Contains(v.ID, diskName) {
|
||||
volume = v
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// if it reached here then no disk with the given name was found.
|
||||
errmsg := fmt.Sprintf("Unable to find disk: %s in region %s", diskName, os.region)
|
||||
return false, errors.New(errmsg)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Errorf("Error occurred getting volume: %s", diskName)
|
||||
return volume, err
|
||||
}
|
||||
return volume, err
|
||||
}
|
||||
|
||||
// Create a volume of given size (in GiB)
|
||||
func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeName string, err error) {
|
||||
|
||||
sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
|
||||
if err != nil || sClient == nil {
|
||||
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
|
||||
return "", err
|
||||
}
|
||||
|
||||
opts := volumes.CreateOpts{
|
||||
Name: name,
|
||||
Size: size,
|
||||
VolumeType: vtype,
|
||||
Availability: availability,
|
||||
}
|
||||
if tags != nil {
|
||||
opts.Metadata = *tags
|
||||
}
|
||||
vol, err := volumes.Create(sClient, opts).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to create a %d GB volume: %v", size, err)
|
||||
return "", err
|
||||
}
|
||||
glog.Infof("Created volume %v", vol.ID)
|
||||
return vol.ID, err
|
||||
}
|
||||
|
||||
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
|
||||
func (os *OpenStack) GetDevicePath(diskId string) string {
|
||||
// Build a list of candidate device paths
|
||||
candidateDeviceNodes := []string{
|
||||
// KVM
|
||||
fmt.Sprintf("virtio-%s", diskId[:20]),
|
||||
// ESXi
|
||||
fmt.Sprintf("wwn-0x%s", strings.Replace(diskId, "-", "", -1)),
|
||||
}
|
||||
|
||||
files, _ := ioutil.ReadDir("/dev/disk/by-id/")
|
||||
|
||||
for _, f := range files {
|
||||
for _, c := range candidateDeviceNodes {
|
||||
if c == f.Name() {
|
||||
glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
|
||||
return path.Join("/dev/disk/by-id/", f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
glog.Warningf("Failed to find device for the diskid: %q\n", diskId)
|
||||
return ""
|
||||
}
|
||||
|
||||
func (os *OpenStack) DeleteVolume(volumeName string) error {
|
||||
used, err := os.diskIsUsed(volumeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if used {
|
||||
msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeName)
|
||||
return volume.NewDeletedVolumeInUseError(msg)
|
||||
}
|
||||
|
||||
sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
|
||||
Region: os.region,
|
||||
})
|
||||
|
||||
if err != nil || sClient == nil {
|
||||
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
|
||||
return err
|
||||
}
|
||||
err = volumes.Delete(sClient, volumeName).ExtractErr()
|
||||
if err != nil {
|
||||
glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the device path, as known by Cinder, of a volume attached to the compute running kubelet
|
||||
func (os *OpenStack) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
|
||||
// See issue #33128 - Cinder does not always tell you the right device path, as such
|
||||
// we must only use this value as a last resort.
|
||||
disk, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
|
||||
if instanceID == disk.Attachments[0]["server_id"] {
|
||||
// Attachment[0]["device"] points to the device path
|
||||
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
|
||||
return disk.Attachments[0]["device"].(string), nil
|
||||
} else {
|
||||
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
|
||||
glog.Errorf(errMsg)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("volume %s is not attached to %s", diskName, instanceID)
|
||||
}
|
||||
|
||||
// query if a volume is attached to a compute instance
|
||||
func (os *OpenStack) DiskIsAttached(diskName, instanceID string) (bool, error) {
|
||||
disk, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// query if a list of volumes are attached to a compute instance
|
||||
func (os *OpenStack) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
for _, diskName := range diskNames {
|
||||
disk, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
// diskIsUsed returns true if a disk is attached to any node.
|
||||
func (os *OpenStack) diskIsUsed(diskName string) (bool, error) {
|
||||
disk, err := os.getVolume(diskName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(disk.Attachments) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// query if we should trust the cinder-provided deviceName, see issue #33128
|
||||
func (os *OpenStack) ShouldTrustDevicePath() bool {
|
||||
return os.bsOpts.TrustDevicePath
|
||||
}
|
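Taken together, the functions in this file cover the Cinder volume lifecycle: CreateVolume, AttachDisk, GetDevicePath / GetAttachmentDiskPath, DetachDisk and DeleteVolume. A rough sketch of how a caller might locate the device node after an attach, using only the methods defined above (error handling is abbreviated, the fmt import is assumed, and this is not the volume plugin's actual code):

func attachAndLocate(os *OpenStack, instanceID, volName string) (string, error) {
	diskID, err := os.AttachDisk(instanceID, volName)
	if err != nil {
		return "", err
	}
	// Prefer the locally discovered /dev/disk/by-id path.
	if devicePath := os.GetDevicePath(diskID); devicePath != "" {
		return devicePath, nil
	}
	// Fall back to the path Cinder reports, but only if the config says it
	// can be trusted (see issue #33128).
	if os.ShouldTrustDevicePath() {
		return os.GetAttachmentDiskPath(instanceID, volName)
	}
	return "", fmt.Errorf("device for volume %s not found", volName)
}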
42
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD
generated
vendored
Normal file
|
@@ -0,0 +1,42 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["ovirt.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["ovirt_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = ["//pkg/cloudprovider:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
297
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go
generated
vendored
Normal file
|
@@ -0,0 +1,297 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ovirt
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/gcfg.v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const ProviderName = "ovirt"
|
||||
|
||||
type OVirtInstance struct {
|
||||
UUID string
|
||||
Name string
|
||||
IPAddress string
|
||||
}
|
||||
|
||||
type OVirtInstanceMap map[string]OVirtInstance
|
||||
|
||||
type OVirtCloud struct {
|
||||
VmsRequest *url.URL
|
||||
HostsRequest *url.URL
|
||||
}
|
||||
|
||||
type OVirtApiConfig struct {
|
||||
Connection struct {
|
||||
ApiEntry string `gcfg:"uri"`
|
||||
Username string `gcfg:"username"`
|
||||
Password string `gcfg:"password"`
|
||||
}
|
||||
Filters struct {
|
||||
VmsQuery string `gcfg:"vms"`
|
||||
}
|
||||
}
|
||||
|
||||
type XmlVmAddress struct {
|
||||
Address string `xml:"address,attr"`
|
||||
}
|
||||
|
||||
type XmlVmInfo struct {
|
||||
UUID string `xml:"id,attr"`
|
||||
Name string `xml:"name"`
|
||||
Hostname string `xml:"guest_info>fqdn"`
|
||||
Addresses []XmlVmAddress `xml:"guest_info>ips>ip"`
|
||||
State string `xml:"status>state"`
|
||||
}
|
||||
|
||||
type XmlVmsList struct {
|
||||
XMLName xml.Name `xml:"vms"`
|
||||
Vm []XmlVmInfo `xml:"vm"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(ProviderName,
|
||||
func(config io.Reader) (cloudprovider.Interface, error) {
|
||||
return newOVirtCloud(config)
|
||||
})
|
||||
}
|
||||
|
||||
func newOVirtCloud(config io.Reader) (*OVirtCloud, error) {
|
||||
if config == nil {
|
||||
return nil, fmt.Errorf("missing configuration file for ovirt cloud provider")
|
||||
}
|
||||
|
||||
oVirtConfig := OVirtApiConfig{}
|
||||
|
||||
/* defaults */
|
||||
oVirtConfig.Connection.Username = "admin@internal"
|
||||
|
||||
if err := gcfg.ReadInto(&oVirtConfig, config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if oVirtConfig.Connection.ApiEntry == "" {
|
||||
return nil, fmt.Errorf("missing ovirt uri in cloud provider configuration")
|
||||
}
|
||||
|
||||
request, err := url.Parse(oVirtConfig.Connection.ApiEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
request.Path = path.Join(request.Path, "vms")
|
||||
request.User = url.UserPassword(oVirtConfig.Connection.Username, oVirtConfig.Connection.Password)
|
||||
request.RawQuery = url.Values{"search": {oVirtConfig.Filters.VmsQuery}}.Encode()
|
||||
|
||||
return &OVirtCloud{VmsRequest: request}, nil
|
||||
}
|
||||
|
||||
func (v *OVirtCloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (v *OVirtCloud) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (v *OVirtCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// LoadBalancer returns an implementation of LoadBalancer for oVirt cloud
|
||||
func (v *OVirtCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Instances returns an implementation of Instances for oVirt cloud
|
||||
func (v *OVirtCloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return v, true
|
||||
}
|
||||
|
||||
// Zones returns an implementation of Zones for oVirt cloud
|
||||
func (v *OVirtCloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Routes returns an implementation of Routes for oVirt cloud
|
||||
func (v *OVirtCloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// NodeAddresses returns the NodeAddresses of the instance with the specified nodeName.
|
||||
func (v *OVirtCloud) NodeAddresses(nodeName types.NodeName) ([]v1.NodeAddress, error) {
|
||||
name := mapNodeNameToInstanceName(nodeName)
|
||||
instance, err := v.fetchInstance(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var address net.IP
|
||||
|
||||
if instance.IPAddress != "" {
|
||||
address = net.ParseIP(instance.IPAddress)
|
||||
if address == nil {
|
||||
return nil, fmt.Errorf("couldn't parse address: %s", instance.IPAddress)
|
||||
}
|
||||
} else {
|
||||
resolved, err := net.LookupIP(name)
|
||||
if err != nil || len(resolved) < 1 {
|
||||
return nil, fmt.Errorf("couldn't lookup address: %s", name)
|
||||
}
|
||||
address = resolved[0]
|
||||
}
|
||||
|
||||
return []v1.NodeAddress{
|
||||
{Type: v1.NodeLegacyHostIP, Address: address.String()},
|
||||
{Type: v1.NodeInternalIP, Address: address.String()},
|
||||
{Type: v1.NodeExternalIP, Address: address.String()},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapNodeNameToInstanceName maps from a k8s NodeName to an ovirt instance name (the hostname)
|
||||
// This is a simple string cast
|
||||
func mapNodeNameToInstanceName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified node with the specified NodeName (deprecated).
|
||||
func (v *OVirtCloud) ExternalID(nodeName types.NodeName) (string, error) {
|
||||
name := mapNodeNameToInstanceName(nodeName)
|
||||
instance, err := v.fetchInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return instance.UUID, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified NodeName.
|
||||
func (v *OVirtCloud) InstanceID(nodeName types.NodeName) (string, error) {
|
||||
name := mapNodeNameToInstanceName(nodeName)
|
||||
instance, err := v.fetchInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// TODO: define a way to identify the provider instance to complete
|
||||
// the format <provider_instance_id>/<instance_id>.
|
||||
return "/" + instance.UUID, err
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (v *OVirtCloud) InstanceType(name types.NodeName) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) {
|
||||
if body == nil {
|
||||
return nil, fmt.Errorf("ovirt rest-api response body is missing")
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vmlist := XmlVmsList{}
|
||||
|
||||
if err := xml.Unmarshal(content, &vmlist); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
instances := make(OVirtInstanceMap)
|
||||
|
||||
for _, vm := range vmlist.Vm {
|
||||
// Always return only vms that are up and running
|
||||
if vm.Hostname != "" && strings.ToLower(vm.State) == "up" {
|
||||
address := ""
|
||||
if len(vm.Addresses) > 0 {
|
||||
address = vm.Addresses[0].Address
|
||||
}
|
||||
|
||||
instances[vm.Hostname] = OVirtInstance{
|
||||
UUID: vm.UUID,
|
||||
Name: vm.Name,
|
||||
IPAddress: address,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
func (v *OVirtCloud) fetchAllInstances() (OVirtInstanceMap, error) {
|
||||
response, err := http.Get(v.VmsRequest.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
|
||||
return getInstancesFromXml(response.Body)
|
||||
}
|
||||
|
||||
func (v *OVirtCloud) fetchInstance(name string) (*OVirtInstance, error) {
|
||||
allInstances, err := v.fetchAllInstances()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
instance, found := allInstances[name]
|
||||
if !found {
|
||||
return nil, fmt.Errorf("cannot find instance: %s", name)
|
||||
}
|
||||
|
||||
return &instance, nil
|
||||
}
|
||||
|
||||
func (m *OVirtInstanceMap) ListSortedNames() []string {
|
||||
var names []string
|
||||
|
||||
for k := range *m {
|
||||
names = append(names, k)
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
return names
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
func (v *OVirtCloud) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
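newOVirtCloud expects a gcfg-style config with a [connection] section (uri, username, password) and an optional [filters] section whose vms key becomes the oVirt search query, per the gcfg tags on OVirtApiConfig above. A minimal sketch of constructing the provider directly; the endpoint, credentials and query values are illustrative placeholders, and the strings import is assumed:

func exampleOVirtCloud() (*OVirtCloud, error) {
	cfg := strings.NewReader(`
[connection]
uri = https://ovirt.example.com/ovirt-engine/api
username = admin@internal
password = secret

[filters]
vms = tag=kubernetes
`)
	return newOVirtCloud(cfg)
}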
126
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,126 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ovirt
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
func TestOVirtCloudConfiguration(t *testing.T) {
|
||||
config1 := (io.Reader)(nil)
|
||||
|
||||
_, err1 := cloudprovider.GetCloudProvider("ovirt", config1)
|
||||
if err1 == nil {
|
||||
t.Fatalf("An error is expected when the configuration is missing")
|
||||
}
|
||||
|
||||
config2 := strings.NewReader("")
|
||||
|
||||
_, err2 := cloudprovider.GetCloudProvider("ovirt", config2)
|
||||
if err2 == nil {
|
||||
t.Fatalf("An error is expected when the configuration is empty")
|
||||
}
|
||||
|
||||
config3 := strings.NewReader(`
|
||||
[connection]
|
||||
`)
|
||||
|
||||
_, err3 := cloudprovider.GetCloudProvider("ovirt", config3)
|
||||
if err3 == nil {
|
||||
t.Fatalf("An error is expected when the uri is missing")
|
||||
}
|
||||
|
||||
config4 := strings.NewReader(`
|
||||
[connection]
|
||||
uri = https://localhost:8443/ovirt-engine/api
|
||||
`)
|
||||
|
||||
_, err4 := cloudprovider.GetCloudProvider("ovirt", config4)
|
||||
if err4 != nil {
|
||||
t.Fatalf("Unexpected error creating the provider: %s", err4)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOVirtCloudXmlParsing(t *testing.T) {
|
||||
body1 := (io.Reader)(nil)
|
||||
|
||||
_, err1 := getInstancesFromXml(body1)
|
||||
if err1 == nil {
|
||||
t.Fatalf("An error is expected when body is missing")
|
||||
}
|
||||
|
||||
body2 := strings.NewReader("")
|
||||
|
||||
_, err2 := getInstancesFromXml(body2)
|
||||
if err2 == nil {
|
||||
t.Fatalf("An error is expected when body is empty")
|
||||
}
|
||||
|
||||
body3 := strings.NewReader(`
|
||||
<vms>
|
||||
<vm></vm>
|
||||
</vms>
|
||||
`)
|
||||
|
||||
instances3, err3 := getInstancesFromXml(body3)
|
||||
if err3 != nil {
|
||||
t.Fatalf("Unexpected error listing instances: %s", err3)
|
||||
}
|
||||
if len(instances3) > 0 {
|
||||
t.Fatalf("Unexpected number of instance(s): %d", len(instances3))
|
||||
}
|
||||
|
||||
body4 := strings.NewReader(`
|
||||
<vms>
|
||||
<vm>
|
||||
<status><state>Up</state></status>
|
||||
<guest_info><fqdn>host1</fqdn></guest_info>
|
||||
</vm>
|
||||
<vm>
|
||||
<!-- empty -->
|
||||
</vm>
|
||||
<vm>
|
||||
<status><state>Up</state></status>
|
||||
</vm>
|
||||
<vm>
|
||||
<status><state>Down</state></status>
|
||||
<guest_info><fqdn>host2</fqdn></guest_info>
|
||||
</vm>
|
||||
<vm>
|
||||
<status><state>Up</state></status>
|
||||
<guest_info><fqdn>host3</fqdn></guest_info>
|
||||
</vm>
|
||||
</vms>
|
||||
`)
|
||||
|
||||
instances4, err4 := getInstancesFromXml(body4)
|
||||
if err4 != nil {
|
||||
t.Fatalf("Unexpected error listing instances: %s", err4)
|
||||
}
|
||||
if len(instances4) != 2 {
|
||||
t.Fatalf("Unexpected number of instance(s): %d", len(instances4))
|
||||
}
|
||||
|
||||
names := instances4.ListSortedNames()
|
||||
if names[0] != "host1" || names[1] != "host3" {
|
||||
t.Fatalf("Unexpected instance(s): %s", instances4)
|
||||
}
|
||||
}
|
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD
generated
vendored
Normal file
|
@@ -0,0 +1,48 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["photon.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/vmware/photon-controller-go-sdk/photon",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["photon_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/rand",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/OWNERS
generated
vendored
Normal file
|
@@ -0,0 +1,4 @@
|
|||
maintainers:
|
||||
- luomiao
|
||||
- kerneltime
|
||||
- abrarshivani
|
592
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go
generated
vendored
Normal file
|
@@ -0,0 +1,592 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This version of Photon cloud provider supports the disk interface
|
||||
// for Photon persistent disk volume plugin. LoadBalancer, Routes, and
|
||||
// Zones are currently not supported.
|
||||
// Using the Photon cloud provider requires starting kubelet, kube-apiserver,
|
||||
// and kube-controller-manager with config flag: '--cloud-provider=photon
|
||||
// --cloud-config=[path_to_config_file]'. When running multi-node kubernetes
|
||||
// using docker, the config file should be located inside /etc/kubernetes.
|
||||
package photon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/vmware/photon-controller-go-sdk/photon"
|
||||
"gopkg.in/gcfg.v1"
|
||||
k8stypes "k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const (
|
||||
ProviderName = "photon"
|
||||
DiskSpecKind = "persistent-disk"
|
||||
)
|
||||
|
||||
// Global variable pointing to photon client
|
||||
var photonClient *photon.Client
|
||||
var logger *log.Logger = nil
|
||||
|
||||
// overrideIP indicates if the hostname is overridden by IP address, such as when
|
||||
// running multi-node kubernetes using docker. In this case the user should set
|
||||
// overrideIP = true in cloud config file. Default value is false.
|
||||
var overrideIP bool = false
|
||||
|
||||
// PCCloud is an implementation of the cloud provider interface for Photon Controller.
|
||||
type PCCloud struct {
|
||||
cfg *PCConfig
|
||||
// InstanceID of the server where this PCCloud object is instantiated.
|
||||
localInstanceID string
|
||||
// local $HOSTNAME
|
||||
localHostname string
|
||||
// hostname from K8S, could be overridden
|
||||
localK8sHostname string
|
||||
// Photon project ID. We assume that there is only one Photon Controller project
|
||||
// in the environment per current Photon Controller deployment methodology.
|
||||
projID string
|
||||
cloudprovider.Zone
|
||||
}
|
||||
|
||||
type PCConfig struct {
|
||||
Global struct {
|
||||
// the Photon Controller endpoint IP address
|
||||
CloudTarget string `gcfg:"target"`
|
||||
// set to true to skip verification of the Photon Controller endpoint certificate;
|
||||
// otherwise, set to false.
|
||||
IgnoreCertificate bool `gcfg:"ignoreCertificate"`
|
||||
// Photon Controller tenant name
|
||||
Tenant string `gcfg:"tenant"`
|
||||
// Photon Controller project name
|
||||
Project string `gcfg:"project"`
|
||||
// when kubelet is started with '--hostname-override=${IP_ADDRESS}', set to true;
|
||||
// otherwise, set to false.
|
||||
OverrideIP bool `gcfg:"overrideIP"`
|
||||
}
|
||||
}
|
||||
|
||||
// Disks is an interface for manipulating Photon Controller persistent disks.
|
||||
type Disks interface {
|
||||
// AttachDisk attaches given disk to given node. Current node
|
||||
// is used when nodeName is empty string.
|
||||
AttachDisk(pdID string, nodeName k8stypes.NodeName) error
|
||||
|
||||
// DetachDisk detaches given disk from given node. Current node
|
||||
// is used when nodeName is empty string.
|
||||
DetachDisk(pdID string, nodeName k8stypes.NodeName) error
|
||||
|
||||
// DiskIsAttached checks if a disk is attached to the given node.
|
||||
DiskIsAttached(pdID string, nodeName k8stypes.NodeName) (bool, error)
|
||||
|
||||
// DisksAreAttached is a batch function to check if a list of disks are attached
|
||||
// to the node with the specified NodeName.
|
||||
DisksAreAttached(pdIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error)
|
||||
|
||||
// CreateDisk creates a new PD with given properties.
|
||||
CreateDisk(volumeOptions *VolumeOptions) (pdID string, err error)
|
||||
|
||||
// DeleteDisk deletes PD.
|
||||
DeleteDisk(pdID string) error
|
||||
}
|
||||
|
||||
// VolumeOptions specifies capacity, tags, name and flavorID for a volume.
|
||||
type VolumeOptions struct {
|
||||
CapacityGB int
|
||||
Tags map[string]string
|
||||
Name string
|
||||
Flavor string
|
||||
}
|
||||
|
||||
func readConfig(config io.Reader) (PCConfig, error) {
|
||||
if config == nil {
|
||||
err := fmt.Errorf("cloud provider config file is missing. Please restart kubelet with --cloud-provider=photon --cloud-config=[path_to_config_file]")
|
||||
return PCConfig{}, err
|
||||
}
|
||||
|
||||
var cfg PCConfig
|
||||
err := gcfg.ReadInto(&cfg, config)
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
|
||||
cfg, err := readConfig(config)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: failed to read in cloud provider config file. Error[%v]", err)
|
||||
return nil, err
|
||||
}
|
||||
return newPCCloud(cfg)
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve the Photon VM ID from the Photon Controller endpoint based on the node name
|
||||
func getVMIDbyNodename(project string, nodeName string) (string, error) {
|
||||
vmList, err := photonClient.Projects.GetVMs(project, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to GetVMs from project %s with nodeName %s, error: [%v]", project, nodeName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, vm := range vmList.Items {
|
||||
if vm.Name == nodeName {
|
||||
return vm.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("No matching started VM is found with name %s", nodeName)
|
||||
}
|
||||
|
||||
// Retrieve the Photon VM ID from the Photon Controller endpoint based on the IP address
|
||||
func getVMIDbyIP(project string, IPAddress string) (string, error) {
|
||||
vmList, err := photonClient.Projects.GetVMs(project, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. error: [%v]", project, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, vm := range vmList.Items {
|
||||
task, err := photonClient.VMs.GetNetworks(vm.ID)
|
||||
if err != nil {
|
||||
glog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err)
|
||||
} else {
|
||||
task, err = photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Warning("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err)
|
||||
} else {
|
||||
networkConnections := task.ResourceProperties.(map[string]interface{})
|
||||
networks := networkConnections["networkConnections"].([]interface{})
|
||||
for _, nt := range networks {
|
||||
network := nt.(map[string]interface{})
|
||||
if val, ok := network["ipAddress"]; ok && val != nil {
|
||||
ipAddr := val.(string)
|
||||
if ipAddr == IPAddress {
|
||||
return vm.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("No matching VM is found with IP %s", IPAddress)
|
||||
}
|
||||
|
||||
// Retrieve the Photon project ID from the Photon Controller endpoint based on the project name
|
||||
func getProjIDbyName(tenantName, projName string) (string, error) {
|
||||
tenants, err := photonClient.Tenants.GetAll()
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: GetAll tenants failed with error [%v].", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, tenant := range tenants.Items {
|
||||
if tenant.Name == tenantName {
|
||||
projects, err := photonClient.Tenants.GetProjects(tenant.ID, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to GetProjects for tenant %s. error [%v]", tenantName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, project := range projects.Items {
|
||||
if project.Name == projName {
|
||||
return project.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("No matching tenant/project name is found with %s/%s", tenantName, projName)
|
||||
}
|
||||
|
||||
func newPCCloud(cfg PCConfig) (*PCCloud, error) {
|
||||
if len(cfg.Global.CloudTarget) == 0 {
|
||||
return nil, fmt.Errorf("Photon Controller endpoint was not specified.")
|
||||
}
|
||||
|
||||
// Currently we support Photon Controller endpoint with authentication disabled.
|
||||
options := &photon.ClientOptions{
|
||||
IgnoreCertificate: cfg.Global.IgnoreCertificate,
|
||||
}
|
||||
|
||||
photonClient = photon.NewClient(cfg.Global.CloudTarget, options, logger)
|
||||
status, err := photonClient.Status.Get()
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: new client creation failed. Error[%v]", err)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(2).Info("Photon Cloud Provider: Status of the new photon controller client: %v", status)
|
||||
|
||||
// Get Photon Controller project ID for future use
|
||||
projID, err := getProjIDbyName(cfg.Global.Tenant, cfg.Global.Project)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: getProjIDbyName failed when creating new Photon Controller client. Error[%v]", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get local hostname for localInstanceID
|
||||
cmd := exec.Command("bash", "-c", `echo $HOSTNAME`)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: get local hostname bash command failed. Error[%v]", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(out) == 0 {
|
||||
glog.Errorf("unable to retrieve hostname for Instance ID")
|
||||
return nil, fmt.Errorf("unable to retrieve hostname for Instance ID")
|
||||
}
|
||||
hostname := strings.TrimRight(string(out), "\n")
|
||||
vmID, err := getVMIDbyNodename(projID, hostname)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: getVMIDbyNodename failed when creating new Photon Controller client. Error[%v]", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pc := PCCloud{
|
||||
cfg: &cfg,
|
||||
localInstanceID: vmID,
|
||||
localHostname: hostname,
|
||||
localK8sHostname: "",
|
||||
projID: projID,
|
||||
}
|
||||
|
||||
overrideIP = cfg.Global.OverrideIP
|
||||
|
||||
return &pc, nil
|
||||
}
|
||||
|
||||
// Instances returns an implementation of Instances for Photon Controller.
|
||||
func (pc *PCCloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return pc, true
|
||||
}
|
||||
|
||||
// List is an implementation of Instances.List.
|
||||
func (pc *PCCloud) List(filter string) ([]k8stypes.NodeName, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NodeAddresses is an implementation of Instances.NodeAddresses.
|
||||
func (pc *PCCloud) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
|
||||
addrs := []v1.NodeAddress{}
|
||||
name := string(nodeName)
|
||||
|
||||
var vmID string
|
||||
var err error
|
||||
if name == pc.localK8sHostname {
|
||||
vmID = pc.localInstanceID
|
||||
} else {
|
||||
vmID, err = getInstanceID(name, pc.projID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: getInstanceID failed for NodeAddresses. Error[%v]", err)
|
||||
return addrs, err
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the Photon VM's IP addresses from the Photon Controller endpoint based on the VM ID
|
||||
vmList, err := photonClient.Projects.GetVMs(pc.projID, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. Error[%v]", pc.projID, err)
|
||||
return addrs, err
|
||||
}
|
||||
|
||||
for _, vm := range vmList.Items {
|
||||
if vm.ID == vmID {
|
||||
task, err := photonClient.VMs.GetNetworks(vm.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err)
|
||||
return addrs, err
|
||||
} else {
|
||||
task, err = photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Wait task for GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err)
|
||||
return addrs, err
|
||||
} else {
|
||||
networkConnections := task.ResourceProperties.(map[string]interface{})
|
||||
networks := networkConnections["networkConnections"].([]interface{})
|
||||
for _, nt := range networks {
|
||||
network := nt.(map[string]interface{})
|
||||
if val, ok := network["ipAddress"]; ok && val != nil {
|
||||
ipAddr := val.(string)
|
||||
if ipAddr != "-" {
|
||||
v1.AddToNodeAddresses(&addrs,
|
||||
v1.NodeAddress{
|
||||
// TODO: figure out the type of the IP
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: ipAddr,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
return addrs, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
glog.Errorf("Failed to find the node %s from Photon Controller endpoint", name)
|
||||
return addrs, fmt.Errorf("Failed to find the node %s from Photon Controller endpoint", name)
|
||||
}
|
||||
|
||||
func (pc *PCCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (pc *PCCloud) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
|
||||
pc.localK8sHostname = hostname
|
||||
return k8stypes.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
func getInstanceID(name string, projID string) (string, error) {
|
||||
var vmID string
|
||||
var err error
|
||||
|
||||
if overrideIP == true {
|
||||
vmID, err = getVMIDbyIP(projID, name)
|
||||
} else {
|
||||
vmID, err = getVMIDbyNodename(projID, name)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if vmID == "" {
|
||||
err = cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return vmID, err
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
|
||||
func (pc *PCCloud) ExternalID(nodeName k8stypes.NodeName) (string, error) {
|
||||
name := string(nodeName)
|
||||
if name == pc.localK8sHostname {
|
||||
return pc.localInstanceID, nil
|
||||
} else {
|
||||
ID, err := getInstanceID(name, pc.projID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: getInstanceID failed for ExternalID. Error[%v]", err)
|
||||
return ID, err
|
||||
} else {
|
||||
return ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
func (pc *PCCloud) InstanceID(nodeName k8stypes.NodeName) (string, error) {
|
||||
name := string(nodeName)
|
||||
if name == pc.localK8sHostname {
|
||||
return pc.localInstanceID, nil
|
||||
} else {
|
||||
ID, err := getInstanceID(name, pc.projID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: getInstanceID failed for InstanceID. Error[%v]", err)
|
||||
return ID, err
|
||||
} else {
|
||||
return ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *PCCloud) InstanceType(nodeName k8stypes.NodeName) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (pc *PCCloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (pc *PCCloud) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// LoadBalancer returns an implementation of LoadBalancer for Photon Controller.
|
||||
func (pc *PCCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Zones returns an implementation of Zones for Photon Controller.
|
||||
func (pc *PCCloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return pc, true
|
||||
}
|
||||
|
||||
func (pc *PCCloud) GetZone() (cloudprovider.Zone, error) {
|
||||
return pc.Zone, nil
|
||||
}
|
||||
|
||||
// Routes returns a false since the interface is not supported for photon controller.
|
||||
func (pc *PCCloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (pc *PCCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
// Attaches given virtual disk volume to the compute running kubelet.
|
||||
func (pc *PCCloud) AttachDisk(pdID string, nodeName k8stypes.NodeName) error {
|
||||
operation := &photon.VmDiskOperation{
|
||||
DiskID: pdID,
|
||||
}
|
||||
|
||||
vmID, err := pc.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for AttachDisk. Error[%v]", err)
|
||||
return err
|
||||
}
|
||||
|
||||
task, err := photonClient.VMs.AttachDisk(vmID, operation)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to attach disk with pdID %s. Error[%v]", pdID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to wait for task to attach disk with pdID %s. Error[%v]", pdID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Detaches given virtual disk volume from the compute running kubelet.
|
||||
func (pc *PCCloud) DetachDisk(pdID string, nodeName k8stypes.NodeName) error {
|
||||
operation := &photon.VmDiskOperation{
|
||||
DiskID: pdID,
|
||||
}
|
||||
|
||||
vmID, err := pc.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DetachDisk. Error[%v]", err)
|
||||
return err
|
||||
}
|
||||
|
||||
task, err := photonClient.VMs.DetachDisk(vmID, operation)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", pdID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]", pdID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiskIsAttached returns whether the disk is attached to the VM using controllers supported by the plugin.
|
||||
func (pc *PCCloud) DiskIsAttached(pdID string, nodeName k8stypes.NodeName) (bool, error) {
|
||||
disk, err := photonClient.Disks.Get(pdID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to Get disk with pdID %s. Error[%v]", pdID, err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
vmID, err := pc.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, vm := range disk.VMs {
|
||||
if strings.Compare(vm, vmID) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// DisksAreAttached returns whether the given disks are attached to the VM using controllers supported by the plugin.
|
||||
func (pc *PCCloud) DisksAreAttached(pdIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, pdID := range pdIDs {
|
||||
attached[pdID] = false
|
||||
}
|
||||
|
||||
vmID, err := pc.InstanceID(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err)
|
||||
return attached, err
|
||||
}
|
||||
|
||||
for _, pdID := range pdIDs {
|
||||
disk, err := photonClient.Disks.Get(pdID)
|
||||
if err != nil {
|
||||
glog.Warningf("Photon Cloud Provider: failed to get VMs for persistent disk %s, err [%v]", pdID, err)
|
||||
} else {
|
||||
for _, vm := range disk.VMs {
|
||||
if vm == vmID {
|
||||
attached[pdID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
// CreateDisk creates a persistent disk of the given size (in GB).
|
||||
func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err error) {
|
||||
diskSpec := photon.DiskCreateSpec{}
|
||||
diskSpec.Name = volumeOptions.Name
|
||||
diskSpec.Flavor = volumeOptions.Flavor
|
||||
diskSpec.CapacityGB = volumeOptions.CapacityGB
|
||||
diskSpec.Kind = DiskSpecKind
|
||||
|
||||
task, err := photonClient.Projects.CreateDisk(pc.projID, &diskSpec)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to CreateDisk. Error[%v]", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
waitTask, err := photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return waitTask.Entity.ID, nil
|
||||
}
|
||||
|
||||
// Deletes the volume with the given pdID.
|
||||
func (pc *PCCloud) DeleteDisk(pdID string) error {
|
||||
task, err := photonClient.Disks.Delete(pdID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to DeleteDisk. Error[%v]", err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = photonClient.Tasks.Wait(task.ID)
|
||||
if err != nil {
|
||||
glog.Errorf("Photon Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
216
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon_test.go
generated
vendored
Normal file
|
@ -0,0 +1,216 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package photon
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
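// configFromEnv builds a PCConfig from the PHOTON_* environment variables; ok reports whether enough of them are set to run the acceptance tests.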
func configFromEnv() (TestVM string, TestFlavor string, cfg PCConfig, ok bool) {
|
||||
var IgnoreCertificate bool
|
||||
var OverrideIP bool
|
||||
var err error
|
||||
cfg.Global.CloudTarget = os.Getenv("PHOTON_TARGET")
|
||||
cfg.Global.Tenant = os.Getenv("PHOTON_TENANT")
|
||||
cfg.Global.Project = os.Getenv("PHOTON_PROJECT")
|
||||
if os.Getenv("PHOTON_IGNORE_CERTIFICATE") != "" {
|
||||
IgnoreCertificate, err = strconv.ParseBool(os.Getenv("PHOTON_IGNORE_CERTIFICATE"))
|
||||
} else {
|
||||
IgnoreCertificate = false
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cfg.Global.IgnoreCertificate = IgnoreCertificate
|
||||
if os.Getenv("PHOTON_OVERRIDE_IP") != "" {
|
||||
OverrideIP, err = strconv.ParseBool(os.Getenv("PHOTON_OVERRIDE_IP"))
|
||||
} else {
|
||||
OverrideIP = false
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cfg.Global.OverrideIP = OverrideIP
|
||||
|
||||
TestVM = os.Getenv("PHOTON_TEST_VM")
|
||||
if os.Getenv("PHOTON_TEST_FLAVOR") != "" {
|
||||
TestFlavor = os.Getenv("PHOTON_TEST_FLAVOR")
|
||||
} else {
|
||||
TestFlavor = ""
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ok = (cfg.Global.CloudTarget != "" &&
|
||||
cfg.Global.Tenant != "" &&
|
||||
cfg.Global.Project != "" &&
|
||||
TestVM != "")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
target = 0.0.0.0
|
||||
ignoreCertificate = true
|
||||
tenant = tenant
|
||||
project = project
|
||||
overrideIP = false
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Global.CloudTarget != "0.0.0.0" {
|
||||
t.Errorf("incorrect photon target ip: %s", cfg.Global.CloudTarget)
|
||||
}
|
||||
|
||||
if cfg.Global.Tenant != "tenant" {
|
||||
t.Errorf("incorrect tenant: %s", cfg.Global.Tenant)
|
||||
}
|
||||
|
||||
if cfg.Global.Project != "project" {
|
||||
t.Errorf("incorrect project: %s", cfg.Global.Project)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPCCloud(t *testing.T) {
|
||||
_, _, cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
_, err := newPCCloud(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create new Photon client: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInstances(t *testing.T) {
|
||||
testVM, _, cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
NodeName := types.NodeName(testVM)
|
||||
|
||||
pc, err := newPCCloud(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create new Photon client: %s", err)
|
||||
}
|
||||
|
||||
i, ok := pc.Instances()
|
||||
if !ok {
|
||||
t.Fatalf("Instances() returned false")
|
||||
}
|
||||
|
||||
externalId, err := i.ExternalID(NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.ExternalID(%s) failed: %s", testVM, err)
|
||||
}
|
||||
t.Logf("Found ExternalID(%s) = %s\n", testVM, externalId)
|
||||
|
||||
nonExistingVM := types.NodeName(rand.String(15))
|
||||
externalId, err = i.ExternalID(nonExistingVM)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
t.Logf("VM %s was not found as expected\n", nonExistingVM)
|
||||
} else if err == nil {
|
||||
t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
|
||||
} else {
|
||||
t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
|
||||
}
|
||||
|
||||
instanceId, err := i.InstanceID(NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.InstanceID(%s) failed: %s", testVM, err)
|
||||
}
|
||||
t.Logf("Found InstanceID(%s) = %s\n", testVM, instanceId)
|
||||
|
||||
instanceId, err = i.InstanceID(nonExistingVM)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
t.Logf("VM %s was not found as expected\n", nonExistingVM)
|
||||
} else if err == nil {
|
||||
t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
|
||||
} else {
|
||||
t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
|
||||
}
|
||||
|
||||
addrs, err := i.NodeAddresses(NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", testVM, err)
|
||||
}
|
||||
t.Logf("Found NodeAddresses(%s) = %s\n", testVM, addrs)
|
||||
}
|
||||
|
||||
func TestVolumes(t *testing.T) {
|
||||
testVM, testFlavor, cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
pc, err := newPCCloud(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create new Photon client: %s", err)
|
||||
}
|
||||
|
||||
NodeName := types.NodeName(testVM)
|
||||
|
||||
volumeOptions := &VolumeOptions{
|
||||
CapacityGB: 2,
|
||||
Tags: nil,
|
||||
Name: "kubernetes-test-volume-" + rand.String(10),
|
||||
Flavor: testFlavor}
|
||||
|
||||
pdID, err := pc.CreateDisk(volumeOptions)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create a Photon persistent disk: %v", err)
|
||||
}
|
||||
|
||||
err = pc.AttachDisk(pdID, NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot attach persistent disk(%s) to VM(%s): %v", pdID, testVM, err)
|
||||
}
|
||||
|
||||
_, err = pc.DiskIsAttached(pdID, NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot attach persistent disk(%s) to VM(%s): %v", pdID, testVM, err)
|
||||
}
|
||||
|
||||
err = pc.DetachDisk(pdID, NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot detach persisten disk(%s) from VM(%s): %v", pdID, testVM, err)
|
||||
}
|
||||
|
||||
err = pc.DeleteDisk(pdID)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot delete persisten disk(%s): %v", pdID, err)
|
||||
}
|
||||
}
|
31
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudprovider
|
||||
|
||||
import (
|
||||
// Cloud providers
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace"
|
||||
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
|
||||
)
|
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["rackspace.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/rackspace/gophercloud",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach",
|
||||
"//vendor:github.com/rackspace/gophercloud/openstack/compute/v2/servers",
|
||||
"//vendor:github.com/rackspace/gophercloud/pagination",
|
||||
"//vendor:github.com/rackspace/gophercloud/rackspace",
|
||||
"//vendor:github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes",
|
||||
"//vendor:github.com/rackspace/gophercloud/rackspace/compute/v2/servers",
|
||||
"//vendor:github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["rackspace_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor:github.com/rackspace/gophercloud"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
666
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/rackspace.go
generated
vendored
Normal file
|
@ -0,0 +1,666 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rackspace
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/gcfg.v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/rackspace/gophercloud"
|
||||
osvolumeattach "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
|
||||
osservers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/rackspace/gophercloud/pagination"
|
||||
"github.com/rackspace/gophercloud/rackspace"
|
||||
"github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes"
|
||||
"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
|
||||
"github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const ProviderName = "rackspace"
|
||||
const metaDataPath = "/media/configdrive/openstack/latest/meta_data.json"
|
||||
|
||||
var ErrNotFound = errors.New("Failed to find object")
|
||||
var ErrMultipleResults = errors.New("Multiple results where only one expected")
|
||||
var ErrNoAddressFound = errors.New("No address found for host")
|
||||
var ErrAttrNotFound = errors.New("Expected attribute not found")
|
||||
|
||||
// encoding.TextUnmarshaler interface for time.Duration
|
||||
type MyDuration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *MyDuration) UnmarshalText(text []byte) error {
|
||||
res, err := time.ParseDuration(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Duration = res
|
||||
return nil
|
||||
}
|
||||
|
||||
type MetaData struct {
|
||||
UUID string `json:"uuid"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type LoadBalancerOpts struct {
|
||||
SubnetId string `gcfg:"subnet-id"` // required
|
||||
CreateMonitor bool `gcfg:"create-monitor"`
|
||||
MonitorDelay MyDuration `gcfg:"monitor-delay"`
|
||||
MonitorTimeout MyDuration `gcfg:"monitor-timeout"`
|
||||
MonitorMaxRetries uint `gcfg:"monitor-max-retries"`
|
||||
}
|
||||
|
||||
// Rackspace is an implementation of cloud provider Interface for Rackspace.
|
||||
type Rackspace struct {
|
||||
provider *gophercloud.ProviderClient
|
||||
region string
|
||||
lbOpts LoadBalancerOpts
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Global struct {
|
||||
AuthUrl string `gcfg:"auth-url"`
|
||||
Username string
|
||||
UserId string `gcfg:"user-id"`
|
||||
Password string
|
||||
ApiKey string `gcfg:"api-key"`
|
||||
TenantId string `gcfg:"tenant-id"`
|
||||
TenantName string `gcfg:"tenant-name"`
|
||||
DomainId string `gcfg:"domain-id"`
|
||||
DomainName string `gcfg:"domain-name"`
|
||||
Region string
|
||||
}
|
||||
LoadBalancer LoadBalancerOpts
|
||||
}
|
||||
|
||||
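// probeNodeAddress resolves an address for the named server: it prefers the instance ID read from the local config drive and falls back to an API lookup by name.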
func probeNodeAddress(compute *gophercloud.ServiceClient, name string) (string, error) {
|
||||
id, err := readInstanceID()
|
||||
if err == nil {
|
||||
srv, err := servers.Get(compute, id).Extract()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return getAddressByServer(srv)
|
||||
}
|
||||
|
||||
ip, err := getAddressByName(compute, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
func probeInstanceID(client *gophercloud.ServiceClient, name string) (string, error) {
|
||||
// Attempt to read id from config drive.
|
||||
id, err := readInstanceID()
|
||||
if err == nil {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// Attempt to get the server by the name from the API
|
||||
server, err := getServerByName(client, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return server.ID, nil
|
||||
}
|
||||
|
||||
func parseMetaData(file io.Reader) (string, error) {
|
||||
metaDataBytes, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Cannot read %s: %v", file, err)
|
||||
}
|
||||
|
||||
metaData := MetaData{}
|
||||
err = json.Unmarshal(metaDataBytes, &metaData)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Cannot parse %s: %v", metaDataPath, err)
|
||||
}
|
||||
|
||||
return metaData.UUID, nil
|
||||
}
|
||||
|
||||
func readInstanceID() (string, error) {
|
||||
file, err := os.Open(metaDataPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Cannot open %s: %v", metaDataPath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseMetaData(file)
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
|
||||
cfg, err := readConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newRackspace(cfg)
|
||||
})
|
||||
}
|
||||
|
||||
func (cfg Config) toAuthOptions() gophercloud.AuthOptions {
|
||||
return gophercloud.AuthOptions{
|
||||
IdentityEndpoint: cfg.Global.AuthUrl,
|
||||
Username: cfg.Global.Username,
|
||||
UserID: cfg.Global.UserId,
|
||||
Password: cfg.Global.Password,
|
||||
APIKey: cfg.Global.ApiKey,
|
||||
TenantID: cfg.Global.TenantId,
|
||||
TenantName: cfg.Global.TenantName,
|
||||
|
||||
// Persistent service, so we need to be able to renew tokens
|
||||
AllowReauth: true,
|
||||
}
|
||||
}
|
||||
|
||||
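// readConfig parses a gcfg-style INI configuration for the Rackspace provider.
// A minimal sketch of the expected shape (placeholder values, mirroring the
// config exercised by rackspace_test.go below):
//
//	[Global]
//	auth-url = http://auth.url
//	username = user
//	[LoadBalancer]
//	create-monitor = yes
//	monitor-delay = 1m
//	monitor-timeout = 30s
//	monitor-max-retries = 3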
func readConfig(config io.Reader) (Config, error) {
|
||||
if config == nil {
|
||||
err := fmt.Errorf("no Rackspace cloud provider config file given")
|
||||
return Config{}, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
err := gcfg.ReadInto(&cfg, config)
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
func newRackspace(cfg Config) (*Rackspace, error) {
|
||||
provider, err := rackspace.AuthenticatedClient(cfg.toAuthOptions())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
os := Rackspace{
|
||||
provider: provider,
|
||||
region: cfg.Global.Region,
|
||||
lbOpts: cfg.LoadBalancer,
|
||||
}
|
||||
|
||||
return &os, nil
|
||||
}
|
||||
|
||||
type Instances struct {
|
||||
compute *gophercloud.ServiceClient
|
||||
}
|
||||
|
||||
// Instances returns an implementation of Instances for Rackspace.
|
||||
func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
|
||||
glog.V(2).Info("rackspace.Instances() called")
|
||||
|
||||
compute, err := os.getComputeClient()
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to find compute endpoint: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
glog.V(1).Info("Claiming to support Instances")
|
||||
|
||||
return &Instances{compute}, true
|
||||
}
|
||||
|
||||
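// serverHasAddress reports whether ip matches any of the server's private, public, or access addresses.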
func serverHasAddress(srv osservers.Server, ip string) bool {
|
||||
if ip == firstAddr(srv.Addresses["private"]) {
|
||||
return true
|
||||
}
|
||||
if ip == firstAddr(srv.Addresses["public"]) {
|
||||
return true
|
||||
}
|
||||
if ip == srv.AccessIPv4 {
|
||||
return true
|
||||
}
|
||||
if ip == srv.AccessIPv6 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
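// getServerByAddress walks the full server list and returns the single server whose addresses include the given IP, returning ErrNotFound or ErrMultipleResults otherwise.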
func getServerByAddress(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) {
|
||||
pager := servers.List(client, nil)
|
||||
|
||||
serverList := make([]osservers.Server, 0, 1)
|
||||
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
s, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, v := range s {
|
||||
if serverHasAddress(v, name) {
|
||||
serverList = append(serverList, v)
|
||||
}
|
||||
}
|
||||
if len(serverList) > 1 {
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(serverList) == 0 {
|
||||
return nil, ErrNotFound
|
||||
} else if len(serverList) > 1 {
|
||||
return nil, ErrMultipleResults
|
||||
}
|
||||
|
||||
return &serverList[0], nil
|
||||
}
|
||||
|
||||
func getServerByName(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) {
|
||||
if net.ParseIP(name) != nil {
|
||||
// we're an IP, so we'll have to walk the full list of servers to
|
||||
// figure out which one we are.
|
||||
return getServerByAddress(client, name)
|
||||
}
|
||||
opts := osservers.ListOpts{
|
||||
Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(name)),
|
||||
Status: "ACTIVE",
|
||||
}
|
||||
pager := servers.List(client, opts)
|
||||
|
||||
serverList := make([]osservers.Server, 0, 1)
|
||||
|
||||
err := pager.EachPage(func(page pagination.Page) (bool, error) {
|
||||
s, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
serverList = append(serverList, s...)
|
||||
if len(serverList) > 1 {
|
||||
return false, ErrMultipleResults
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(serverList) == 0 {
|
||||
return nil, ErrNotFound
|
||||
} else if len(serverList) > 1 {
|
||||
return nil, ErrMultipleResults
|
||||
}
|
||||
|
||||
return &serverList[0], nil
|
||||
}
|
||||
|
||||
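// firstAddr pulls the first "addr" entry out of a nova address blob, which (per
// the type assertions below) is expected to look like:
//	[]interface{}{map[string]interface{}{"addr": "10.0.0.1", ...}, ...}
// It returns "" when the blob does not have that shape.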
func firstAddr(netblob interface{}) string {
|
||||
// Run-time types for the win :(
|
||||
list, ok := netblob.([]interface{})
|
||||
if !ok || len(list) < 1 {
|
||||
return ""
|
||||
}
|
||||
props, ok := list[0].(map[string]interface{})
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
tmp, ok := props["addr"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
addr, ok := tmp.(string)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
func getAddressByServer(srv *osservers.Server) (string, error) {
|
||||
s := firstAddr(srv.Addresses["private"])
|
||||
if s == "" {
|
||||
s = firstAddr(srv.Addresses["public"])
|
||||
}
|
||||
if s == "" {
|
||||
s = srv.AccessIPv4
|
||||
}
|
||||
if s == "" {
|
||||
s = srv.AccessIPv6
|
||||
}
|
||||
if s == "" {
|
||||
return "", ErrNoAddressFound
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func getAddressByName(api *gophercloud.ServiceClient, name string) (string, error) {
|
||||
srv, err := getServerByName(api, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getAddressByServer(srv)
|
||||
}
|
||||
|
||||
func (i *Instances) NodeAddresses(nodeName types.NodeName) ([]v1.NodeAddress, error) {
|
||||
glog.V(2).Infof("NodeAddresses(%v) called", nodeName)
|
||||
serverName := mapNodeNameToServerName(nodeName)
|
||||
ip, err := probeNodeAddress(i.compute, serverName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("NodeAddresses(%v) => %v", serverName, ip)
|
||||
|
||||
// net.ParseIP().String() is to maintain compatibility with the old code
|
||||
parsedIP := net.ParseIP(ip).String()
|
||||
return []v1.NodeAddress{
|
||||
{Type: v1.NodeLegacyHostIP, Address: parsedIP},
|
||||
{Type: v1.NodeInternalIP, Address: parsedIP},
|
||||
{Type: v1.NodeExternalIP, Address: parsedIP},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapNodeNameToServerName maps from a k8s NodeName to a rackspace Server Name
|
||||
// This is a simple string cast.
|
||||
func mapNodeNameToServerName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
||||
|
||||
// mapServerToNodeName maps a rackspace Server to an k8s NodeName
|
||||
func mapServerToNodeName(s *osservers.Server) types.NodeName {
|
||||
return types.NodeName(s.Name)
|
||||
}
|
||||
|
||||
// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
|
||||
func (i *Instances) ExternalID(nodeName types.NodeName) (string, error) {
|
||||
serverName := mapNodeNameToServerName(nodeName)
|
||||
return probeInstanceID(i.compute, serverName)
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the kubelet's instance.
|
||||
func (rs *Rackspace) InstanceID() (string, error) {
|
||||
return readInstanceID()
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the node with the specified Name.
|
||||
func (i *Instances) InstanceID(nodeName types.NodeName) (string, error) {
|
||||
serverName := mapNodeNameToServerName(nodeName)
|
||||
return probeInstanceID(i.compute, serverName)
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
func (i *Instances) InstanceType(name types.NodeName) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// Implementation of Instances.CurrentNodeName
|
||||
func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) {
|
||||
// Beware when changing this, nodename == hostname assumption is crucial to
|
||||
// apiserver => kubelet communication.
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (os *Rackspace) ProviderName() string {
|
||||
return ProviderName
|
||||
}
|
||||
|
||||
// ScrubDNS filters DNS settings for pods.
|
||||
func (os *Rackspace) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
|
||||
return nameservers, searches
|
||||
}
|
||||
|
||||
func (os *Rackspace) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (os *Rackspace) Zones() (cloudprovider.Zones, bool) {
|
||||
glog.V(1).Info("Claiming to support Zones")
|
||||
|
||||
return os, true
|
||||
}
|
||||
|
||||
func (os *Rackspace) Routes() (cloudprovider.Routes, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (os *Rackspace) GetZone() (cloudprovider.Zone, error) {
|
||||
glog.V(1).Infof("Current zone is %v", os.region)
|
||||
|
||||
return cloudprovider.Zone{Region: os.region}, nil
|
||||
}
|
||||
|
||||
// Create a volume of given size (in GiB)
|
||||
func (rs *Rackspace) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeName string, err error) {
|
||||
return "", errors.New("unimplemented")
|
||||
}
|
||||
|
||||
func (rs *Rackspace) DeleteVolume(volumeName string) error {
|
||||
return errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// Attaches given cinder volume to the compute running kubelet
|
||||
func (rs *Rackspace) AttachDisk(instanceID string, diskName string) (string, error) {
|
||||
disk, err := rs.getVolume(diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
compute, err := rs.getComputeClient()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(disk.Attachments) > 0 {
|
||||
if instanceID == disk.Attachments[0]["server_id"] {
|
||||
glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
|
||||
return disk.ID, nil
|
||||
}
|
||||
|
||||
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
|
||||
glog.Errorf(errMsg)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
|
||||
_, err = volumeattach.Create(compute, instanceID, &osvolumeattach.CreateOpts{
|
||||
VolumeID: disk.ID,
|
||||
}).Extract()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to attach %s volume to %s compute", diskName, instanceID)
|
||||
return "", err
|
||||
}
|
||||
glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
|
||||
return disk.ID, nil
|
||||
}
|
||||
|
||||
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
|
||||
func (rs *Rackspace) GetDevicePath(diskId string) string {
|
||||
volume, err := rs.getVolume(diskId)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
attachments := volume.Attachments
|
||||
if len(attachments) != 1 {
|
||||
glog.Warningf("Unexpected number of volume attachments on %s: %d", diskId, len(attachments))
|
||||
return ""
|
||||
}
|
||||
return attachments[0]["device"].(string)
|
||||
}
|
||||
|
||||
// Takes a partial/full disk id or diskname
|
||||
func (rs *Rackspace) getVolume(diskName string) (volumes.Volume, error) {
|
||||
sClient, err := rackspace.NewBlockStorageV1(rs.provider, gophercloud.EndpointOpts{
|
||||
Region: rs.region,
|
||||
})
|
||||
|
||||
var volume volumes.Volume
|
||||
if err != nil || sClient == nil {
|
||||
glog.Errorf("Unable to initialize cinder client for region: %s", rs.region)
|
||||
return volume, err
|
||||
}
|
||||
|
||||
err = volumes.List(sClient).EachPage(func(page pagination.Page) (bool, error) {
|
||||
vols, err := volumes.ExtractVolumes(page)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to extract volumes: %v", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, v := range vols {
|
||||
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
|
||||
if v.Name == diskName || strings.Contains(v.ID, diskName) {
|
||||
volume = v
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// if it reached here then no disk with the given name was found.
|
||||
errmsg := fmt.Sprintf("Unable to find disk: %s in region %s", diskName, rs.region)
|
||||
return false, errors.New(errmsg)
|
||||
})
|
||||
if err != nil {
|
||||
glog.Errorf("Error occurred getting volume: %s", diskName)
|
||||
}
|
||||
return volume, err
|
||||
}
|
||||
|
||||
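// getComputeClient builds a Rackspace nova (compute v2) client scoped to the configured region.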
func (rs *Rackspace) getComputeClient() (*gophercloud.ServiceClient, error) {
|
||||
client, err := rackspace.NewComputeV2(rs.provider, gophercloud.EndpointOpts{
|
||||
Region: rs.region,
|
||||
})
|
||||
if err != nil || client == nil {
|
||||
glog.Errorf("Unable to initialize nova client for region: %s", rs.region)
|
||||
}
|
||||
return client, err
|
||||
}
|
||||
|
||||
// Detaches given cinder volume from the compute running kubelet
|
||||
func (rs *Rackspace) DetachDisk(instanceID string, partialDiskId string) error {
|
||||
disk, err := rs.getVolume(partialDiskId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compute, err := rs.getComputeClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(disk.Attachments) > 1 {
|
||||
// Rackspace does not support "multiattach", this is a sanity check.
|
||||
errmsg := fmt.Sprintf("Volume %s is attached to multiple instances, which is not supported by this provider.", disk.ID)
|
||||
return errors.New(errmsg)
|
||||
}
|
||||
|
||||
if len(disk.Attachments) > 0 && instanceID == disk.Attachments[0]["server_id"] {
|
||||
// This is a blocking call and affects kubelet's performance directly.
|
||||
// We should consider kicking it out into a separate routine, if it is bad.
|
||||
err = volumeattach.Delete(compute, instanceID, disk.ID).ExtractErr()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, instanceID, err)
|
||||
return err
|
||||
}
|
||||
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, instanceID)
|
||||
} else {
|
||||
errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, instanceID)
|
||||
glog.Errorf(errMsg)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the device path of the volume attached to the compute running kubelet, as known by cinder
|
||||
func (rs *Rackspace) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
|
||||
// See issue #33128 - Cinder does not always tell you the right device path, as such
|
||||
// we must only use this value as a last resort.
|
||||
disk, err := rs.getVolume(diskName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
|
||||
if instanceID == disk.Attachments[0]["server_id"] {
|
||||
// Attachment[0]["device"] points to the device path
|
||||
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
|
||||
return disk.Attachments[0]["device"].(string), nil
|
||||
} else {
|
||||
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
|
||||
glog.Errorf(errMsg)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("volume %s is not attached to %s", diskName, instanceID)
|
||||
}
|
||||
|
||||
// query if a volume is attached to a compute instance
|
||||
func (rs *Rackspace) DiskIsAttached(diskName, instanceID string) (bool, error) {
|
||||
disk, err := rs.getVolume(diskName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// query if a list of volumes is attached to a compute instance
|
||||
func (rs *Rackspace) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
var returnedErr error
|
||||
for _, diskName := range diskNames {
|
||||
result, err := rs.DiskIsAttached(diskName, instanceID)
|
||||
if err != nil {
|
||||
returnedErr = fmt.Errorf("Error in checking disk %q attached: %v \n %v", diskName, err, returnedErr)
|
||||
continue
|
||||
}
|
||||
if result {
|
||||
attached[diskName] = true
|
||||
}
|
||||
|
||||
}
|
||||
return attached, returnedErr
|
||||
}
|
||||
|
||||
// query if we should trust the cinder-provided deviceName; see issue #33128
|
||||
func (rs *Rackspace) ShouldTrustDevicePath() bool {
|
||||
return true
|
||||
}
|
164
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/rackspace_test.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rackspace
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rackspace/gophercloud"
|
||||
)
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
auth-url = http://auth.url
|
||||
username = user
|
||||
[LoadBalancer]
|
||||
create-monitor = yes
|
||||
monitor-delay = 1m
|
||||
monitor-timeout = 30s
|
||||
monitor-max-retries = 3
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %s", err)
|
||||
}
|
||||
if cfg.Global.AuthUrl != "http://auth.url" {
|
||||
t.Errorf("incorrect authurl: %s", cfg.Global.AuthUrl)
|
||||
}
|
||||
|
||||
if !cfg.LoadBalancer.CreateMonitor {
|
||||
t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorDelay.Duration != 1*time.Minute {
|
||||
t.Errorf("incorrect lb.monitordelay: %s", cfg.LoadBalancer.MonitorDelay)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorTimeout.Duration != 30*time.Second {
|
||||
t.Errorf("incorrect lb.monitortimeout: %s", cfg.LoadBalancer.MonitorTimeout)
|
||||
}
|
||||
if cfg.LoadBalancer.MonitorMaxRetries != 3 {
|
||||
t.Errorf("incorrect lb.monitormaxretries: %d", cfg.LoadBalancer.MonitorMaxRetries)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAuthOptions(t *testing.T) {
|
||||
cfg := Config{}
|
||||
cfg.Global.Username = "user"
|
||||
// etc.
|
||||
|
||||
ao := cfg.toAuthOptions()
|
||||
|
||||
if !ao.AllowReauth {
|
||||
t.Errorf("Will need to be able to reauthenticate")
|
||||
}
|
||||
if ao.Username != cfg.Global.Username {
|
||||
t.Errorf("Username %s != %s", ao.Username, cfg.Global.Username)
|
||||
}
|
||||
}
|
||||
|
||||
// This allows acceptance testing against an existing Rackspace
|
||||
// install, using the standard OS_* Rackspace client environment
|
||||
// variables.
|
||||
// FIXME: it would be better to hermetically test against canned JSON
|
||||
// requests/responses.
|
||||
func configFromEnv() (cfg Config, ok bool) {
|
||||
cfg.Global.AuthUrl = os.Getenv("OS_AUTH_URL")
|
||||
|
||||
cfg.Global.TenantId = os.Getenv("OS_TENANT_ID")
|
||||
// Rax/nova _insists_ that we don't specify both tenant ID and name
|
||||
if cfg.Global.TenantId == "" {
|
||||
cfg.Global.TenantName = os.Getenv("OS_TENANT_NAME")
|
||||
}
|
||||
|
||||
cfg.Global.Username = os.Getenv("OS_USERNAME")
|
||||
cfg.Global.Password = os.Getenv("OS_PASSWORD")
|
||||
cfg.Global.ApiKey = os.Getenv("OS_API_KEY")
|
||||
cfg.Global.Region = os.Getenv("OS_REGION_NAME")
|
||||
cfg.Global.DomainId = os.Getenv("OS_DOMAIN_ID")
|
||||
cfg.Global.DomainName = os.Getenv("OS_DOMAIN_NAME")
|
||||
|
||||
ok = (cfg.Global.AuthUrl != "" &&
|
||||
cfg.Global.Username != "" &&
|
||||
(cfg.Global.Password != "" || cfg.Global.ApiKey != "") &&
|
||||
(cfg.Global.TenantId != "" || cfg.Global.TenantName != "" ||
|
||||
cfg.Global.DomainId != "" || cfg.Global.DomainName != ""))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func TestParseMetaData(t *testing.T) {
|
||||
_, err := parseMetaData(strings.NewReader(""))
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when invalid meta data is provided: %s", err)
|
||||
}
|
||||
|
||||
id, err := parseMetaData(strings.NewReader(`
|
||||
{
|
||||
"UUID":"someuuid",
|
||||
"name":"somename",
|
||||
"project_id":"someprojectid"
|
||||
}
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when valid meta data is provided: %s", err)
|
||||
}
|
||||
if id != "someuuid" {
|
||||
t.Errorf("incorrect uuid: %s", id)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRackspace(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
_, err := newRackspace(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate Rackspace: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZones(t *testing.T) {
|
||||
os := Rackspace{
|
||||
provider: &gophercloud.ProviderClient{
|
||||
IdentityBase: "http://auth.url/",
|
||||
},
|
||||
region: "myRegion",
|
||||
}
|
||||
|
||||
z, ok := os.Zones()
|
||||
if !ok {
|
||||
t.Fatalf("Zones() returned false")
|
||||
}
|
||||
|
||||
zone, err := z.GetZone()
|
||||
if err != nil {
|
||||
t.Fatalf("GetZone() returned error: %s", err)
|
||||
}
|
||||
|
||||
if zone.Region != "myRegion" {
|
||||
t.Fatalf("GetZone() returned wrong region (%s)", zone.Region)
|
||||
}
|
||||
}
|
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["vsphere.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/vmware/govmomi",
|
||||
"//vendor:github.com/vmware/govmomi/find",
|
||||
"//vendor:github.com/vmware/govmomi/object",
|
||||
"//vendor:github.com/vmware/govmomi/property",
|
||||
"//vendor:github.com/vmware/govmomi/session",
|
||||
"//vendor:github.com/vmware/govmomi/vim25",
|
||||
"//vendor:github.com/vmware/govmomi/vim25/mo",
|
||||
"//vendor:github.com/vmware/govmomi/vim25/soap",
|
||||
"//vendor:github.com/vmware/govmomi/vim25/types",
|
||||
"//vendor:golang.org/x/net/context",
|
||||
"//vendor:gopkg.in/gcfg.v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["vsphere_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//vendor:golang.org/x/net/context",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/rand",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
maintainers:
|
||||
- dagnello
|
||||
- abithap
|
||||
- imkin
|
||||
- abrarshivani
|
||||
- kerneltime
|
||||
- luomiao
|
1358
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go
generated
vendored
Normal file
File diff suppressed because it is too large
252
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go
generated
vendored
Normal file
|
@ -0,0 +1,252 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package vsphere
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
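// configFromEnv builds a VSphereConfig from the VSPHERE_* environment variables; ok reports whether the required fields (vCenter IP and user) are present.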
func configFromEnv() (cfg VSphereConfig, ok bool) {
|
||||
var InsecureFlag bool
|
||||
var err error
|
||||
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
|
||||
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
|
||||
cfg.Global.User = os.Getenv("VSPHERE_USER")
|
||||
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
|
||||
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
|
||||
cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
|
||||
cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
|
||||
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
|
||||
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
|
||||
if os.Getenv("VSPHERE_INSECURE") != "" {
|
||||
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
|
||||
} else {
|
||||
InsecureFlag = false
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cfg.Global.InsecureFlag = InsecureFlag
|
||||
|
||||
ok = (cfg.Global.VCenterIP != "" &&
|
||||
cfg.Global.User != "")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
_, err := readConfig(nil)
|
||||
if err == nil {
|
||||
t.Errorf("Should fail when no config is provided: %s", err)
|
||||
}
|
||||
|
||||
cfg, err := readConfig(strings.NewReader(`
|
||||
[Global]
|
||||
server = 0.0.0.0
|
||||
port = 443
|
||||
user = user
|
||||
password = password
|
||||
insecure-flag = true
|
||||
datacenter = us-west
|
||||
`))
|
||||
if err != nil {
|
||||
t.Fatalf("Should succeed when a valid config is provided: %s", err)
|
||||
}
|
||||
|
||||
if cfg.Global.VCenterIP != "0.0.0.0" {
|
||||
t.Errorf("incorrect vcenter ip: %s", cfg.Global.VCenterIP)
|
||||
}
|
||||
|
||||
if cfg.Global.VCenterPort != "443" {
|
||||
t.Errorf("incorrect vcenter port: %s", cfg.Global.VCenterPort)
|
||||
}
|
||||
|
||||
if cfg.Global.Datacenter != "us-west" {
|
||||
t.Errorf("incorrect datacenter: %s", cfg.Global.Datacenter)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVSphere(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
_, err := newVSphere(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVSphereLogin(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
// Create vSphere configuration object
|
||||
vs, err := newVSphere(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
|
||||
}
|
||||
|
||||
// Create context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Create vSphere client
|
||||
err = vSphereLogin(ctx, vs)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create vSpere client: %s", err)
|
||||
}
|
||||
defer vs.client.Logout(ctx)
|
||||
}
|
||||
|
||||
func TestZones(t *testing.T) {
|
||||
cfg := VSphereConfig{}
|
||||
cfg.Global.Datacenter = "myDatacenter"
|
||||
|
||||
// Create vSphere configuration object
|
||||
vs := VSphere{
|
||||
cfg: &cfg,
|
||||
}
|
||||
|
||||
_, ok := vs.Zones()
|
||||
if ok {
|
||||
t.Fatalf("Zones() returned true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInstances(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
vs, err := newVSphere(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
|
||||
}
|
||||
|
||||
i, ok := vs.Instances()
|
||||
if !ok {
|
||||
t.Fatalf("Instances() returned false")
|
||||
}
|
||||
|
||||
srvs, err := vs.list("*")
|
||||
if err != nil {
|
||||
t.Fatalf("list() failed: %s", err)
|
||||
}
|
||||
|
||||
if len(srvs) == 0 {
|
||||
t.Fatalf("list() returned zero servers")
|
||||
}
|
||||
t.Logf("Found servers (%d): %s\n", len(srvs), srvs)
|
||||
|
||||
externalId, err := i.ExternalID(srvs[0])
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.ExternalID(%s) failed: %s", srvs[0], err)
|
||||
}
|
||||
t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId)
|
||||
|
||||
nonExistingVM := types.NodeName(rand.String(15))
|
||||
externalId, err = i.ExternalID(nonExistingVM)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
t.Logf("VM %s was not found as expected\n", nonExistingVM)
|
||||
} else if err == nil {
|
||||
t.Fatalf("Instances.ExternalID did not fail as expected, VM %s was found", nonExistingVM)
|
||||
} else {
|
||||
t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
|
||||
}
|
||||
|
||||
instanceId, err := i.InstanceID(srvs[0])
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.InstanceID(%s) failed: %s", srvs[0], err)
|
||||
}
|
||||
t.Logf("Found InstanceID(%s) = %s\n", srvs[0], instanceId)
|
||||
|
||||
instanceId, err = i.InstanceID(nonExistingVM)
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
t.Logf("VM %s was not found as expected\n", nonExistingVM)
|
||||
} else if err == nil {
|
||||
t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
|
||||
} else {
|
||||
t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
|
||||
}
|
||||
|
||||
addrs, err := i.NodeAddresses(srvs[0])
|
||||
if err != nil {
|
||||
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
|
||||
}
|
||||
t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
|
||||
}
|
||||
|
||||
func TestVolumes(t *testing.T) {
|
||||
cfg, ok := configFromEnv()
|
||||
if !ok {
|
||||
t.Skipf("No config found in environment")
|
||||
}
|
||||
|
||||
vs, err := newVSphere(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
|
||||
}
|
||||
|
||||
srvs, err := vs.list("*")
|
||||
if err != nil {
|
||||
t.Fatalf("list() failed: %s", err)
|
||||
}
|
||||
if len(srvs) == 0 {
|
||||
t.Fatalf("list() returned zero servers")
|
||||
}
|
||||
|
||||
volumeOptions := &VolumeOptions{
|
||||
CapacityKB: 1 * 1024 * 1024,
|
||||
Tags: nil,
|
||||
Name: "kubernetes-test-volume-" + rand.String(10),
|
||||
DiskFormat: "thin"}
|
||||
|
||||
volPath, err := vs.CreateVolume(volumeOptions)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create a new VMDK volume: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = vs.AttachDisk(volPath, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
|
||||
}
|
||||
|
||||
err = vs.DetachDisk(volPath, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, srvs[0], err)
|
||||
}
|
||||
|
||||
// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
|
||||
// err = vs.DeleteVolume(volPath)
|
||||
// if err != nil {
|
||||
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
|
||||
// }
|
||||
}
|