Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
41 vendor/k8s.io/kubernetes/pkg/proxy/BUILD generated vendored Normal file
@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "types.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/proxy/config:all-srcs",
        "//pkg/proxy/healthcheck:all-srcs",
        "//pkg/proxy/iptables:all-srcs",
        "//pkg/proxy/userspace:all-srcs",
        "//pkg/proxy/winuserspace:all-srcs",
    ],
    tags = ["automanaged"],
)
14 vendor/k8s.io/kubernetes/pkg/proxy/OWNERS generated vendored Normal file
@@ -0,0 +1,14 @@
approvers:
- thockin
- bprashanth
- matchstick
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- vishh
- bprashanth
- justinsb
- freehan
- dcbw
66 vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD generated vendored Normal file
@@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "api.go",
        "config.go",
        "doc.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/client/cache:go_default_library",
        "//pkg/fields:go_default_library",
        "//pkg/util/config:go_default_library",
        "//vendor:github.com/davecgh/go-spew/spew",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/types",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["api_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/cache:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/watch",
    ],
)

go_test(
    name = "go_default_xtest",
    srcs = ["config_test.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/proxy/config:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
7 vendor/k8s.io/kubernetes/pkg/proxy/config/OWNERS generated vendored Executable file
@@ -0,0 +1,7 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- brendandburns
- bprashanth
- freehan
74 vendor/k8s.io/kubernetes/pkg/proxy/config/api.go generated vendored Normal file
@@ -0,0 +1,74 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "time"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/fields"
)

// NewSourceAPI creates config source that watches for changes to the services and endpoints.
func NewSourceAPI(c cache.Getter, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
    servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
    cache.NewReflector(servicesLW, &api.Service{}, NewServiceStore(nil, servicesChan), period).Run()

    endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())
    cache.NewReflector(endpointsLW, &api.Endpoints{}, NewEndpointsStore(nil, endpointsChan), period).Run()
}

// NewServiceStore creates an undelta store that expands updates to the store into
// ServiceUpdate events on the channel. If no store is passed, a default store will
// be initialized. Allows reuse of a cache store across multiple components.
func NewServiceStore(store cache.Store, ch chan<- ServiceUpdate) cache.Store {
    fn := func(objs []interface{}) {
        var services []api.Service
        for _, o := range objs {
            services = append(services, *(o.(*api.Service)))
        }
        ch <- ServiceUpdate{Op: SET, Services: services}
    }
    if store == nil {
        store = cache.NewStore(cache.MetaNamespaceKeyFunc)
    }
    return &cache.UndeltaStore{
        Store:    store,
        PushFunc: fn,
    }
}

// NewEndpointsStore creates an undelta store that expands updates to the store into
// EndpointsUpdate events on the channel. If no store is passed, a default store will
// be initialized. Allows reuse of a cache store across multiple components.
func NewEndpointsStore(store cache.Store, ch chan<- EndpointsUpdate) cache.Store {
    fn := func(objs []interface{}) {
        var endpoints []api.Endpoints
        for _, o := range objs {
            endpoints = append(endpoints, *(o.(*api.Endpoints)))
        }
        ch <- EndpointsUpdate{Op: SET, Endpoints: endpoints}
    }
    if store == nil {
        store = cache.NewStore(cache.MetaNamespaceKeyFunc)
    }
    return &cache.UndeltaStore{
        Store:    store,
        PushFunc: fn,
    }
}
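Editor's note: the doc comments above describe how NewSourceAPI wires list/watch reflectors to the two update channels. A minimal consumption sketch, not part of this commit (the REST client argument and the logging are assumed for illustration only):

func exampleSourceAPI(c cache.Getter) {
    serviceCh := make(chan ServiceUpdate)
    endpointsCh := make(chan EndpointsUpdate)

    // The reflectors push a full SET snapshot onto each channel whenever the
    // underlying undelta store changes.
    NewSourceAPI(c, 30*time.Second, serviceCh, endpointsCh)

    go func() {
        for update := range serviceCh {
            fmt.Printf("services snapshot: %d items\n", len(update.Services))
        }
    }()
    go func() {
        for update := range endpointsCh {
            fmt.Printf("endpoints snapshot: %d items\n", len(update.Endpoints))
        }
    }()
}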
246 vendor/k8s.io/kubernetes/pkg/proxy/config/api_test.go generated vendored Normal file
@@ -0,0 +1,246 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/cache"
)

type fakeLW struct {
    listResp  runtime.Object
    watchResp watch.Interface
}

func (lw fakeLW) List(options v1.ListOptions) (runtime.Object, error) {
    return lw.listResp, nil
}

func (lw fakeLW) Watch(options v1.ListOptions) (watch.Interface, error) {
    return lw.watchResp, nil
}

var _ cache.ListerWatcher = fakeLW{}

func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
    service1v1 := &api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}}}
    service1v2 := &api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}}}
    service2 := &api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "s2"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 30}}}}

    // Setup fake api client.
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp:  &api.ServiceList{Items: []api.Service{}},
        watchResp: fakeWatch,
    }

    ch := make(chan ServiceUpdate)

    cache.NewReflector(lw, &api.Service{}, NewServiceStore(nil, ch), 30*time.Second).Run()

    got, ok := <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected := ServiceUpdate{Op: SET, Services: []api.Service{}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v; Got %#v", expected, got)
    }

    // Add the first service
    fakeWatch.Add(service1v1)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = ServiceUpdate{Op: SET, Services: []api.Service{*service1v1}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v; Got %#v", expected, got)
    }

    // Add another service
    fakeWatch.Add(service2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    // Could be sorted either of these two ways:
    expectedA := ServiceUpdate{Op: SET, Services: []api.Service{*service1v1, *service2}}
    expectedB := ServiceUpdate{Op: SET, Services: []api.Service{*service2, *service1v1}}

    if !api.Semantic.DeepEqual(expectedA, got) && !api.Semantic.DeepEqual(expectedB, got) {
        t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, got)
    }

    // Modify service1
    fakeWatch.Modify(service1v2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expectedA = ServiceUpdate{Op: SET, Services: []api.Service{*service1v2, *service2}}
    expectedB = ServiceUpdate{Op: SET, Services: []api.Service{*service2, *service1v2}}

    if !api.Semantic.DeepEqual(expectedA, got) && !api.Semantic.DeepEqual(expectedB, got) {
        t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, got)
    }

    // Delete service1
    fakeWatch.Delete(service1v2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = ServiceUpdate{Op: SET, Services: []api.Service{*service2}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v, Got %#v", expected, got)
    }

    // Delete service2
    fakeWatch.Delete(service2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = ServiceUpdate{Op: SET, Services: []api.Service{}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v, Got %#v", expected, got)
    }
}

func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
    endpoints1v1 := &api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{
                {IP: "1.2.3.4"},
            },
            Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
        }},
    }
    endpoints1v2 := &api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "e1"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{
                {IP: "1.2.3.4"},
                {IP: "4.3.2.1"},
            },
            Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
        }},
    }
    endpoints2 := &api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "e2"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{
                {IP: "5.6.7.8"},
            },
            Ports: []api.EndpointPort{{Port: 80, Protocol: "TCP"}},
        }},
    }

    // Setup fake api client.
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp:  &api.EndpointsList{Items: []api.Endpoints{}},
        watchResp: fakeWatch,
    }

    ch := make(chan EndpointsUpdate)

    cache.NewReflector(lw, &api.Endpoints{}, NewEndpointsStore(nil, ch), 30*time.Second).Run()

    got, ok := <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected := EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v; Got %#v", expected, got)
    }

    // Add the first endpoints
    fakeWatch.Add(endpoints1v1)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints1v1}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v; Got %#v", expected, got)
    }

    // Add another endpoints
    fakeWatch.Add(endpoints2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    // Could be sorted either of these two ways:
    expectedA := EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints1v1, *endpoints2}}
    expectedB := EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints2, *endpoints1v1}}

    if !api.Semantic.DeepEqual(expectedA, got) && !api.Semantic.DeepEqual(expectedB, got) {
        t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, got)
    }

    // Modify endpoints1
    fakeWatch.Modify(endpoints1v2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expectedA = EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints1v2, *endpoints2}}
    expectedB = EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints2, *endpoints1v2}}

    if !api.Semantic.DeepEqual(expectedA, got) && !api.Semantic.DeepEqual(expectedB, got) {
        t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, got)
    }

    // Delete endpoints1
    fakeWatch.Delete(endpoints1v2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{*endpoints2}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v, Got %#v", expected, got)
    }

    // Delete endpoints2
    fakeWatch.Delete(endpoints2)
    got, ok = <-ch
    if !ok {
        t.Errorf("Unable to read from channel when expected")
    }
    expected = EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{}}
    if !api.Semantic.DeepEqual(expected, got) {
        t.Errorf("Expected %#v, Got %#v", expected, got)
    }
}
297 vendor/k8s.io/kubernetes/pkg/proxy/config/config.go generated vendored Normal file
@@ -0,0 +1,297 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "sync"

    "github.com/davecgh/go-spew/spew"
    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/util/config"
)

// Operation is a type of operation of services or endpoints.
type Operation int

// These are the available operation types.
const (
    SET Operation = iota
    ADD
    REMOVE
)

// ServiceUpdate describes an operation of services, sent on the channel.
// You can add or remove single services by sending an array of size one and Op == ADD|REMOVE.
// For setting the state of the system to a given state for this source configuration, set Services as desired and Op to SET,
// which will reset the system state to that specified in this operation for this source channel.
// To remove all services, set Services to empty array and Op to SET
type ServiceUpdate struct {
    Services []api.Service
    Op       Operation
}

// EndpointsUpdate describes an operation of endpoints, sent on the channel.
// You can add or remove single endpoints by sending an array of size one and Op == ADD|REMOVE.
// For setting the state of the system to a given state for this source configuration, set Endpoints as desired and Op to SET,
// which will reset the system state to that specified in this operation for this source channel.
// To remove all endpoints, set Endpoints to empty array and Op to SET
type EndpointsUpdate struct {
    Endpoints []api.Endpoints
    Op        Operation
}

// ServiceConfigHandler is an abstract interface of objects which receive update notifications for the set of services.
type ServiceConfigHandler interface {
    // OnServiceUpdate gets called when a configuration has been changed by one of the sources.
    // This is the union of all the configuration sources.
    OnServiceUpdate(services []api.Service)
}

// EndpointsConfigHandler is an abstract interface of objects which receive update notifications for the set of endpoints.
type EndpointsConfigHandler interface {
    // OnEndpointsUpdate gets called when endpoints configuration is changed for a given
    // service on any of the configuration sources. An example is when a new
    // service comes up, or when containers come up or down for an existing service.
    OnEndpointsUpdate(endpoints []api.Endpoints)
}

// EndpointsConfig tracks a set of endpoints configurations.
// It accepts "set", "add" and "remove" operations of endpoints via channels, and invokes registered handlers on change.
type EndpointsConfig struct {
    mux     *config.Mux
    bcaster *config.Broadcaster
    store   *endpointsStore
}

// NewEndpointsConfig creates a new EndpointsConfig.
// It immediately runs the created EndpointsConfig.
func NewEndpointsConfig() *EndpointsConfig {
    // The updates channel is used to send interrupts to the Endpoints handler.
    // It's buffered because we never want to block for as long as there is a
    // pending interrupt, but don't want to drop them if the handler is doing
    // work.
    updates := make(chan struct{}, 1)
    store := &endpointsStore{updates: updates, endpoints: make(map[string]map[types.NamespacedName]api.Endpoints)}
    mux := config.NewMux(store)
    bcaster := config.NewBroadcaster()
    go watchForUpdates(bcaster, store, updates)
    return &EndpointsConfig{mux, bcaster, store}
}

func (c *EndpointsConfig) RegisterHandler(handler EndpointsConfigHandler) {
    c.bcaster.Add(config.ListenerFunc(func(instance interface{}) {
        glog.V(3).Infof("Calling handler.OnEndpointsUpdate()")
        handler.OnEndpointsUpdate(instance.([]api.Endpoints))
    }))
}

func (c *EndpointsConfig) Channel(source string) chan EndpointsUpdate {
    ch := c.mux.Channel(source)
    endpointsCh := make(chan EndpointsUpdate)
    go func() {
        for update := range endpointsCh {
            ch <- update
        }
    }()
    return endpointsCh
}

func (c *EndpointsConfig) Config() []api.Endpoints {
    return c.store.MergedState().([]api.Endpoints)
}

type endpointsStore struct {
    endpointLock sync.RWMutex
    endpoints    map[string]map[types.NamespacedName]api.Endpoints
    updates      chan<- struct{}
}

func (s *endpointsStore) Merge(source string, change interface{}) error {
    s.endpointLock.Lock()
    endpoints := s.endpoints[source]
    if endpoints == nil {
        endpoints = make(map[types.NamespacedName]api.Endpoints)
    }
    update := change.(EndpointsUpdate)
    switch update.Op {
    case ADD:
        glog.V(5).Infof("Adding new endpoint from source %s : %s", source, spew.Sdump(update.Endpoints))
        for _, value := range update.Endpoints {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            endpoints[name] = value
        }
    case REMOVE:
        glog.V(5).Infof("Removing an endpoint %s", spew.Sdump(update))
        for _, value := range update.Endpoints {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            delete(endpoints, name)
        }
    case SET:
        glog.V(5).Infof("Setting endpoints %s", spew.Sdump(update))
        // Clear the old map entries by just creating a new map
        endpoints = make(map[types.NamespacedName]api.Endpoints)
        for _, value := range update.Endpoints {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            endpoints[name] = value
        }
    default:
        glog.V(4).Infof("Received invalid update type: %s", spew.Sdump(update))
    }
    s.endpoints[source] = endpoints
    s.endpointLock.Unlock()
    if s.updates != nil {
        // Since we record the snapshot before sending this signal, it's
        // possible that the consumer ends up performing an extra update.
        select {
        case s.updates <- struct{}{}:
        default:
            glog.V(4).Infof("Endpoints handler already has a pending interrupt.")
        }
    }
    return nil
}

func (s *endpointsStore) MergedState() interface{} {
    s.endpointLock.RLock()
    defer s.endpointLock.RUnlock()
    endpoints := make([]api.Endpoints, 0)
    for _, sourceEndpoints := range s.endpoints {
        for _, value := range sourceEndpoints {
            endpoints = append(endpoints, value)
        }
    }
    return endpoints
}

// ServiceConfig tracks a set of service configurations.
// It accepts "set", "add" and "remove" operations of services via channels, and invokes registered handlers on change.
type ServiceConfig struct {
    mux     *config.Mux
    bcaster *config.Broadcaster
    store   *serviceStore
}

// NewServiceConfig creates a new ServiceConfig.
// It immediately runs the created ServiceConfig.
func NewServiceConfig() *ServiceConfig {
    // The updates channel is used to send interrupts to the Services handler.
    // It's buffered because we never want to block for as long as there is a
    // pending interrupt, but don't want to drop them if the handler is doing
    // work.
    updates := make(chan struct{}, 1)
    store := &serviceStore{updates: updates, services: make(map[string]map[types.NamespacedName]api.Service)}
    mux := config.NewMux(store)
    bcaster := config.NewBroadcaster()
    go watchForUpdates(bcaster, store, updates)
    return &ServiceConfig{mux, bcaster, store}
}

func (c *ServiceConfig) RegisterHandler(handler ServiceConfigHandler) {
    c.bcaster.Add(config.ListenerFunc(func(instance interface{}) {
        glog.V(3).Infof("Calling handler.OnServiceUpdate()")
        handler.OnServiceUpdate(instance.([]api.Service))
    }))
}

func (c *ServiceConfig) Channel(source string) chan ServiceUpdate {
    ch := c.mux.Channel(source)
    serviceCh := make(chan ServiceUpdate)
    go func() {
        for update := range serviceCh {
            ch <- update
        }
    }()
    return serviceCh
}

func (c *ServiceConfig) Config() []api.Service {
    return c.store.MergedState().([]api.Service)
}

type serviceStore struct {
    serviceLock sync.RWMutex
    services    map[string]map[types.NamespacedName]api.Service
    updates     chan<- struct{}
}

func (s *serviceStore) Merge(source string, change interface{}) error {
    s.serviceLock.Lock()
    services := s.services[source]
    if services == nil {
        services = make(map[types.NamespacedName]api.Service)
    }
    update := change.(ServiceUpdate)
    switch update.Op {
    case ADD:
        glog.V(5).Infof("Adding new service from source %s : %s", source, spew.Sdump(update.Services))
        for _, value := range update.Services {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            services[name] = value
        }
    case REMOVE:
        glog.V(5).Infof("Removing a service %s", spew.Sdump(update))
        for _, value := range update.Services {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            delete(services, name)
        }
    case SET:
        glog.V(5).Infof("Setting services %s", spew.Sdump(update))
        // Clear the old map entries by just creating a new map
        services = make(map[types.NamespacedName]api.Service)
        for _, value := range update.Services {
            name := types.NamespacedName{Namespace: value.Namespace, Name: value.Name}
            services[name] = value
        }
    default:
        glog.V(4).Infof("Received invalid update type: %s", spew.Sdump(update))
    }
    s.services[source] = services
    s.serviceLock.Unlock()
    if s.updates != nil {
        // Since we record the snapshot before sending this signal, it's
        // possible that the consumer ends up performing an extra update.
        select {
        case s.updates <- struct{}{}:
        default:
            glog.V(4).Infof("Service handler already has a pending interrupt.")
        }
    }
    return nil
}

func (s *serviceStore) MergedState() interface{} {
    s.serviceLock.RLock()
    defer s.serviceLock.RUnlock()
    services := make([]api.Service, 0)
    for _, sourceServices := range s.services {
        for _, value := range sourceServices {
            services = append(services, value)
        }
    }
    return services
}

// watchForUpdates invokes bcaster.Notify() with the latest version of an object
// when changes occur.
func watchForUpdates(bcaster *config.Broadcaster, accessor config.Accessor, updates <-chan struct{}) {
    for true {
        <-updates
        bcaster.Notify(accessor.MergedState())
    }
}
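Editor's note: the comments above spell out the SET/ADD/REMOVE semantics and the per-source channels. A brief usage sketch, mirroring the config_test.go cases below (illustrative only, not part of this commit; the handler type and source names are hypothetical):

// printHandler implements ServiceConfigHandler for illustration.
type printHandler struct{}

func (printHandler) OnServiceUpdate(services []api.Service) {
    fmt.Printf("merged view now holds %d services\n", len(services))
}

func exampleServiceConfig() {
    cfg := NewServiceConfig()
    cfg.RegisterHandler(printHandler{})

    // Each configuration source gets its own named channel; Merge folds the
    // updates from all sources into one view before handlers are notified.
    apiSource := cfg.Channel("api")
    fileSource := cfg.Channel("file")

    apiSource <- ServiceUpdate{Op: ADD, Services: []api.Service{{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "web"}}}}
    fileSource <- ServiceUpdate{Op: SET, Services: []api.Service{}}
}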
343 vendor/k8s.io/kubernetes/pkg/proxy/config/config_test.go generated vendored Normal file
@@ -0,0 +1,343 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config_test

import (
    "reflect"
    "sort"
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/api"
    . "k8s.io/kubernetes/pkg/proxy/config"
)

const TomcatPort int = 8080
const TomcatName = "tomcat"

var TomcatEndpoints = map[string]string{"c0": "1.1.1.1:18080", "c1": "2.2.2.2:18081"}

const MysqlPort int = 3306
const MysqlName = "mysql"

var MysqlEndpoints = map[string]string{"c0": "1.1.1.1:13306", "c3": "2.2.2.2:13306"}

type sortedServices []api.Service

func (s sortedServices) Len() int {
    return len(s)
}
func (s sortedServices) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}
func (s sortedServices) Less(i, j int) bool {
    return s[i].Name < s[j].Name
}

type ServiceHandlerMock struct {
    updated chan []api.Service
    waits   int
}

func NewServiceHandlerMock() *ServiceHandlerMock {
    return &ServiceHandlerMock{updated: make(chan []api.Service, 5)}
}

func (h *ServiceHandlerMock) OnServiceUpdate(services []api.Service) {
    sort.Sort(sortedServices(services))
    h.updated <- services
}

func (h *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []api.Service) {
    // We might get 1 or more updates for N service updates, because we
    // over write older snapshots of services from the producer go-routine
    // if the consumer falls behind.
    var services []api.Service
    for {
        select {
        case services = <-h.updated:
            if reflect.DeepEqual(services, expectedServices) {
                return
            }
        // Unittests will hard timeout in 5m with a stack trace, prevent that
        // and surface a clearer reason for failure.
        case <-time.After(wait.ForeverTestTimeout):
            t.Errorf("Timed out. Expected %#v, Got %#v", expectedServices, services)
            return
        }
    }
}

type sortedEndpoints []api.Endpoints

func (s sortedEndpoints) Len() int {
    return len(s)
}
func (s sortedEndpoints) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}
func (s sortedEndpoints) Less(i, j int) bool {
    return s[i].Name < s[j].Name
}

type EndpointsHandlerMock struct {
    updated chan []api.Endpoints
    waits   int
}

func NewEndpointsHandlerMock() *EndpointsHandlerMock {
    return &EndpointsHandlerMock{updated: make(chan []api.Endpoints, 5)}
}

func (h *EndpointsHandlerMock) OnEndpointsUpdate(endpoints []api.Endpoints) {
    sort.Sort(sortedEndpoints(endpoints))
    h.updated <- endpoints
}

func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []api.Endpoints) {
    // We might get 1 or more updates for N endpoint updates, because we
    // over write older snapshots of endpoints from the producer go-routine
    // if the consumer falls behind. Unittests will hard timeout in 5m.
    var endpoints []api.Endpoints
    for {
        select {
        case endpoints = <-h.updated:
            if reflect.DeepEqual(endpoints, expectedEndpoints) {
                return
            }
        // Unittests will hard timeout in 5m with a stack trace, prevent that
        // and surface a clearer reason for failure.
        case <-time.After(wait.ForeverTestTimeout):
            t.Errorf("Timed out. Expected %#v, Got %#v", expectedEndpoints, endpoints)
            return
        }
    }
}

func CreateServiceUpdate(op Operation, services ...api.Service) ServiceUpdate {
    ret := ServiceUpdate{Op: op}
    ret.Services = make([]api.Service, len(services))
    for i, value := range services {
        ret.Services[i] = value
    }
    return ret
}

func CreateEndpointsUpdate(op Operation, endpoints ...api.Endpoints) EndpointsUpdate {
    ret := EndpointsUpdate{Op: op}
    ret.Endpoints = make([]api.Endpoints, len(endpoints))
    for i, value := range endpoints {
        ret.Endpoints[i] = value
    }
    return ret
}

func TestNewServiceAddedAndNotified(t *testing.T) {
    config := NewServiceConfig()
    channel := config.Channel("one")
    handler := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    serviceUpdate := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
    })
    channel <- serviceUpdate
    handler.ValidateServices(t, serviceUpdate.Services)

}

func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
    config := NewServiceConfig()
    channel := config.Channel("one")
    handler := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    serviceUpdate := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
    })
    channel <- serviceUpdate
    handler.ValidateServices(t, serviceUpdate.Services)

    serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
    })
    channel <- serviceUpdate2
    services := []api.Service{serviceUpdate2.Services[0], serviceUpdate.Services[0]}
    handler.ValidateServices(t, services)

    serviceUpdate3 := CreateServiceUpdate(REMOVE, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
    })
    channel <- serviceUpdate3
    services = []api.Service{serviceUpdate2.Services[0]}
    handler.ValidateServices(t, services)

    serviceUpdate4 := CreateServiceUpdate(SET, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foobar"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 99}}},
    })
    channel <- serviceUpdate4
    services = []api.Service{serviceUpdate4.Services[0]}
    handler.ValidateServices(t, services)
}

func TestNewMultipleSourcesServicesAddedAndNotified(t *testing.T) {
    config := NewServiceConfig()
    channelOne := config.Channel("one")
    channelTwo := config.Channel("two")
    if channelOne == channelTwo {
        t.Error("Same channel handed back for one and two")
    }
    handler := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    serviceUpdate1 := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
    })
    serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
    })
    channelOne <- serviceUpdate1
    channelTwo <- serviceUpdate2
    services := []api.Service{serviceUpdate2.Services[0], serviceUpdate1.Services[0]}
    handler.ValidateServices(t, services)
}

func TestNewMultipleSourcesServicesMultipleHandlersAddedAndNotified(t *testing.T) {
    config := NewServiceConfig()
    channelOne := config.Channel("one")
    channelTwo := config.Channel("two")
    handler := NewServiceHandlerMock()
    handler2 := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    serviceUpdate1 := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 10}}},
    })
    serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 20}}},
    })
    channelOne <- serviceUpdate1
    channelTwo <- serviceUpdate2
    services := []api.Service{serviceUpdate2.Services[0], serviceUpdate1.Services[0]}
    handler.ValidateServices(t, services)
    handler2.ValidateServices(t, services)
}

func TestNewMultipleSourcesEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
    config := NewEndpointsConfig()
    channelOne := config.Channel("one")
    channelTwo := config.Channel("two")
    handler := NewEndpointsHandlerMock()
    handler2 := NewEndpointsHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    endpointsUpdate1 := CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    endpointsUpdate2 := CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    channelOne <- endpointsUpdate1
    channelTwo <- endpointsUpdate2

    endpoints := []api.Endpoints{endpointsUpdate2.Endpoints[0], endpointsUpdate1.Endpoints[0]}
    handler.ValidateEndpoints(t, endpoints)
    handler2.ValidateEndpoints(t, endpoints)
}

func TestNewMultipleSourcesEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
    config := NewEndpointsConfig()
    channelOne := config.Channel("one")
    channelTwo := config.Channel("two")
    handler := NewEndpointsHandlerMock()
    handler2 := NewEndpointsHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    endpointsUpdate1 := CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    endpointsUpdate2 := CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "3.3.3.3"}, {IP: "4.4.4.4"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    channelOne <- endpointsUpdate1
    channelTwo <- endpointsUpdate2

    endpoints := []api.Endpoints{endpointsUpdate2.Endpoints[0], endpointsUpdate1.Endpoints[0]}
    handler.ValidateEndpoints(t, endpoints)
    handler2.ValidateEndpoints(t, endpoints)

    // Add one more
    endpointsUpdate3 := CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foobar"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "5.5.5.5"}, {IP: "6.6.6.6"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    channelTwo <- endpointsUpdate3
    endpoints = []api.Endpoints{endpointsUpdate2.Endpoints[0], endpointsUpdate1.Endpoints[0], endpointsUpdate3.Endpoints[0]}
    handler.ValidateEndpoints(t, endpoints)
    handler2.ValidateEndpoints(t, endpoints)

    // Update the "foo" service with new endpoints
    endpointsUpdate1 = CreateEndpointsUpdate(ADD, api.Endpoints{
        ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "foo"},
        Subsets: []api.EndpointSubset{{
            Addresses: []api.EndpointAddress{{IP: "7.7.7.7"}},
            Ports:     []api.EndpointPort{{Port: 80}},
        }},
    })
    channelOne <- endpointsUpdate1
    endpoints = []api.Endpoints{endpointsUpdate2.Endpoints[0], endpointsUpdate1.Endpoints[0], endpointsUpdate3.Endpoints[0]}
    handler.ValidateEndpoints(t, endpoints)
    handler2.ValidateEndpoints(t, endpoints)

    // Remove "bar" service
    endpointsUpdate2 = CreateEndpointsUpdate(REMOVE, api.Endpoints{ObjectMeta: api.ObjectMeta{Namespace: "testnamespace", Name: "bar"}})
    channelTwo <- endpointsUpdate2

    endpoints = []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate3.Endpoints[0]}
    handler.ValidateEndpoints(t, endpoints)
    handler2.ValidateEndpoints(t, endpoints)
}

// TODO: Add a unittest for interrupts getting processed in a timely manner.
// Currently this module has a circular dependency with config, and so it's
// named config_test, which means even test methods need to be public. This
// is refactoring that we can avoid by resolving the dependency.
25 vendor/k8s.io/kubernetes/pkg/proxy/config/doc.go generated vendored Normal file
@@ -0,0 +1,25 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package config provides decoupling between various configuration sources (etcd, files,...) and
// the pieces that actually care about them (loadbalancer, proxy). Config takes 1 or more
// configuration sources and allows for incremental (add/remove) and full replace (set)
// changes from each of the sources, then creates a union of the configuration and provides
// a unified view for both service handlers as well as endpoint handlers. There is no attempt
// to resolve conflicts of any sort. Basic idea is that each configuration source gets a channel
// from the Config service and pushes updates to it via that channel. Config then keeps track of
// incremental & replace changes and distributes them to listeners as appropriate.
package config // import "k8s.io/kubernetes/pkg/proxy/config"
18 vendor/k8s.io/kubernetes/pkg/proxy/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package proxy implements the layer-3 network proxy.
package proxy // import "k8s.io/kubernetes/pkg/proxy"
53 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD generated vendored Normal file
@@ -0,0 +1,53 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "api.go",
        "doc.go",
        "healthcheck.go",
        "http.go",
        "listener.go",
        "worker.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/client/cache:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["healthcheck_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
2 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/OWNERS generated vendored Executable file
@@ -0,0 +1,2 @@
reviewers:
- m1093782566
65 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/api.go generated vendored Normal file
@@ -0,0 +1,65 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
)

// All public API Methods for this package

// UpdateEndpoints Update the set of local endpoints for a service
func UpdateEndpoints(serviceName types.NamespacedName, endpointUids sets.String) {
    req := &proxyMutationRequest{
        serviceName:  serviceName,
        endpointUids: &endpointUids,
    }
    healthchecker.mutationRequestChannel <- req
}

func updateServiceListener(serviceName types.NamespacedName, listenPort int, add bool) bool {
    responseChannel := make(chan bool)
    req := &proxyListenerRequest{
        serviceName:     serviceName,
        listenPort:      uint16(listenPort),
        add:             add,
        responseChannel: responseChannel,
    }
    healthchecker.listenerRequestChannel <- req
    return <-responseChannel
}

// AddServiceListener Request addition of a listener for a service's health check
func AddServiceListener(serviceName types.NamespacedName, listenPort int) bool {
    return updateServiceListener(serviceName, listenPort, true)
}

// DeleteServiceListener Request deletion of a listener for a service's health check
func DeleteServiceListener(serviceName types.NamespacedName, listenPort int) bool {
    return updateServiceListener(serviceName, listenPort, false)
}

// Run Start the healthchecker main loop
func Run() {
    healthchecker = proxyHealthCheckFactory()
    // Wrap with a wait.Forever to handle panics.
    go wait.Forever(func() {
        healthchecker.handlerLoop()
    }, 0)
}
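Editor's note: the exported functions above are the whole public surface of this package. A sketch of the intended call order from a proxier (illustrative only, not part of this commit; the node port value is hypothetical):

func exampleHealthcheckUsage(svc types.NamespacedName, localEndpoints sets.String) {
    Run() // start the single handler loop once at proxy startup

    // Open a responder on the service's health check node port.
    if !AddServiceListener(svc, 30123) {
        return
    }
    // Report which local endpoints back the service; an empty set removes the
    // entry, so the responder answers 503 until endpoints appear again.
    UpdateEndpoints(svc, localEndpoints)

    // Later, when the service goes away:
    DeleteServiceListener(svc, 30123)
}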
18 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package healthcheck LoadBalancer Healthcheck responder library for kubernetes network proxies
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"
127 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go generated vendored Normal file
@@ -0,0 +1,127 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthcheck

import (
    "fmt"
    "net"
    "net/http"

    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/kubernetes/pkg/client/cache"
)

// proxyMutationRequest: Message to request addition/deletion of endpoints for a service
type proxyMutationRequest struct {
    serviceName  types.NamespacedName
    endpointUids *sets.String
}

// proxyListenerRequest: Message to request addition/deletion of a service responder on a listening port
type proxyListenerRequest struct {
    serviceName     types.NamespacedName
    listenPort      uint16
    add             bool
    responseChannel chan bool
}

// serviceEndpointsList: A list of endpoints for a service
type serviceEndpointsList struct {
    serviceName types.NamespacedName
    endpoints   *sets.String
}

// serviceResponder: Contains net/http datastructures necessary for responding to each Service's health check on its aux nodePort
type serviceResponder struct {
    serviceName types.NamespacedName
    listenPort  uint16
    listener    *net.Listener
    server      *http.Server
}

// proxyHC: Handler structure for health check, endpoint add/delete and service listener add/delete requests
type proxyHC struct {
    serviceEndpointsMap    cache.ThreadSafeStore
    serviceResponderMap    map[types.NamespacedName]serviceResponder
    mutationRequestChannel chan *proxyMutationRequest
    listenerRequestChannel chan *proxyListenerRequest
}

// handleHealthCheckRequest - received a health check request - lookup and respond to HC.
func (h *proxyHC) handleHealthCheckRequest(rw http.ResponseWriter, serviceName string) {
    s, ok := h.serviceEndpointsMap.Get(serviceName)
    if !ok {
        glog.V(4).Infof("Service %s not found or has no local endpoints", serviceName)
        sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "No Service Endpoints Found")
        return
    }
    numEndpoints := len(*s.(*serviceEndpointsList).endpoints)
    if numEndpoints > 0 {
        sendHealthCheckResponse(rw, http.StatusOK, fmt.Sprintf("%d Service Endpoints found", numEndpoints))
        return
    }
    sendHealthCheckResponse(rw, http.StatusServiceUnavailable, "0 local Endpoints are alive")
}

// handleMutationRequest - receive requests to mutate the table entry for a service
func (h *proxyHC) handleMutationRequest(req *proxyMutationRequest) {
    numEndpoints := len(*req.endpointUids)
    glog.V(4).Infof("LB service health check mutation request Service: %s - %d Endpoints %v",
        req.serviceName, numEndpoints, (*req.endpointUids).List())
    if numEndpoints == 0 {
        if _, ok := h.serviceEndpointsMap.Get(req.serviceName.String()); ok {
            glog.V(4).Infof("Deleting endpoints map for service %s, all local endpoints gone", req.serviceName.String())
            h.serviceEndpointsMap.Delete(req.serviceName.String())
        }
        return
    }
    var entry *serviceEndpointsList
    e, exists := h.serviceEndpointsMap.Get(req.serviceName.String())
    if exists {
        entry = e.(*serviceEndpointsList)
        if entry.endpoints.Equal(*req.endpointUids) {
            return
        }
        // Compute differences just for printing logs about additions and removals
        deletedEndpoints := entry.endpoints.Difference(*req.endpointUids)
        newEndpoints := req.endpointUids.Difference(*entry.endpoints)
        for _, e := range newEndpoints.List() {
            glog.V(4).Infof("Adding local endpoint %s to LB health check for service %s",
                e, req.serviceName.String())
        }
        for _, d := range deletedEndpoints.List() {
            glog.V(4).Infof("Deleted endpoint %s from service %s LB health check (%d endpoints left)",
                d, req.serviceName.String(), len(*entry.endpoints))
        }
    }
    entry = &serviceEndpointsList{serviceName: req.serviceName, endpoints: req.endpointUids}
    h.serviceEndpointsMap.Add(req.serviceName.String(), entry)
}

// proxyHealthCheckRequest - Factory method to instantiate the health check handler
func proxyHealthCheckFactory() *proxyHC {
    glog.V(2).Infof("Initializing kube-proxy health checker")
    phc := &proxyHC{
        serviceEndpointsMap:    cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{}),
        serviceResponderMap:    make(map[types.NamespacedName]serviceResponder),
        mutationRequestChannel: make(chan *proxyMutationRequest, 1024),
        listenerRequestChannel: make(chan *proxyListenerRequest, 1024),
    }
    return phc
}
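Editor's note: per handleHealthCheckRequest above, the responder answers 200 with an endpoint count when local endpoints exist and 503 otherwise, so an external load-balancer probe is just a plain HTTP GET against the health check port. Illustrative sketch, not part of this commit (node IP and port are hypothetical):

func nodeHasLocalEndpoints(nodeIP string, healthCheckPort int) bool {
    resp, err := http.Get(fmt.Sprintf("http://%s:%d/", nodeIP, healthCheckPort))
    if err != nil {
        return false
    }
    defer resp.Body.Close()
    // 200 = at least one local endpoint; 503 = none (or service unknown).
    return resp.StatusCode == http.StatusOK
}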
158 vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck_test.go generated vendored Normal file
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthcheck
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
type TestCaseData struct {
|
||||
nodePorts int
|
||||
numEndpoints int
|
||||
nodePortList []int
|
||||
svcNames []types.NamespacedName
|
||||
}
|
||||
|
||||
const (
|
||||
startPort = 20000
|
||||
endPort = 40000
|
||||
)
|
||||
|
||||
var (
|
||||
choices = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
)
|
||||
|
||||
func generateRandomString(n int) string {
|
||||
|
||||
b := make([]byte, n)
|
||||
l := len(choices)
|
||||
for i := range b {
|
||||
b[i] = choices[rand.Intn(l)]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func chooseServiceName(tc int, hint int) types.NamespacedName {
|
||||
var svc types.NamespacedName
|
||||
svc.Namespace = fmt.Sprintf("ns_%d", tc)
|
||||
svc.Name = fmt.Sprintf("name_%d", hint)
|
||||
return svc
|
||||
}
|
||||
|
||||
func generateEndpointSet(max int) sets.String {
|
||||
s := sets.NewString()
|
||||
for i := 0; i < max; i++ {
|
||||
s.Insert(fmt.Sprintf("%d%s", i, generateRandomString(8)))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func verifyHealthChecks(tc *TestCaseData, t *testing.T) bool {
|
||||
var success = true
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
for i := 0; i < tc.nodePorts; i++ {
|
||||
t.Logf("Validating HealthCheck works for svc %s nodePort %d\n", tc.svcNames[i], tc.nodePortList[i])
|
||||
res, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", tc.nodePortList[i]))
|
||||
if err != nil {
|
||||
t.Logf("ERROR: Failed to connect to listening port")
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
robots, err := ioutil.ReadAll(res.Body)
|
||||
if res.StatusCode == http.StatusServiceUnavailable {
|
||||
t.Logf("ERROR: HealthCheck returned %s: %s", res.Status, string(robots))
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
t.Logf("Error: reading body of response (%s)", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
if success {
|
||||
t.Logf("Success: All nodePorts found active")
|
||||
}
|
||||
return success
|
||||
}
|
||||
|
||||
func TestHealthChecker(t *testing.T) {
|
||||
testcases := []TestCaseData{
|
||||
{
|
||||
nodePorts: 1,
|
||||
numEndpoints: 2,
|
||||
},
|
||||
{
|
||||
nodePorts: 10,
|
||||
numEndpoints: 6,
|
||||
},
|
||||
{
|
||||
nodePorts: 100,
|
||||
numEndpoints: 1,
|
||||
},
|
||||
}
|
||||
|
||||
Run()
|
||||
|
||||
ports := startPort
|
||||
for n, tc := range testcases {
|
||||
tc.nodePortList = make([]int, tc.nodePorts)
|
||||
tc.svcNames = make([]types.NamespacedName, tc.nodePorts)
|
||||
for i := 0; i < tc.nodePorts; i++ {
|
||||
tc.svcNames[i] = chooseServiceName(n, i)
|
||||
t.Logf("Updating endpoints map for %s %d", tc.svcNames[i], tc.numEndpoints)
|
||||
for {
|
||||
UpdateEndpoints(tc.svcNames[i], generateEndpointSet(tc.numEndpoints))
|
||||
tc.nodePortList[i] = ports
|
||||
ports++
|
||||
if AddServiceListener(tc.svcNames[i], tc.nodePortList[i]) {
|
||||
break
|
||||
}
|
||||
DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i])
|
||||
// Keep searching for a port that works
|
||||
t.Logf("Failed to bind/listen on port %d...trying next port", ports-1)
|
||||
if ports > endPort {
|
||||
t.Errorf("Exhausted range of ports available for tests")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
t.Logf("Validating if all nodePorts for tc %d work", n)
|
||||
if !verifyHealthChecks(&tc, t) {
|
||||
t.Errorf("Healthcheck validation failed")
|
||||
}
|
||||
|
||||
for i := 0; i < tc.nodePorts; i++ {
|
||||
DeleteServiceListener(tc.svcNames[i], tc.nodePortList[i])
|
||||
UpdateEndpoints(tc.svcNames[i], sets.NewString())
|
||||
}
|
||||
|
||||
// Ensure that all listeners have been shutdown
|
||||
if verifyHealthChecks(&tc, t) {
|
||||
t.Errorf("Healthcheck validation failed")
|
||||
}
|
||||
}
|
||||
}
|
46
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/http.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthcheck
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// A healthCheckHandler serves http requests on /healthz on the service health check node port,
|
||||
// and responds to every request with either:
|
||||
// 200 OK and the count of endpoints for the given service that are local to this node.
|
||||
// or
|
||||
// 503 Service Unavailable if the count is zero or the service does not exist.
|
||||
type healthCheckHandler struct {
|
||||
svcNsName string
|
||||
}
|
||||
|
||||
// HTTP Utility function to send the required statusCode and error text to a http.ResponseWriter object
|
||||
func sendHealthCheckResponse(rw http.ResponseWriter, statusCode int, error string) {
|
||||
rw.Header().Set("Content-Type", "text/plain")
|
||||
rw.WriteHeader(statusCode)
|
||||
fmt.Fprint(rw, error)
|
||||
}
|
||||
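A quick test-style sketch (assuming net/http/httptest is imported) of the exact response this helper produces:

rec := httptest.NewRecorder()
sendHealthCheckResponse(rec, http.StatusServiceUnavailable, "No Service Endpoints Found")
// rec.Code == 503
// rec.Header().Get("Content-Type") == "text/plain"
// rec.Body.String() == "No Service Endpoints Found"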
|
||||
// ServeHTTP: http.Handler interface callback, invoked for requests on a service's health check listener
|
||||
func (h healthCheckHandler) ServeHTTP(response http.ResponseWriter, req *http.Request) {
|
||||
glog.V(4).Infof("Received HC Request Service %s from Cloud Load Balancer", h.svcNsName)
|
||||
healthchecker.handleHealthCheckRequest(response, h.svcNsName)
|
||||
}
|
77
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/listener.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthcheck
|
||||
|
||||
// Create/Delete dynamic listeners on the required nodePorts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// handleServiceListenerRequest: receives requests to add/remove service health check listening ports
|
||||
func (h *proxyHC) handleServiceListenerRequest(req *proxyListenerRequest) bool {
|
||||
sr, serviceFound := h.serviceResponderMap[req.serviceName]
|
||||
if !req.add {
|
||||
if !serviceFound {
|
||||
return false
|
||||
}
|
||||
glog.Infof("Deleting HealthCheckListenPort for service %s port %d",
|
||||
req.serviceName, req.listenPort)
|
||||
delete(h.serviceResponderMap, req.serviceName)
|
||||
(*sr.listener).Close()
|
||||
return true
|
||||
} else if serviceFound {
|
||||
if req.listenPort == sr.listenPort {
|
||||
// Addition requested but responder for service already exists and port is unchanged
|
||||
return true
|
||||
}
|
||||
// Addition requested; a responder for the service already exists but the listen port has changed
|
||||
glog.Infof("HealthCheckListenPort for service %s changed from %d to %d - closing old listening port",
|
||||
req.serviceName, sr.listenPort, req.listenPort)
|
||||
delete(h.serviceResponderMap, req.serviceName)
|
||||
(*sr.listener).Close()
|
||||
}
|
||||
// Create a service responder object and start listening and serving on the provided port
|
||||
glog.V(2).Infof("Adding health check listener for service %s on nodePort %d", req.serviceName, req.listenPort)
|
||||
server := http.Server{
|
||||
Addr: fmt.Sprintf(":%d", req.listenPort),
|
||||
Handler: healthCheckHandler{svcNsName: req.serviceName.String()},
|
||||
}
|
||||
listener, err := net.Listen("tcp", server.Addr)
|
||||
if err != nil {
|
||||
glog.Warningf("FAILED to listen on address %s (%s)\n", server.Addr, err)
|
||||
return false
|
||||
}
|
||||
h.serviceResponderMap[req.serviceName] = serviceResponder{serviceName: req.serviceName,
|
||||
listenPort: req.listenPort,
|
||||
listener: &listener,
|
||||
server: &server}
|
||||
go func() {
|
||||
// Anonymous goroutine to block on Serve for this listen port - Serve will exit when the listener is closed
|
||||
glog.V(3).Infof("Goroutine blocking on serving health checks for %s on port %d", req.serviceName, req.listenPort)
|
||||
if err := server.Serve(listener); err != nil {
|
||||
glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed with error %s\n", req.listenPort, req.serviceName, err)
|
||||
return
|
||||
}
|
||||
glog.V(3).Infof("Proxy HealthCheck listen socket %d for service %s closed\n", req.listenPort, req.serviceName)
|
||||
}()
|
||||
return true
|
||||
}
|
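proxyListenerRequest itself is not shown in this hunk; its field names are inferred from the usage above and in handlerLoop, and the listenPort type is assumed. A sketch of queueing an add request and waiting for the result:

func exampleAddListener(h *proxyHC, svc types.NamespacedName, port int) bool {
	req := &proxyListenerRequest{
		serviceName:     svc,
		listenPort:      port, // field type assumed to be int
		add:             true,
		responseChannel: make(chan bool),
	}
	h.listenerRequestChannel <- req
	// handlerLoop calls handleServiceListenerRequest and reports whether the
	// port was bound and is now serving health checks.
	return <-req.responseChannel
}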
53
vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/worker.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package healthcheck is a LoadBalancer healthcheck responder library for kubernetes network proxies
|
||||
package healthcheck // import "k8s.io/kubernetes/pkg/proxy/healthcheck"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
var healthchecker *proxyHC
|
||||
|
||||
// handlerLoop serializes all requests to prevent concurrent access to the maps
|
||||
func (h *proxyHC) handlerLoop() {
|
||||
ticker := time.NewTicker(1 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case req := <-h.mutationRequestChannel:
|
||||
h.handleMutationRequest(req)
|
||||
case req := <-h.listenerRequestChannel:
|
||||
req.responseChannel <- h.handleServiceListenerRequest(req)
|
||||
case <-ticker.C:
|
||||
go h.sync()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *proxyHC) sync() {
|
||||
glog.V(4).Infof("%d Health Check Listeners", len(h.serviceResponderMap))
|
||||
glog.V(4).Infof("%d Services registered for health checking", len(h.serviceEndpointsMap.List()))
|
||||
for _, svc := range h.serviceEndpointsMap.ListKeys() {
|
||||
if e, ok := h.serviceEndpointsMap.Get(svc); ok {
|
||||
endpointList := e.(*serviceEndpointsList)
|
||||
glog.V(4).Infof("Service %s has %d local endpoints", svc, endpointList.endpoints.Len())
|
||||
}
|
||||
}
|
||||
}
|
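The exported helpers the tests call (UpdateEndpoints, AddServiceListener, DeleteServiceListener) are not in this hunk; presumably they are thin wrappers that push requests onto these channels so handlerLoop stays the only goroutine touching the maps. For example, UpdateEndpoints would look roughly like:

// Presumed shape only - not shown in this diff.
func UpdateEndpoints(serviceName types.NamespacedName, endpointUids sets.String) {
	healthchecker.mutationRequestChannel <- &proxyMutationRequest{
		serviceName:  serviceName,
		endpointUids: &endpointUids,
	}
}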
64
vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["proxier.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/record:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/proxy/healthcheck:go_default_library",
|
||||
"//pkg/util/config:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/flowcontrol:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/slice:go_default_library",
|
||||
"//pkg/util/sysctl:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor:github.com/davecgh/go-spew/spew",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["proxier_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/service:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
7
vendor/k8s.io/kubernetes/pkg/proxy/iptables/OWNERS
generated
vendored
Executable file
|
@ -0,0 +1,7 @@
|
|||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- bprashanth
|
||||
- justinsb
|
||||
- freehan
|
||||
- dcbw
|
1444
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
1174
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
49
vendor/k8s.io/kubernetes/pkg/proxy/types.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
// ProxyProvider is the interface provided by proxier implementations.
|
||||
type ProxyProvider interface {
|
||||
// OnServiceUpdate manages the active set of service proxies.
|
||||
// Active service proxies are reinitialized if found in the update set or
|
||||
// removed if missing from the update set.
|
||||
OnServiceUpdate(services []api.Service)
|
||||
// Sync immediately synchronizes the ProxyProvider's current state to iptables.
|
||||
Sync()
|
||||
// SyncLoop runs periodic work.
|
||||
// This is expected to run as a goroutine or as the main loop of the app.
|
||||
// It does not return.
|
||||
SyncLoop()
|
||||
}
|
||||
|
||||
// ServicePortName carries a namespace + name + portname. This is the unique
|
||||
// identifier for a load-balanced service.
|
||||
type ServicePortName struct {
|
||||
types.NamespacedName
|
||||
Port string
|
||||
}
|
||||
|
||||
func (spn ServicePortName) String() string {
|
||||
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
|
||||
}
|
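Example of the identifier this produces (hypothetical service name):

spn := ServicePortName{
	NamespacedName: types.NamespacedName{Namespace: "default", Name: "my-svc"},
	Port:           "https",
}
fmt.Println(spn.String()) // "default/my-svc:https"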
67
vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"loadbalancer.go",
|
||||
"port_allocator.go",
|
||||
"proxier.go",
|
||||
"proxysocket.go",
|
||||
"rlimit.go",
|
||||
"roundrobin.go",
|
||||
"udp_server.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/slice:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/net",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"port_allocator_test.go",
|
||||
"proxier_test.go",
|
||||
"roundrobin_test.go",
|
||||
],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/proxy:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/net",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
6
vendor/k8s.io/kubernetes/pkg/proxy/userspace/OWNERS
generated
vendored
Executable file
|
@ -0,0 +1,6 @@
|
|||
reviewers:
|
||||
- thockin
|
||||
- lavalamp
|
||||
- smarterclayton
|
||||
- freehan
|
||||
- bprashanth
|
34
vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
)
|
||||
|
||||
// LoadBalancer is an interface for distributing incoming requests to service endpoints.
|
||||
type LoadBalancer interface {
|
||||
// NextEndpoint returns the endpoint to handle a request for the given
|
||||
// service-port and source address.
|
||||
NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
|
||||
NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
|
||||
DeleteService(service proxy.ServicePortName)
|
||||
CleanupStaleStickySessions(service proxy.ServicePortName)
|
||||
}
|
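To make the contract concrete, here is a minimal sketch of an implementation that satisfies this interface by always returning one fixed endpoint and ignoring session affinity; the real round-robin implementation lives in roundrobin.go.

// Sketch only - not part of the vendored source.
type singleEndpointLB struct {
	endpoint string // "host:port"
}

func (lb *singleEndpointLB) NextEndpoint(svc proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	return lb.endpoint, nil
}

func (lb *singleEndpointLB) NewService(svc proxy.ServicePortName, affinity api.ServiceAffinity, stickyMaxAgeMinutes int) error {
	return nil
}

func (lb *singleEndpointLB) DeleteService(svc proxy.ServicePortName) {}

func (lb *singleEndpointLB) CleanupStaleStickySessions(svc proxy.ServicePortName) {}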
158
vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator.go
generated
vendored
Normal file
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
var (
|
||||
errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
|
||||
)
|
||||
|
||||
type PortAllocator interface {
|
||||
AllocateNext() (int, error)
|
||||
Release(int)
|
||||
}
|
||||
|
||||
// randomAllocator is a PortAllocator implementation that always yields a port value of 0 from
|
||||
// AllocateNext(), leaving it to the OS to bind a random free port.
|
||||
type randomAllocator struct{}
|
||||
|
||||
// AllocateNext always returns 0
|
||||
func (r *randomAllocator) AllocateNext() (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Release is a noop
|
||||
func (r *randomAllocator) Release(_ int) {
|
||||
// noop
|
||||
}
|
||||
|
||||
// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty
|
||||
// then a random port allocator is returned; otherwise, a new range-based allocator
|
||||
// is returned.
|
||||
func newPortAllocator(r net.PortRange) PortAllocator {
|
||||
if r.Base == 0 {
|
||||
return &randomAllocator{}
|
||||
}
|
||||
return newPortRangeAllocator(r, true)
|
||||
}
|
||||
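Usage sketch (the 30000-32767 range is a hypothetical example): an empty range gives the port-0 random allocator, a non-empty range gives the channel-backed allocator defined below.

var pr net.PortRange // k8s.io/apimachinery/pkg/util/net
if err := pr.Set("30000-32767"); err != nil {
	panic(err)
}
pa := newPortAllocator(pr)
port, err := pa.AllocateNext() // a free port in the range, or an error after the 1s timeout
if err == nil {
	defer pa.Release(port)
}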
|
||||
const (
|
||||
portsBufSize = 16
|
||||
nextFreePortCooldown = 500 * time.Millisecond
|
||||
allocateNextTimeout = 1 * time.Second
|
||||
)
|
||||
|
||||
type rangeAllocator struct {
|
||||
net.PortRange
|
||||
ports chan int
|
||||
used big.Int
|
||||
lock sync.Mutex
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
|
||||
if r.Base == 0 || r.Size == 0 {
|
||||
panic("illegal argument: may not specify an empty port range")
|
||||
}
|
||||
ra := &rangeAllocator{
|
||||
PortRange: r,
|
||||
ports: make(chan int, portsBufSize),
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
if autoFill {
|
||||
go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
|
||||
}
|
||||
return ra
|
||||
}
|
||||
|
||||
// fillPorts loops, always searching for the next free port and, if found, fills the ports buffer with it.
|
||||
// This func blocks on the ports channel and returns only once there are no remaining free ports.
|
||||
func (r *rangeAllocator) fillPorts() {
|
||||
for {
|
||||
if !r.fillPortsOnce() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) fillPortsOnce() bool {
|
||||
port := r.nextFreePort()
|
||||
if port == -1 {
|
||||
return false
|
||||
}
|
||||
r.ports <- port
|
||||
return true
|
||||
}
|
||||
|
||||
// nextFreePort finds a free port, first picking a random port. If that port is already in use
|
||||
// then the port range is scanned sequentially until either a port is found or the scan completes
|
||||
// unsuccessfully. An unsuccessful scan returns a port of -1.
|
||||
func (r *rangeAllocator) nextFreePort() int {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
// choose random port
|
||||
j := r.rand.Intn(r.Size)
|
||||
if b := r.used.Bit(j); b == 0 {
|
||||
r.used.SetBit(&r.used, j, 1)
|
||||
return j + r.Base
|
||||
}
|
||||
|
||||
// search sequentially
|
||||
for i := j + 1; i < r.Size; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
for i := 0; i < j; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
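The allocation bookkeeping above is a big.Int used as a bitmap: bit i set means port Base+i is taken. A standalone illustration:

var used big.Int
used.SetBit(&used, 3, 1) // mark offset 3 (port Base+3) as allocated
fmt.Println(used.Bit(3)) // 1
used.SetBit(&used, 3, 0) // release it again
fmt.Println(used.Bit(3)) // 0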
|
||||
func (r *rangeAllocator) AllocateNext() (port int, err error) {
|
||||
select {
|
||||
case port = <-r.ports:
|
||||
case <-time.After(allocateNextTimeout):
|
||||
err = errPortRangeNoPortsRemaining
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) Release(port int) {
|
||||
port -= r.Base
|
||||
if port < 0 || port >= r.Size {
|
||||
return
|
||||
}
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.used.SetBit(&r.used, port, 0)
|
||||
}
|
178
vendor/k8s.io/kubernetes/pkg/proxy/userspace/port_allocator_test.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
)
|
||||
|
||||
func TestRangeAllocatorEmpty(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("0-0")
|
||||
defer func() {
|
||||
if rv := recover(); rv == nil {
|
||||
t.Fatalf("expected panic because of empty port range: %#v", r)
|
||||
}
|
||||
}()
|
||||
_ = newPortRangeAllocator(*r, true)
|
||||
}
|
||||
|
||||
func TestRangeAllocatorFullyAllocated(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("1-1")
|
||||
// Don't auto-fill ports, we'll manually turn the crank
|
||||
pra := newPortRangeAllocator(*r, false)
|
||||
a := pra.(*rangeAllocator)
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
// There should be no ports available
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
p, err := a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
|
||||
a.Release(p)
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 0 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
p, err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRangeAllocator_RandomishAllocation(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("1-100")
|
||||
pra := newPortRangeAllocator(*r, false)
|
||||
a := pra.(*rangeAllocator)
|
||||
|
||||
// allocate all the ports
|
||||
var err error
|
||||
ports := make([]int, 100, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
ports[i], err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if ports[i] < 1 || ports[i] > 100 {
|
||||
t.Fatalf("unexpected allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
// release them all
|
||||
for i := 0; i < 100; i++ {
|
||||
a.Release(ports[i])
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
// allocate the ports again
|
||||
rports := make([]int, 100, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
rports[i], err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if rports[i] < 1 || rports[i] > 100 {
|
||||
t.Fatalf("unexpected allocated port: %d", rports[i])
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(ports, rports) {
|
||||
t.Fatalf("expected re-allocated ports to be in a somewhat random order")
|
||||
}
|
||||
}
|
1054
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go
generated
vendored
Normal file
File diff suppressed because it is too large
855
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier_test.go
generated
vendored
Normal file
|
@ -0,0 +1,855 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
|
||||
)
|
||||
|
||||
const (
|
||||
udpIdleTimeoutForTest = 250 * time.Millisecond
|
||||
)
|
||||
|
||||
func joinHostPort(host string, port int) string {
|
||||
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
|
||||
}
|
||||
|
||||
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
|
||||
for i := 0; i < 50; i++ {
|
||||
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
conn.Close()
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("port %d still open", proxyPort)
|
||||
}
|
||||
|
||||
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
|
||||
for i := 0; i < 50; i++ {
|
||||
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
|
||||
// To detect a closed UDP port, write and then read.
|
||||
_, err = conn.Write([]byte("x"))
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
var buf [4]byte
|
||||
_, err = conn.Read(buf[0:])
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
conn.Close()
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("port %d still open", proxyPort)
|
||||
}
|
||||
|
||||
var tcpServerPort int32
|
||||
var udpServerPort int32
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Don't handle panics
|
||||
runtime.ReallyCrash = true
|
||||
|
||||
// TCP setup.
|
||||
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(r.URL.Path[1:]))
|
||||
}))
|
||||
defer tcp.Close()
|
||||
|
||||
u, err := url.Parse(tcp.URL)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
tcpServerPortValue, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
||||
}
|
||||
tcpServerPort = int32(tcpServerPortValue)
|
||||
|
||||
// UDP setup.
|
||||
udp, err := newUDPEchoServer()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
|
||||
}
|
||||
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
udpServerPortValue, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
||||
}
|
||||
udpServerPort = int32(udpServerPortValue)
|
||||
go udp.Loop()
|
||||
|
||||
ret := m.Run()
|
||||
// it should be safe to call Close() multiple times.
|
||||
tcp.Close()
|
||||
os.Exit(ret)
|
||||
}
|
||||
|
||||
func testEchoTCP(t *testing.T, address string, port int) {
|
||||
path := "aaaaa"
|
||||
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to server: %v", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("error reading data: %v %v", err, string(data))
|
||||
}
|
||||
if string(data) != path {
|
||||
t.Errorf("expected: %s, got %s", path, string(data))
|
||||
}
|
||||
}
|
||||
|
||||
func testEchoUDP(t *testing.T, address string, port int) {
|
||||
data := "abc123"
|
||||
|
||||
conn, err := net.Dial("udp", joinHostPort(address, port))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to server: %v", err)
|
||||
}
|
||||
if _, err := conn.Write([]byte(data)); err != nil {
|
||||
t.Fatalf("error sending to server: %v", err)
|
||||
}
|
||||
var resp [1024]byte
|
||||
n, err := conn.Read(resp[0:])
|
||||
if err != nil {
|
||||
t.Errorf("error receiving data: %v", err)
|
||||
}
|
||||
if string(resp[0:n]) != data {
|
||||
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
|
||||
}
|
||||
}
|
||||
|
||||
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
|
||||
var got int32
|
||||
for i := 0; i < 600; i++ {
|
||||
got = atomic.LoadInt32(&p.numProxyLoops)
|
||||
if got == want {
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
|
||||
}
|
||||
|
||||
func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) {
|
||||
var got int
|
||||
now := time.Now()
|
||||
deadline := now.Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
s.activeClients.mu.Lock()
|
||||
got = len(s.activeClients.clients)
|
||||
s.activeClients.mu.Unlock()
|
||||
if got == want {
|
||||
return
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
t.Errorf("expected %d ProxyClients live, got %d", want, got)
|
||||
}
|
||||
|
||||
func TestTCPProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyTimeout(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// When connecting to a UDP service endpoint, the proxy should hold one active client Conn.
|
||||
waitForNumProxyClients(t, svcInfo, 1, time.Second)
|
||||
// If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout.
|
||||
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
|
||||
}
|
||||
|
||||
func TestMultiPortProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
|
||||
}},
|
||||
}, {
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
|
||||
}},
|
||||
}})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfoP.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
svcInfoQ, err := p.addServiceOnPort(serviceQ, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfoQ.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 2)
|
||||
}
|
||||
|
||||
func TestMultiPortOnServiceUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
|
||||
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 80,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "q",
|
||||
Port: 81,
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
waitForNumProxyLoops(t, p, 2)
|
||||
svcInfo, exists := p.getServiceInfo(serviceP)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", serviceP)
|
||||
}
|
||||
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
|
||||
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
|
||||
}
|
||||
|
||||
svcInfo, exists = p.getServiceInfo(serviceQ)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", serviceQ)
|
||||
}
|
||||
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
|
||||
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
|
||||
}
|
||||
|
||||
svcInfo, exists = p.getServiceInfo(serviceX)
|
||||
if exists {
|
||||
t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper: Stops the proxy for the named service.
|
||||
func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
|
||||
info, found := proxier.getServiceInfo(service)
|
||||
if !found {
|
||||
return fmt.Errorf("unknown service: %s", service)
|
||||
}
|
||||
return proxier.stopProxy(service, info)
|
||||
}
|
||||
|
||||
func TestTCPProxyStop(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
if !svcInfo.isAlive() {
|
||||
t.Fatalf("wrong value for isAlive(): expected true")
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
stopProxyByName(p, service)
|
||||
if svcInfo.isAlive() {
|
||||
t.Fatalf("wrong value for isAlive(): expected false")
|
||||
}
|
||||
// Wait for the port to really close.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestUDPProxyStop(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
stopProxyByName(p, service)
|
||||
// Wait for the port to really close.
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdateDelete(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdateDelete(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
// need to add the endpoint here because it got cleaned up during the service delete
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", service)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
// need to add the endpoint here because it got cleaned up during the service delete
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdatePort(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 99,
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// This is a bit async, but this should be sufficient.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdatePort(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 99,
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestProxyUpdatePublicIPs(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{
|
||||
Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.portal.port),
|
||||
Protocol: "TCP",
|
||||
}},
|
||||
ClusterIP: svcInfo.portal.ip.String(),
|
||||
ExternalIPs: []string{"4.3.2.1"},
|
||||
},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// This is a bit async, but this should be sufficient.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestProxyUpdatePortal(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
_, exists := p.getServiceInfo(service)
|
||||
if exists {
|
||||
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
|
||||
}
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
_, exists = p.getServiceInfo(service)
|
||||
if exists {
|
||||
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
|
||||
}
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
svcInfo, exists = p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("service with ClusterIP set not found in the proxy")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in
|
300
vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go
generated
vendored
Normal file
|
@@ -0,0 +1,300 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
)
|
||||
|
||||
// Abstraction over TCP/UDP sockets which are proxied.
|
||||
type proxySocket interface {
|
||||
// Addr gets the net.Addr for a proxySocket.
|
||||
Addr() net.Addr
|
||||
// Close stops the proxySocket from accepting incoming connections.
|
||||
// Each implementation should comment on the impact of calling Close
|
||||
// while sessions are active.
|
||||
Close() error
|
||||
// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
|
||||
ProxyLoop(service proxy.ServicePortName, info *serviceInfo, proxier *Proxier)
|
||||
// ListenPort returns the host port that the proxySocket is listening on
|
||||
ListenPort() int
|
||||
}
|
||||
|
||||
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
|
||||
host := ""
|
||||
if ip != nil {
|
||||
host = ip.String()
|
||||
}
|
||||
|
||||
switch strings.ToUpper(string(protocol)) {
|
||||
case "TCP":
|
||||
listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpProxySocket{Listener: listener, port: port}, nil
|
||||
case "UDP":
|
||||
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &udpProxySocket{UDPConn: conn, port: port}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown protocol %q", protocol)
|
||||
}
|
||||
|
||||
// Per-attempt timeouts, in increasing order, used when dialing a backend
|
||||
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
|
||||
|
||||
// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
|
||||
// no new connections are allowed but existing connections are left untouched.
|
||||
type tcpProxySocket struct {
|
||||
net.Listener
|
||||
port int
|
||||
}
|
||||
|
||||
func (tcp *tcpProxySocket) ListenPort() int {
|
||||
return tcp.port
|
||||
}
|
||||
|
||||
func tryConnect(service proxy.ServicePortName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
|
||||
sessionAffinityReset := false
|
||||
for _, dialTimeout := range endpointDialTimeout {
|
||||
endpoint, err := proxier.loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
|
||||
// TODO: This could spin up a new goroutine to make the outbound connection,
|
||||
// and keep accepting inbound traffic.
|
||||
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
|
||||
if err != nil {
|
||||
if isTooManyFDsError(err) {
|
||||
panic("Dial failed: " + err.Error())
|
||||
}
|
||||
glog.Errorf("Dial failed: %v", err)
|
||||
sessionAffinityReset = true
|
||||
continue
|
||||
}
|
||||
return outConn, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to connect to an endpoint.")
|
||||
}
|
||||
|
||||
func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
|
||||
for {
|
||||
if !myInfo.isAlive() {
|
||||
// The service port was closed or replaced.
|
||||
return
|
||||
}
|
||||
// Block until a connection is made.
|
||||
inConn, err := tcp.Accept()
|
||||
if err != nil {
|
||||
if isTooManyFDsError(err) {
|
||||
panic("Accept failed: " + err.Error())
|
||||
}
|
||||
|
||||
if isClosedError(err) {
|
||||
return
|
||||
}
|
||||
if !myInfo.isAlive() {
|
||||
// Then the service port was just closed so the accept failure is to be expected.
|
||||
return
|
||||
}
|
||||
glog.Errorf("Accept failed: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
|
||||
outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to connect to balancer: %v", err)
|
||||
inConn.Close()
|
||||
continue
|
||||
}
|
||||
// Spin up an async copy loop.
|
||||
go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
|
||||
}
|
||||
}
|
||||
|
||||
// proxyTCP proxies data bi-directionally between in and out.
|
||||
func proxyTCP(in, out *net.TCPConn) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
|
||||
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
|
||||
go copyBytes("from backend", in, out, &wg)
|
||||
go copyBytes("to backend", out, in, &wg)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
|
||||
n, err := io.Copy(dest, src)
|
||||
if err != nil {
|
||||
if !isClosedError(err) {
|
||||
glog.Errorf("I/O error: %v", err)
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
|
||||
dest.Close()
|
||||
src.Close()
|
||||
}
|
||||
|
||||
// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
|
||||
// no new connections are allowed and existing connections are broken.
|
||||
// TODO: We could lame-duck this ourselves, if it becomes important.
|
||||
type udpProxySocket struct {
|
||||
*net.UDPConn
|
||||
port int
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) ListenPort() int {
|
||||
return udp.port
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) Addr() net.Addr {
|
||||
return udp.LocalAddr()
|
||||
}
|
||||
|
||||
// Holds all the known UDP clients that have not timed out.
|
||||
type clientCache struct {
|
||||
mu sync.Mutex
|
||||
clients map[string]net.Conn // addr string -> connection
|
||||
}
|
||||
|
||||
func newClientCache() *clientCache {
|
||||
return &clientCache{clients: map[string]net.Conn{}}
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
|
||||
var buffer [4096]byte // 4KiB should be enough for most whole-packets
|
||||
for {
|
||||
if !myInfo.isAlive() {
|
||||
// The service port was closed or replaced.
|
||||
break
|
||||
}
|
||||
|
||||
// Block until data arrives.
|
||||
// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
|
||||
n, cliAddr, err := udp.ReadFrom(buffer[0:])
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok {
|
||||
if e.Temporary() {
|
||||
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
|
||||
break
|
||||
}
|
||||
// If this is a client we know already, reuse the connection and goroutine.
|
||||
svrConn, err := udp.getBackendConn(myInfo.activeClients, cliAddr, proxier, service, myInfo.timeout)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// TODO: It would be nice to let the goroutine handle this write, but we don't
|
||||
// really want to copy the buffer. We could do a pool of buffers or something.
|
||||
_, err = svrConn.Write(buffer[0:n])
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("Write failed: %v", err)
|
||||
// TODO: Maybe tear down the goroutine for this client/server pair?
|
||||
}
|
||||
continue
|
||||
}
|
||||
err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
|
||||
if err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
|
||||
activeClients.mu.Lock()
|
||||
defer activeClients.mu.Unlock()
|
||||
|
||||
svrConn, found := activeClients.clients[cliAddr.String()]
|
||||
if !found {
|
||||
// TODO: This could spin up a new goroutine to make the outbound connection,
|
||||
// and keep accepting inbound traffic.
|
||||
glog.V(3).Infof("New UDP connection from %s", cliAddr)
|
||||
var err error
|
||||
svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
activeClients.clients[cliAddr.String()] = svrConn
|
||||
go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
|
||||
defer runtime.HandleCrash()
|
||||
udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
|
||||
}(cliAddr, svrConn, activeClients, timeout)
|
||||
}
|
||||
return svrConn, nil
|
||||
}
|
||||
|
||||
// This function is expected to be called as a goroutine.
|
||||
// TODO: Track and log bytes copied, like TCP
|
||||
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
|
||||
defer svrConn.Close()
|
||||
var buffer [4096]byte
|
||||
for {
|
||||
n, err := svrConn.Read(buffer[0:])
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("Read failed: %v", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
err = svrConn.SetDeadline(time.Now().Add(timeout))
|
||||
if err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
break
|
||||
}
|
||||
n, err = udp.WriteTo(buffer[0:n], cliAddr)
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("WriteTo failed: %v", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
activeClients.mu.Lock()
|
||||
delete(activeClients.clients, cliAddr.String())
|
||||
activeClients.mu.Unlock()
|
||||
}
|
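The tryConnect helper above dials the chosen endpoint with an escalating ladder of timeouts and, on failure, retries after resetting session affinity. A self-contained sketch of that retry pattern follows; the endpoint address, timeout ladder, and function names are illustrative, and a plain net.DialTimeout stands in for the proxier's load-balancer lookup.

package main

import (
	"fmt"
	"net"
	"time"
)

// dialTimeouts mirrors the escalating per-attempt timeouts used above.
var dialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// dialWithRetries tries each timeout in order and returns the first successful connection.
func dialWithRetries(protocol, endpoint string) (net.Conn, error) {
	for _, timeout := range dialTimeouts {
		conn, err := net.DialTimeout(protocol, endpoint, timeout)
		if err != nil {
			// The real proxier would also reset session affinity and ask the
			// load balancer for a fresh endpoint here before retrying.
			continue
		}
		return conn, nil
	}
	return nil, fmt.Errorf("failed to connect to %s", endpoint)
}

func main() {
	if conn, err := dialWithRetries("tcp", "127.0.0.1:9376"); err == nil {
		conn.Close()
	}
}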
25
vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit.go
generated
vendored
Normal file
|
@@ -0,0 +1,25 @@
|
|||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import "syscall"
|
||||
|
||||
func setRLimit(limit uint64) error {
|
||||
return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{Max: limit, Cur: limit})
|
||||
}
|
23
vendor/k8s.io/kubernetes/pkg/proxy/userspace/rlimit_windows.go
generated
vendored
Normal file
|
@@ -0,0 +1,23 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
func setRLimit(limit uint64) error {
|
||||
return nil
|
||||
}
|
326
vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go
generated
vendored
Normal file
|
@@ -0,0 +1,326 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
"k8s.io/kubernetes/pkg/util/slice"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrMissingServiceEntry = errors.New("missing service entry")
|
||||
ErrMissingEndpoints = errors.New("missing endpoints")
|
||||
)
|
||||
|
||||
type affinityState struct {
|
||||
clientIP string
|
||||
//clientProtocol api.Protocol //not yet used
|
||||
//sessionCookie string //not yet used
|
||||
endpoint string
|
||||
lastUsed time.Time
|
||||
}
|
||||
|
||||
type affinityPolicy struct {
|
||||
affinityType api.ServiceAffinity
|
||||
affinityMap map[string]*affinityState // map client IP -> affinity info
|
||||
ttlMinutes int
|
||||
}
|
||||
|
||||
// LoadBalancerRR is a round-robin load balancer.
|
||||
type LoadBalancerRR struct {
|
||||
lock sync.RWMutex
|
||||
services map[proxy.ServicePortName]*balancerState
|
||||
}
|
||||
|
||||
// Ensure this implements LoadBalancer.
|
||||
var _ LoadBalancer = &LoadBalancerRR{}
|
||||
|
||||
type balancerState struct {
|
||||
endpoints []string // a list of "ip:port" style strings
|
||||
index int // current index into endpoints
|
||||
affinity affinityPolicy
|
||||
}
|
||||
|
||||
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlMinutes int) *affinityPolicy {
|
||||
return &affinityPolicy{
|
||||
affinityType: affinityType,
|
||||
affinityMap: make(map[string]*affinityState),
|
||||
ttlMinutes: ttlMinutes,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLoadBalancerRR returns a new LoadBalancerRR.
|
||||
func NewLoadBalancerRR() *LoadBalancerRR {
|
||||
return &LoadBalancerRR{
|
||||
services: map[proxy.ServicePortName]*balancerState{},
|
||||
}
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) error {
|
||||
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
lb.newServiceInternal(svcPort, affinityType, ttlMinutes)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This assumes that lb.lock is already held.
|
||||
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) *balancerState {
|
||||
if ttlMinutes == 0 {
|
||||
ttlMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimited instead????
|
||||
}
|
||||
|
||||
if _, exists := lb.services[svcPort]; !exists {
|
||||
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlMinutes)}
|
||||
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
|
||||
} else if affinityType != "" {
|
||||
lb.services[svcPort].affinity.affinityType = affinityType
|
||||
}
|
||||
return lb.services[svcPort]
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
|
||||
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
delete(lb.services, svcPort)
|
||||
}
|
||||
|
||||
// return true if this service is using some form of session affinity.
|
||||
func isSessionAffinity(affinity *affinityPolicy) bool {
|
||||
// Should never be empty string, but checking for it to be safe.
|
||||
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NextEndpoint returns a service endpoint.
|
||||
// The service endpoint is chosen using the round-robin algorithm.
|
||||
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
|
||||
// Coarse locking is simple. We can get more fine-grained if/when we
|
||||
// can prove it matters.
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists || state == nil {
|
||||
return "", ErrMissingServiceEntry
|
||||
}
|
||||
if len(state.endpoints) == 0 {
|
||||
return "", ErrMissingEndpoints
|
||||
}
|
||||
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
|
||||
|
||||
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
|
||||
|
||||
var ipaddr string
|
||||
if sessionAffinityEnabled {
|
||||
// Caution: don't shadow ipaddr
|
||||
var err error
|
||||
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
|
||||
}
|
||||
if !sessionAffinityReset {
|
||||
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
|
||||
if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Minutes()) < state.affinity.ttlMinutes {
|
||||
// Affinity wins.
|
||||
endpoint := sessionAffinity.endpoint
|
||||
sessionAffinity.lastUsed = time.Now()
|
||||
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
|
||||
return endpoint, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Take the next endpoint.
|
||||
endpoint := state.endpoints[state.index]
|
||||
state.index = (state.index + 1) % len(state.endpoints)
|
||||
|
||||
if sessionAffinityEnabled {
|
||||
var affinity *affinityState
|
||||
affinity = state.affinity.affinityMap[ipaddr]
|
||||
if affinity == nil {
|
||||
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
|
||||
state.affinity.affinityMap[ipaddr] = affinity
|
||||
}
|
||||
affinity.lastUsed = time.Now()
|
||||
affinity.endpoint = endpoint
|
||||
affinity.clientIP = ipaddr
|
||||
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
type hostPortPair struct {
|
||||
host string
|
||||
port int
|
||||
}
|
||||
|
||||
func isValidEndpoint(hpp *hostPortPair) bool {
|
||||
return hpp.host != "" && hpp.port > 0
|
||||
}
|
||||
|
||||
func flattenValidEndpoints(endpoints []hostPortPair) []string {
|
||||
// Convert Endpoint objects into strings for easier use later. Ignore
|
||||
// the protocol field - we'll get that from the Service objects.
|
||||
var result []string
|
||||
for i := range endpoints {
|
||||
hpp := &endpoints[i]
|
||||
if isValidEndpoint(hpp) {
|
||||
result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Remove any session affinity records associated with a particular endpoint (for example, when a pod goes down).
|
||||
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
|
||||
for _, affinity := range state.affinity.affinityMap {
|
||||
if affinity.endpoint == endpoint {
|
||||
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
|
||||
delete(state.affinity.affinityMap, affinity.clientIP)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
|
||||
// Then remove any session affinity records that are not in both lists.
|
||||
// This assumes the lb.lock is held.
|
||||
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
|
||||
allEndpoints := map[string]int{}
|
||||
for _, newEndpoint := range newEndpoints {
|
||||
allEndpoints[newEndpoint] = 1
|
||||
}
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
for _, existingEndpoint := range state.endpoints {
|
||||
allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
|
||||
}
|
||||
for mKey, mVal := range allEndpoints {
|
||||
if mVal == 1 {
|
||||
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
|
||||
removeSessionAffinityByEndpoint(state, svcPort, mKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnEndpointsUpdate manages the registered service endpoints.
|
||||
// Registered endpoints are updated if found in the update set or
|
||||
// unregistered if missing from the update set.
|
||||
func (lb *LoadBalancerRR) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
|
||||
registeredEndpoints := make(map[proxy.ServicePortName]bool)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
// Update endpoints for services.
|
||||
for i := range allEndpoints {
|
||||
svcEndpoints := &allEndpoints[i]
|
||||
|
||||
// We need to build a map of portname -> all ip:ports for that
|
||||
// portname. Explode Endpoints.Subsets[*] into this structure.
|
||||
portsToEndpoints := map[string][]hostPortPair{}
|
||||
for i := range svcEndpoints.Subsets {
|
||||
ss := &svcEndpoints.Subsets[i]
|
||||
for i := range ss.Ports {
|
||||
port := &ss.Ports[i]
|
||||
for i := range ss.Addresses {
|
||||
addr := &ss.Addresses[i]
|
||||
portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
|
||||
// Ignore the protocol field - we'll get that from the Service objects.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for portname := range portsToEndpoints {
|
||||
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: svcEndpoints.Namespace, Name: svcEndpoints.Name}, Port: portname}
|
||||
state, exists := lb.services[svcPort]
|
||||
curEndpoints := []string{}
|
||||
if state != nil {
|
||||
curEndpoints = state.endpoints
|
||||
}
|
||||
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
|
||||
|
||||
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
|
||||
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
|
||||
lb.updateAffinityMap(svcPort, newEndpoints)
|
||||
// OnEndpointsUpdate can be called without NewService being called externally.
|
||||
// To be safe we will call it here. A new service will only be created
|
||||
// if one does not already exist. The affinity will be updated
|
||||
// later, once NewService is called.
|
||||
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
|
||||
state.endpoints = slice.ShuffleStrings(newEndpoints)
|
||||
|
||||
// Reset the round-robin index.
|
||||
state.index = 0
|
||||
}
|
||||
registeredEndpoints[svcPort] = true
|
||||
}
|
||||
}
|
||||
// Remove endpoints missing from the update.
|
||||
for k := range lb.services {
|
||||
if _, exists := registeredEndpoints[k]; !exists {
|
||||
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", k)
|
||||
// Reset but don't delete.
|
||||
state := lb.services[k]
|
||||
state.endpoints = []string{}
|
||||
state.index = 0
|
||||
state.affinity.affinityMap = map[string]*affinityState{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests whether two slices are equivalent. This sorts both slices in-place.
|
||||
func slicesEquiv(lhs, rhs []string) bool {
|
||||
if len(lhs) != len(rhs) {
|
||||
return false
|
||||
}
|
||||
if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
for ip, affinity := range state.affinity.affinityMap {
|
||||
if int(time.Now().Sub(affinity.lastUsed).Minutes()) >= state.affinity.ttlMinutes {
|
||||
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
|
||||
delete(state.affinity.affinityMap, ip)
|
||||
}
|
||||
}
|
||||
}
|
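A minimal sketch of driving the round-robin balancer through its exported surface, mirroring what the tests below exercise. The import path assumes the vendored package is reachable from the caller's module, and the namespace, IPs, and port values are illustrative.

package main

import (
	"fmt"
	"net"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/userspace"
)

func main() {
	lb := userspace.NewLoadBalancerRR()
	svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "default", Name: "echo"}, Port: "p"}

	// Client-IP session affinity; a TTL of 0 falls back to the 180-minute default.
	lb.NewService(svc, api.ServiceAffinityClientIP, 0)

	// Register two endpoints for the "p" port; they are shuffled internally.
	lb.OnEndpointsUpdate([]api.Endpoints{{
		ObjectMeta: api.ObjectMeta{Name: svc.Name, Namespace: svc.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: 8080}},
		}},
	}})

	// With affinity enabled, the same client keeps its endpoint until the TTL expires.
	client := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}
	ep, err := lb.NextEndpoint(svc, client, false)
	fmt.Println(ep, err)
}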
727
vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,727 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package userspace
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
)
|
||||
|
||||
func TestValidateWorks(t *testing.T) {
|
||||
if isValidEndpoint(&hostPortPair{}) {
|
||||
t.Errorf("Didn't fail for empty set")
|
||||
}
|
||||
if isValidEndpoint(&hostPortPair{host: "foobar"}) {
|
||||
t.Errorf("Didn't fail with invalid port")
|
||||
}
|
||||
if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
|
||||
t.Errorf("Didn't fail with a negative port")
|
||||
}
|
||||
if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
|
||||
t.Errorf("Failed a valid config.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterWorks(t *testing.T) {
|
||||
endpoints := []hostPortPair{
|
||||
{host: "foobar", port: 1},
|
||||
{host: "foobar", port: 2},
|
||||
{host: "foobar", port: -1},
|
||||
{host: "foobar", port: 3},
|
||||
{host: "foobar", port: -2},
|
||||
}
|
||||
filtered := flattenValidEndpoints(endpoints)
|
||||
|
||||
if len(filtered) != 3 {
|
||||
t.Errorf("Failed to filter to the correct size")
|
||||
}
|
||||
if filtered[0] != "foobar:1" {
|
||||
t.Errorf("Index zero is not foobar:1")
|
||||
}
|
||||
if filtered[1] != "foobar:2" {
|
||||
t.Errorf("Index one is not foobar:2")
|
||||
}
|
||||
if filtered[2] != "foobar:3" {
|
||||
t.Errorf("Index two is not foobar:3")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
var endpoints []api.Endpoints
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
if len(endpoint) != 0 {
|
||||
t.Errorf("Got an endpoint")
|
||||
}
|
||||
}
|
||||
|
||||
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
|
||||
}
|
||||
if endpoint != expected {
|
||||
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
|
||||
}
|
||||
if endpoint != expected {
|
||||
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
}
|
||||
|
||||
func stringsInSlice(haystack []string, needles ...string) bool {
|
||||
for _, needle := range needles {
|
||||
found := false
|
||||
for i := range haystack {
|
||||
if haystack[i] == needle {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
|
||||
// Clear endpoints
|
||||
endpoints[0] = api.Endpoints{ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 2)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
|
||||
},
|
||||
},
|
||||
}
|
||||
endpoints[1] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
|
||||
|
||||
shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
|
||||
// Then update the configuration by removing foo
|
||||
loadBalancer.OnEndpointsUpdate(endpoints[1:])
|
||||
endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// but bar is still there, and we continue RR from where we left off.
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call OnEndpointsUpdate() before NewService()
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
|
||||
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
|
||||
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
client1Endpoint := shuffledEndpoints[0]
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
client2Endpoint := shuffledEndpoints[1]
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
||||
client3Endpoint := shuffledEndpoints[2]
|
||||
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
if client1Endpoint == "endpoint:3" {
|
||||
client1Endpoint = shuffledEndpoints[0]
|
||||
} else if client2Endpoint == "endpoint:3" {
|
||||
client2Endpoint = shuffledEndpoints[0]
|
||||
} else if client3Endpoint == "endpoint:3" {
|
||||
client3Endpoint = shuffledEndpoints[0]
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
||||
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
|
||||
// Clear endpoints
|
||||
endpoints[0] = api.Endpoints{ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 2)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
|
||||
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, 0)
|
||||
endpoints[1] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
|
||||
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
|
||||
// Then update the configuration by removing foo
|
||||
loadBalancer.OnEndpointsUpdate(endpoints[1:])
|
||||
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// but bar is still there, and we continue RR from where we left off.
|
||||
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
||||
}
|
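The affinity tests above all exercise the same behaviour: with ServiceAffinityClientIP, the first request from a client address is served round-robin, and later requests from that address stay pinned to the same endpoint until that endpoint drops out of an endpoints update. A minimal, self-contained sketch of that selection rule follows; the names are hypothetical and none of the locking, TTL expiry, or endpoint shuffling of the vendored roundrobin.go is reproduced.

package main

import (
    "errors"
    "fmt"
)

// affinityBalancer is an illustrative, stripped-down round-robin balancer
// with per-client stickiness. It is not the vendored implementation.
type affinityBalancer struct {
    endpoints []string          // current endpoint list for one service port
    index     int               // next round-robin position
    affinity  map[string]string // client address -> pinned endpoint
}

func newAffinityBalancer(endpoints []string) *affinityBalancer {
    return &affinityBalancer{endpoints: endpoints, affinity: map[string]string{}}
}

// next returns the pinned endpoint for client if one exists, otherwise the
// next endpoint in round-robin order, pinning the client to it.
func (b *affinityBalancer) next(client string) (string, error) {
    if len(b.endpoints) == 0 {
        return "", errors.New("no endpoints")
    }
    if ep, ok := b.affinity[client]; ok {
        return ep, nil
    }
    ep := b.endpoints[b.index%len(b.endpoints)]
    b.index++
    b.affinity[client] = ep
    return ep, nil
}

func main() {
    b := newAffinityBalancer([]string{"endpoint:1", "endpoint:2", "endpoint:3"})
    for _, c := range []string{"127.0.0.1", "127.0.0.2", "127.0.0.1"} {
        ep, _ := b.next(c)
        fmt.Println(c, "->", ep) // 127.0.0.1 keeps getting the same endpoint
    }
}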
54
vendor/k8s.io/kubernetes/pkg/proxy/userspace/udp_server.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
    "fmt"
    "net"
)

// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
type udpEchoServer struct {
    net.PacketConn
}

func (r *udpEchoServer) Loop() {
    var buffer [4096]byte
    for {
        n, cliAddr, err := r.ReadFrom(buffer[0:])
        if err != nil {
            fmt.Printf("ReadFrom failed: %v\n", err)
            continue
        }
        r.WriteTo(buffer[0:n], cliAddr)
    }
}

func newUDPEchoServer() (*udpEchoServer, error) {
    packetconn, err := net.ListenPacket("udp", ":0")
    if err != nil {
        return nil, err
    }
    return &udpEchoServer{packetconn}, nil
}

/*
func main() {
    r,_ := newUDPEchoServer()
    r.Loop()
}
*/
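The proxier tests further down in this commit use this echo server as their UDP backend: TestMain builds one with newUDPEchoServer(), runs Loop() in a goroutine, and the tests then dial the proxy and expect their bytes back. A rough standalone sketch of that pattern, outside the test harness:

package main

import (
    "fmt"
    "net"
    "time"
)

func main() {
    // Stand-in for newUDPEchoServer(): listen on a random local UDP port
    // and echo every datagram back to its sender.
    pc, err := net.ListenPacket("udp", ":0")
    if err != nil {
        panic(err)
    }
    go func() {
        buf := make([]byte, 4096)
        for {
            n, addr, err := pc.ReadFrom(buf)
            if err != nil {
                return
            }
            pc.WriteTo(buf[:n], addr)
        }
    }()

    // Client side: send a datagram and read the echo, as the proxier tests do.
    conn, err := net.Dial("udp", pc.LocalAddr().String())
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    conn.SetDeadline(time.Now().Add(time.Second))
    conn.Write([]byte("hello"))
    reply := make([]byte, 16)
    n, _ := conn.Read(reply)
    fmt.Println(string(reply[:n])) // "hello"
}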
66
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD
generated
vendored
Normal file
@@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "loadbalancer.go",
        "port_allocator.go",
        "proxier.go",
        "proxysocket.go",
        "roundrobin.go",
        "udp_server.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/proxy:go_default_library",
        "//pkg/util/netsh:go_default_library",
        "//pkg/util/slice:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/net",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "port_allocator_test.go",
        "proxier_test.go",
        "roundrobin_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/proxy:go_default_library",
        "//pkg/util/netsh/testing:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/net",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
34
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "net"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/proxy"
)

// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
    // NextEndpoint returns the endpoint to handle a request for the given
    // service-port and source address.
    NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
    NewService(service proxy.ServicePortName, sessionAffinityType api.ServiceAffinity, stickyMaxAgeMinutes int) error
    DeleteService(service proxy.ServicePortName)
    CleanupStaleStickySessions(service proxy.ServicePortName)
}
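proxier.go below drives this interface in two steps: OnServiceUpdate registers each service port with NewService, and every accepted connection then asks NextEndpoint which backend to dial. A condensed sketch of that call pattern; the helper below is hypothetical, and the real dial/copy loop lives in proxysocket.go, which is not shown here.

package winuserspace

// Illustrative only: how a caller is expected to drive the LoadBalancer interface.

import (
    "fmt"
    "io"
    "net"
    "time"

    "k8s.io/kubernetes/pkg/proxy"
)

func handleConnection(lb LoadBalancer, svc proxy.ServicePortName, in net.Conn) error {
    // Pick a backend, honouring any ClientIP affinity recorded for this client.
    endpoint, err := lb.NextEndpoint(svc, in.RemoteAddr(), false)
    if err != nil {
        return fmt.Errorf("no endpoint for %s: %v", svc, err)
    }
    out, err := net.DialTimeout("tcp", endpoint, 5*time.Second)
    if err != nil {
        // On a dial failure the userspace proxy asks again with
        // sessionAffinityReset=true so a dead sticky endpoint is replaced.
        if endpoint, err = lb.NextEndpoint(svc, in.RemoteAddr(), true); err != nil {
            return err
        }
        if out, err = net.DialTimeout("tcp", endpoint, 5*time.Second); err != nil {
            return err
        }
    }
    defer out.Close()
    go io.Copy(out, in)       // client -> backend
    _, err = io.Copy(in, out) // backend -> client
    return err
}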
158
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/port_allocator.go
generated
vendored
Normal file
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package winuserspace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
var (
|
||||
errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
|
||||
)
|
||||
|
||||
type PortAllocator interface {
|
||||
AllocateNext() (int, error)
|
||||
Release(int)
|
||||
}
|
||||
|
||||
// randomAllocator is a PortAllocator implementation that allocates random ports, yielding
|
||||
// a port value of 0 for every call to AllocateNext().
|
||||
type randomAllocator struct{}
|
||||
|
||||
// AllocateNext always returns 0
|
||||
func (r *randomAllocator) AllocateNext() (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Release is a noop
|
||||
func (r *randomAllocator) Release(_ int) {
|
||||
// noop
|
||||
}
|
||||
|
||||
// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty
|
||||
// then a random port allocator is returned; otherwise, a new range-based allocator
|
||||
// is returned.
|
||||
func newPortAllocator(r net.PortRange) PortAllocator {
|
||||
if r.Base == 0 {
|
||||
return &randomAllocator{}
|
||||
}
|
||||
return newPortRangeAllocator(r, true)
|
||||
}
|
||||
|
||||
const (
|
||||
portsBufSize = 16
|
||||
nextFreePortCooldown = 500 * time.Millisecond
|
||||
allocateNextTimeout = 1 * time.Second
|
||||
)
|
||||
|
||||
type rangeAllocator struct {
|
||||
net.PortRange
|
||||
ports chan int
|
||||
used big.Int
|
||||
lock sync.Mutex
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
|
||||
if r.Base == 0 || r.Size == 0 {
|
||||
panic("illegal argument: may not specify an empty port range")
|
||||
}
|
||||
ra := &rangeAllocator{
|
||||
PortRange: r,
|
||||
ports: make(chan int, portsBufSize),
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
if autoFill {
|
||||
go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
|
||||
}
|
||||
return ra
|
||||
}
|
||||
|
||||
// fillPorts loops, always searching for the next free port and, if found, fills the ports buffer with it.
|
||||
// this func blocks unless there are no remaining free ports.
|
||||
func (r *rangeAllocator) fillPorts() {
|
||||
for {
|
||||
if !r.fillPortsOnce() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) fillPortsOnce() bool {
|
||||
port := r.nextFreePort()
|
||||
if port == -1 {
|
||||
return false
|
||||
}
|
||||
r.ports <- port
|
||||
return true
|
||||
}
|
||||
|
||||
// nextFreePort finds a free port, first picking a random port. if that port is already in use
|
||||
// then the port range is scanned sequentially until either a port is found or the scan completes
|
||||
// unsuccessfully. an unsuccessful scan returns a port of -1.
|
||||
func (r *rangeAllocator) nextFreePort() int {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
// choose random port
|
||||
j := r.rand.Intn(r.Size)
|
||||
if b := r.used.Bit(j); b == 0 {
|
||||
r.used.SetBit(&r.used, j, 1)
|
||||
return j + r.Base
|
||||
}
|
||||
|
||||
// search sequentially
|
||||
for i := j + 1; i < r.Size; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
for i := 0; i < j; i++ {
|
||||
if b := r.used.Bit(i); b == 0 {
|
||||
r.used.SetBit(&r.used, i, 1)
|
||||
return i + r.Base
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) AllocateNext() (port int, err error) {
|
||||
select {
|
||||
case port = <-r.ports:
|
||||
case <-time.After(allocateNextTimeout):
|
||||
err = errPortRangeNoPortsRemaining
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *rangeAllocator) Release(port int) {
|
||||
port -= r.Base
|
||||
if port < 0 || port >= r.Size {
|
||||
return
|
||||
}
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.used.SetBit(&r.used, port, 0)
|
||||
}
|
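AllocateNext above never scans the range itself; it receives from the ports channel that the wait.Forever goroutine keeps topped up via fillPorts, and it times out with errPortRangeNoPortsRemaining after allocateNextTimeout once the range is exhausted. A hedged sketch of how a consumer might reserve and release a port; the function and the port range value are illustrative, not part of the vendored tree.

package winuserspace

// Sketch only: reserving a proxy port from a configured range and releasing it.

import (
    "fmt"
    "net"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func listenOnAllocatedPort() (net.Listener, func(), error) {
    pr := utilnet.PortRange{}
    if err := pr.Set("36000-36100"); err != nil { // illustrative range
        return nil, nil, err
    }
    // Base != 0, so this builds the range allocator with the auto-fill goroutine;
    // an empty range would instead return the randomAllocator (always port 0).
    alloc := newPortAllocator(pr)

    port, err := alloc.AllocateNext()
    if err != nil {
        // errPortRangeNoPortsRemaining: every port in the range is in use.
        return nil, nil, err
    }
    l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
    if err != nil {
        alloc.Release(port)
        return nil, nil, err
    }
    release := func() { l.Close(); alloc.Release(port) } // return the port to the pool
    return l, release, nil
}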
178
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/port_allocator_test.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package winuserspace
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
)
|
||||
|
||||
func TestRangeAllocatorEmpty(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("0-0")
|
||||
defer func() {
|
||||
if rv := recover(); rv == nil {
|
||||
t.Fatalf("expected panic because of empty port range: %#v", r)
|
||||
}
|
||||
}()
|
||||
_ = newPortRangeAllocator(*r, true)
|
||||
}
|
||||
|
||||
func TestRangeAllocatorFullyAllocated(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("1-1")
|
||||
// Don't auto-fill ports, we'll manually turn the crank
|
||||
pra := newPortRangeAllocator(*r, false)
|
||||
a := pra.(*rangeAllocator)
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
// There should be no ports available
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
p, err := a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
|
||||
a.Release(p)
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 0 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
// Fill in the one available port
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
|
||||
p, err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if p != 1 {
|
||||
t.Fatalf("unexpected allocated port: %d", p)
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(p - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", p)
|
||||
}
|
||||
a.lock.Unlock()
|
||||
|
||||
_, err = a.AllocateNext()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error because of fully-allocated range")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRangeAllocator_RandomishAllocation(t *testing.T) {
|
||||
r := &net.PortRange{}
|
||||
r.Set("1-100")
|
||||
pra := newPortRangeAllocator(*r, false)
|
||||
a := pra.(*rangeAllocator)
|
||||
|
||||
// allocate all the ports
|
||||
var err error
|
||||
ports := make([]int, 100, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
ports[i], err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if ports[i] < 1 || ports[i] > 100 {
|
||||
t.Fatalf("unexpected allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
// release them all
|
||||
for i := 0; i < 100; i++ {
|
||||
a.Release(ports[i])
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
// allocate the ports again
|
||||
rports := make([]int, 100, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
if !a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be able to fill ports")
|
||||
}
|
||||
rports[i], err = a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if rports[i] < 1 || rports[i] > 100 {
|
||||
t.Fatalf("unexpected allocated port: %d", rports[i])
|
||||
}
|
||||
a.lock.Lock()
|
||||
if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
|
||||
a.lock.Unlock()
|
||||
t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
|
||||
}
|
||||
a.lock.Unlock()
|
||||
}
|
||||
|
||||
if a.fillPortsOnce() {
|
||||
t.Fatalf("Expected to be unable to fill ports")
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(ports, rports) {
|
||||
t.Fatalf("expected re-allocated ports to be in a somewhat random order")
|
||||
}
|
||||
}
|
682
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go
generated
vendored
Normal file
|
@ -0,0 +1,682 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package winuserspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/proxy"
|
||||
"k8s.io/kubernetes/pkg/util/netsh"
|
||||
)
|
||||
|
||||
type portal struct {
|
||||
ip net.IP
|
||||
port int
|
||||
isExternal bool
|
||||
}
|
||||
|
||||
type serviceInfo struct {
|
||||
isAliveAtomic int32 // Only access this with atomic ops
|
||||
portal portal
|
||||
protocol api.Protocol
|
||||
proxyPort int
|
||||
socket proxySocket
|
||||
timeout time.Duration
|
||||
activeClients *clientCache
|
||||
nodePort int
|
||||
loadBalancerStatus api.LoadBalancerStatus
|
||||
sessionAffinityType api.ServiceAffinity
|
||||
stickyMaxAgeMinutes int
|
||||
// Deprecated, but required for back-compat (including e2e)
|
||||
externalIPs []string
|
||||
}
|
||||
|
||||
func (info *serviceInfo) setAlive(b bool) {
|
||||
var i int32
|
||||
if b {
|
||||
i = 1
|
||||
}
|
||||
atomic.StoreInt32(&info.isAliveAtomic, i)
|
||||
}
|
||||
|
||||
func (info *serviceInfo) isAlive() bool {
|
||||
return atomic.LoadInt32(&info.isAliveAtomic) != 0
|
||||
}
|
||||
|
||||
func logTimeout(err error) bool {
|
||||
if e, ok := err.(net.Error); ok {
|
||||
if e.Timeout() {
|
||||
glog.V(3).Infof("connection to endpoint closed due to inactivity")
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Proxier is a simple proxy for TCP connections between a localhost:lport
|
||||
// and services that provide the actual implementations.
|
||||
type Proxier struct {
|
||||
loadBalancer LoadBalancer
|
||||
mu sync.Mutex // protects serviceMap
|
||||
serviceMap map[proxy.ServicePortName]*serviceInfo
|
||||
syncPeriod time.Duration
|
||||
udpIdleTimeout time.Duration
|
||||
portMapMutex sync.Mutex
|
||||
portMap map[portMapKey]*portMapValue
|
||||
numProxyLoops int32 // use atomic ops to access this; mostly for testing
|
||||
listenIP net.IP
|
||||
netsh netsh.Interface
|
||||
hostIP net.IP
|
||||
proxyPorts PortAllocator
|
||||
}
|
||||
|
||||
// assert Proxier is a ProxyProvider
|
||||
var _ proxy.ProxyProvider = &Proxier{}
|
||||
|
||||
// A key for the portMap. The ip has to be a string because slices can't be map
|
||||
// keys.
|
||||
type portMapKey struct {
|
||||
ip string
|
||||
port int
|
||||
protocol api.Protocol
|
||||
}
|
||||
|
||||
func (k *portMapKey) String() string {
|
||||
return fmt.Sprintf("%s:%d/%s", k.ip, k.port, k.protocol)
|
||||
}
|
||||
|
||||
// A value for the portMap
|
||||
type portMapValue struct {
|
||||
owner proxy.ServicePortName
|
||||
socket interface {
|
||||
Close() error
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
|
||||
// the loopback address. May be checked for by callers of NewProxier to know whether
|
||||
// the caller provided invalid input.
|
||||
ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost")
|
||||
)
|
||||
|
||||
// IsProxyLocked returns true if the proxy could not acquire the lock on iptables.
|
||||
func IsProxyLocked(err error) bool {
|
||||
return strings.Contains(err.Error(), "holding the xtables lock")
|
||||
}
|
||||
|
||||
// Used below.
|
||||
var localhostIPv4 = net.ParseIP("127.0.0.1")
|
||||
var localhostIPv6 = net.ParseIP("::1")
|
||||
|
||||
// NewProxier returns a new Proxier given a LoadBalancer and an address on
|
||||
// which to listen. Because of the iptables logic, it is assumed that there
|
||||
// is only a single Proxier active on a machine. An error will be returned if
|
||||
// the proxier cannot be started due to an invalid ListenIP (loopback) or
|
||||
// if iptables fails to update or acquire the initial lock. Once a proxier is
|
||||
// created, it will keep iptables up to date in the background and will not
|
||||
// terminate if a particular iptables call fails.
|
||||
func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
|
||||
if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) {
|
||||
return nil, ErrProxyOnLocalhost
|
||||
}
|
||||
|
||||
hostIP, err := utilnet.ChooseHostInterface()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to select a host interface: %v", err)
|
||||
}
|
||||
|
||||
proxyPorts := newPortAllocator(pr)
|
||||
|
||||
glog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP)
|
||||
return createProxier(loadBalancer, listenIP, netsh, hostIP, proxyPorts, syncPeriod, udpIdleTimeout)
|
||||
}
|
||||
|
||||
func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, hostIP net.IP, proxyPorts PortAllocator, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
|
||||
// convenient to pass nil for tests..
|
||||
if proxyPorts == nil {
|
||||
proxyPorts = newPortAllocator(utilnet.PortRange{})
|
||||
}
|
||||
return &Proxier{
|
||||
loadBalancer: loadBalancer,
|
||||
serviceMap: make(map[proxy.ServicePortName]*serviceInfo),
|
||||
portMap: make(map[portMapKey]*portMapValue),
|
||||
syncPeriod: syncPeriod,
|
||||
udpIdleTimeout: udpIdleTimeout,
|
||||
listenIP: listenIP,
|
||||
netsh: netsh,
|
||||
hostIP: hostIP,
|
||||
proxyPorts: proxyPorts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Sync is called to immediately synchronize the proxier state to iptables
|
||||
func (proxier *Proxier) Sync() {
|
||||
proxier.ensurePortals()
|
||||
proxier.cleanupStaleStickySessions()
|
||||
}
|
||||
|
||||
// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
|
||||
func (proxier *Proxier) SyncLoop() {
|
||||
t := time.NewTicker(proxier.syncPeriod)
|
||||
defer t.Stop()
|
||||
for {
|
||||
<-t.C
|
||||
glog.V(6).Infof("Periodic sync")
|
||||
proxier.Sync()
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that portals exist for all services.
|
||||
func (proxier *Proxier) ensurePortals() {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
// NB: This does not remove rules that should not be present.
|
||||
for name, info := range proxier.serviceMap {
|
||||
err := proxier.openPortal(name, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to ensure portal for %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map.
|
||||
func (proxier *Proxier) cleanupStaleStickySessions() {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
for name := range proxier.serviceMap {
|
||||
proxier.loadBalancer.CleanupStaleStickySessions(name)
|
||||
}
|
||||
}
|
||||
|
||||
// This assumes proxier.mu is not locked.
|
||||
func (proxier *Proxier) stopProxy(service proxy.ServicePortName, info *serviceInfo) error {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
return proxier.stopProxyInternal(service, info)
|
||||
}
|
||||
|
||||
// This assumes proxier.mu is locked.
|
||||
func (proxier *Proxier) stopProxyInternal(service proxy.ServicePortName, info *serviceInfo) error {
|
||||
delete(proxier.serviceMap, service)
|
||||
info.setAlive(false)
|
||||
err := info.socket.Close()
|
||||
port := info.socket.ListenPort()
|
||||
proxier.proxyPorts.Release(port)
|
||||
return err
|
||||
}
|
||||
|
||||
func (proxier *Proxier) getServiceInfo(service proxy.ServicePortName) (*serviceInfo, bool) {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
info, ok := proxier.serviceMap[service]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (proxier *Proxier) setServiceInfo(service proxy.ServicePortName, info *serviceInfo) {
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
proxier.serviceMap[service] = info
|
||||
}
|
||||
|
||||
// addServiceOnPort starts listening for a new service, returning the serviceInfo.
|
||||
// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP
|
||||
// connections, for now.
|
||||
func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol api.Protocol, proxyPort int, timeout time.Duration) (*serviceInfo, error) {
|
||||
sock, err := newProxySocket(protocol, proxier.listenIP, proxyPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, portStr, err := net.SplitHostPort(sock.Addr().String())
|
||||
if err != nil {
|
||||
sock.Close()
|
||||
return nil, err
|
||||
}
|
||||
portNum, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
sock.Close()
|
||||
return nil, err
|
||||
}
|
||||
si := &serviceInfo{
|
||||
isAliveAtomic: 1,
|
||||
proxyPort: portNum,
|
||||
protocol: protocol,
|
||||
socket: sock,
|
||||
timeout: timeout,
|
||||
activeClients: newClientCache(),
|
||||
sessionAffinityType: api.ServiceAffinityNone, // default
|
||||
stickyMaxAgeMinutes: 180, // TODO: parameterize this in the API.
|
||||
}
|
||||
proxier.setServiceInfo(service, si)
|
||||
|
||||
glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum)
|
||||
go func(service proxy.ServicePortName, proxier *Proxier) {
|
||||
defer runtime.HandleCrash()
|
||||
atomic.AddInt32(&proxier.numProxyLoops, 1)
|
||||
sock.ProxyLoop(service, si, proxier)
|
||||
atomic.AddInt32(&proxier.numProxyLoops, -1)
|
||||
}(service, proxier)
|
||||
|
||||
return si, nil
|
||||
}
|
||||
|
||||
// OnServiceUpdate manages the active set of service proxies.
|
||||
// Active service proxies are reinitialized if found in the update set or
|
||||
// shutdown if missing from the update set.
|
||||
func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
|
||||
glog.V(4).Infof("Received update notice: %+v", services)
|
||||
activeServices := make(map[proxy.ServicePortName]bool) // use a map as a set
|
||||
for i := range services {
|
||||
service := &services[i]
|
||||
|
||||
// if ClusterIP is "None" or empty, skip proxying
|
||||
if !api.IsServiceIPSet(service) {
|
||||
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, service.Spec.ClusterIP)
|
||||
continue
|
||||
}
|
||||
|
||||
for i := range service.Spec.Ports {
|
||||
servicePort := &service.Spec.Ports[i]
|
||||
serviceName := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: servicePort.Name}
|
||||
activeServices[serviceName] = true
|
||||
serviceIP := net.ParseIP(service.Spec.ClusterIP)
|
||||
info, exists := proxier.getServiceInfo(serviceName)
|
||||
// TODO: check health of the socket? What if ProxyLoop exited?
|
||||
if exists && sameConfig(info, service, servicePort) {
|
||||
// Nothing changed.
|
||||
continue
|
||||
}
|
||||
if exists {
|
||||
glog.V(4).Infof("Something changed for service %q: stopping it", serviceName)
|
||||
err := proxier.closePortal(serviceName, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to close portal for %q: %v", serviceName, err)
|
||||
}
|
||||
err = proxier.stopProxy(serviceName, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to stop service %q: %v", serviceName, err)
|
||||
}
|
||||
}
|
||||
|
||||
proxyPort, err := proxier.proxyPorts.AllocateNext()
|
||||
if err != nil {
|
||||
glog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(1).Infof("Adding new service %q at %s:%d/%s", serviceName, serviceIP, servicePort.Port, servicePort.Protocol)
|
||||
info, err = proxier.addServiceOnPort(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to start proxy for %q: %v", serviceName, err)
|
||||
continue
|
||||
}
|
||||
info.portal.ip = serviceIP
|
||||
info.portal.port = int(servicePort.Port)
|
||||
info.externalIPs = service.Spec.ExternalIPs
|
||||
// Deep-copy in case the service instance changes
|
||||
info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
|
||||
info.nodePort = int(servicePort.NodePort)
|
||||
info.sessionAffinityType = service.Spec.SessionAffinity
|
||||
glog.V(4).Infof("info: %#v", info)
|
||||
|
||||
err = proxier.openPortal(serviceName, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to open portal for %q: %v", serviceName, err)
|
||||
}
|
||||
proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeMinutes)
|
||||
}
|
||||
}
|
||||
proxier.mu.Lock()
|
||||
defer proxier.mu.Unlock()
|
||||
for name, info := range proxier.serviceMap {
|
||||
if !activeServices[name] {
|
||||
glog.V(1).Infof("Stopping service %q", name)
|
||||
err := proxier.closePortal(name, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to close portal for %q: %v", name, err)
|
||||
}
|
||||
err = proxier.stopProxyInternal(name, info)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to stop service %q: %v", name, err)
|
||||
}
|
||||
proxier.loadBalancer.DeleteService(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
|
||||
if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
|
||||
return false
|
||||
}
|
||||
if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) {
|
||||
return false
|
||||
}
|
||||
if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) {
|
||||
return false
|
||||
}
|
||||
if !api.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) {
|
||||
return false
|
||||
}
|
||||
if info.sessionAffinityType != service.Spec.SessionAffinity {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func ipsEqual(lhs, rhs []string) bool {
|
||||
if len(lhs) != len(rhs) {
|
||||
return false
|
||||
}
|
||||
for i := range lhs {
|
||||
if lhs[i] != rhs[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *serviceInfo) error {
|
||||
err := proxier.openOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, publicIP := range info.externalIPs {
|
||||
err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, ingress := range info.loadBalancerStatus.Ingress {
|
||||
if ingress.IP != "" {
|
||||
err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if info.nodePort != 0 {
|
||||
err = proxier.openNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
|
||||
if protocol == api.ProtocolUDP {
|
||||
glog.Warningf("Not adding rule for %q on %s:%d as UDP protocol is not supported by netsh portproxy", name, portal.ip, portal.port)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add IP address to "vEthernet (HNSTransparent)" so that portproxy could be used to redirect the traffic
|
||||
args := proxier.netshIpv4AddressAddArgs(portal.ip)
|
||||
existed, err := proxier.netsh.EnsureIPAddress(args, portal.ip)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to add ip address for service %q, args:%v", name, args)
|
||||
return err
|
||||
}
|
||||
if !existed {
|
||||
glog.V(3).Infof("Added ip address to HNSTransparent interface for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
|
||||
}
|
||||
|
||||
args = proxier.netshPortProxyAddArgs(portal.ip, portal.port, proxyIP, proxyPort, name)
|
||||
existed, err = proxier.netsh.EnsurePortProxyRule(args)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to run portproxy rule for service %q, args:%v", name, args)
|
||||
return err
|
||||
}
|
||||
if !existed {
|
||||
glog.V(3).Infof("Added portproxy rule for service %q on %s %s:%d", name, protocol, portal.ip, portal.port)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// claimNodePort marks a port as being owned by a particular service, or returns error if already claimed.
|
||||
// Idempotent: reclaiming with the same owner is not an error
|
||||
func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
|
||||
proxier.portMapMutex.Lock()
|
||||
defer proxier.portMapMutex.Unlock()
|
||||
|
||||
// TODO: We could pre-populate some reserved ports into portMap and/or blacklist some well-known ports
|
||||
|
||||
key := portMapKey{ip: ip.String(), port: port, protocol: protocol}
|
||||
existing, found := proxier.portMap[key]
|
||||
if !found {
|
||||
// Hold the actual port open, even though we use iptables to redirect
|
||||
// it. This ensures that a) it's safe to take and b) that stays true.
|
||||
// NOTE: We should not need to have a real listen()ing socket - bind()
|
||||
// should be enough, but I can't figure out a way to e2e test without
|
||||
// it. Tools like 'ss' and 'netstat' do not show sockets that are
|
||||
// bind()ed but not listen()ed, and at least the default debian netcat
|
||||
// has no way to avoid about 10 seconds of retries.
|
||||
socket, err := newProxySocket(protocol, ip, port)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open node port for %s: %v", key.String(), err)
|
||||
}
|
||||
proxier.portMap[key] = &portMapValue{owner: owner, socket: socket}
|
||||
glog.V(2).Infof("Claimed local port %s", key.String())
|
||||
return nil
|
||||
}
|
||||
if existing.owner == owner {
|
||||
// We are idempotent
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Port conflict detected on port %s. %v vs %v", key.String(), owner, existing)
|
||||
}
|
||||
|
||||
// releaseNodePort releases a claim on a port. Returns an error if the owner does not match the claim.
|
||||
// Tolerates release on an unclaimed port, to simplify cleanup of failed allocations.
|
||||
func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol api.Protocol, owner proxy.ServicePortName) error {
|
||||
proxier.portMapMutex.Lock()
|
||||
defer proxier.portMapMutex.Unlock()
|
||||
|
||||
key := portMapKey{ip: ip.String(), port: port, protocol: protocol}
|
||||
existing, found := proxier.portMap[key]
|
||||
if !found {
|
||||
// We tolerate this, it happens if we are cleaning up a failed allocation
|
||||
glog.Infof("Ignoring release on unowned port: %v", key)
|
||||
return nil
|
||||
}
|
||||
if existing.owner != owner {
|
||||
return fmt.Errorf("Port conflict detected on port %v (unowned unlock). %v vs %v", key, owner, existing)
|
||||
}
|
||||
delete(proxier.portMap, key)
|
||||
existing.socket.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
|
||||
if protocol == api.ProtocolUDP {
|
||||
glog.Warningf("Not adding node port rule for %q on port %d as UDP protocol is not supported by netsh portproxy", name, nodePort)
|
||||
return nil
|
||||
}
|
||||
|
||||
err := proxier.claimNodePort(nil, nodePort, protocol, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := proxier.netshPortProxyAddArgs(nil, nodePort, proxyIP, proxyPort, name)
|
||||
existed, err := proxier.netsh.EnsurePortProxyRule(args)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to run portproxy rule for service %q", name)
|
||||
return err
|
||||
}
|
||||
if !existed {
|
||||
glog.Infof("Added portproxy rule for service %q on %s port %d", name, protocol, nodePort)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *serviceInfo) error {
|
||||
// Collect errors and report them all at the end.
|
||||
el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
|
||||
for _, publicIP := range info.externalIPs {
|
||||
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
|
||||
}
|
||||
for _, ingress := range info.loadBalancerStatus.Ingress {
|
||||
if ingress.IP != "" {
|
||||
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
|
||||
}
|
||||
}
|
||||
if info.nodePort != 0 {
|
||||
el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
|
||||
}
|
||||
if len(el) == 0 {
|
||||
glog.V(3).Infof("Closed iptables portals for service %q", service)
|
||||
} else {
|
||||
glog.Errorf("Some errors closing iptables portals for service %q", service)
|
||||
}
|
||||
return utilerrors.NewAggregate(el)
|
||||
}
|
||||
|
||||
func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
|
||||
el := []error{}
|
||||
|
||||
if local, err := isLocalIP(portal.ip); err != nil {
|
||||
el = append(el, fmt.Errorf("can't determine if IP is local, assuming not: %v", err))
|
||||
} else if local {
|
||||
if err := proxier.releaseNodePort(portal.ip, portal.port, protocol, name); err != nil {
|
||||
el = append(el, err)
|
||||
}
|
||||
}
|
||||
|
||||
args := proxier.netshIpv4AddressDeleteArgs(portal.ip)
|
||||
if err := proxier.netsh.DeleteIPAddress(args); err != nil {
|
||||
glog.Errorf("Failed to delete IP address for service %q", name)
|
||||
el = append(el, err)
|
||||
}
|
||||
|
||||
args = proxier.netshPortProxyDeleteArgs(portal.ip, portal.port, proxyIP, proxyPort, name)
|
||||
if err := proxier.netsh.DeletePortProxyRule(args); err != nil {
|
||||
glog.Errorf("Failed to delete portproxy rule for service %q", name)
|
||||
el = append(el, err)
|
||||
}
|
||||
|
||||
return el
|
||||
}
|
||||
|
||||
func (proxier *Proxier) closeNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error {
|
||||
el := []error{}
|
||||
|
||||
args := proxier.netshPortProxyDeleteArgs(nil, nodePort, proxyIP, proxyPort, name)
|
||||
if err := proxier.netsh.DeletePortProxyRule(args); err != nil {
|
||||
glog.Errorf("Failed to delete portproxy rule for service %q", name)
|
||||
el = append(el, err)
|
||||
}
|
||||
|
||||
if err := proxier.releaseNodePort(nil, nodePort, protocol, name); err != nil {
|
||||
el = append(el, err)
|
||||
}
|
||||
|
||||
return el
|
||||
}
|
||||
|
||||
func isLocalIP(ip net.IP) (bool, error) {
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for i := range addrs {
|
||||
intf, _, err := net.ParseCIDR(addrs[i].String())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ip.Equal(intf) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func isTooManyFDsError(err error) bool {
|
||||
return strings.Contains(err.Error(), "too many open files")
|
||||
}
|
||||
|
||||
func isClosedError(err error) bool {
|
||||
// A brief discussion about handling closed error here:
|
||||
// https://code.google.com/p/go/issues/detail?id=4373#c14
|
||||
// TODO: maybe create a stoppable TCP listener that returns a StoppedError
|
||||
return strings.HasSuffix(err.Error(), "use of closed network connection")
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshPortProxyAddArgs(destIP net.IP, destPort int, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
|
||||
args := []string{
|
||||
"interface", "portproxy", "set", "v4tov4",
|
||||
"listenPort=" + strconv.Itoa(destPort),
|
||||
"connectaddress=" + proxyIP.String(),
|
||||
"connectPort=" + strconv.Itoa(proxyPort),
|
||||
}
|
||||
if destIP != nil {
|
||||
args = append(args, "listenaddress="+destIP.String())
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshIpv4AddressAddArgs(destIP net.IP) []string {
|
||||
intName := proxier.netsh.GetInterfaceToAddIP()
|
||||
args := []string{
|
||||
"interface", "ipv4", "add", "address",
|
||||
"name=" + intName,
|
||||
"address=" + destIP.String(),
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshPortProxyDeleteArgs(destIP net.IP, destPort int, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
|
||||
args := []string{
|
||||
"interface", "portproxy", "delete", "v4tov4",
|
||||
"listenPort=" + strconv.Itoa(destPort),
|
||||
}
|
||||
if destIP != nil {
|
||||
args = append(args, "listenaddress="+destIP.String())
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
func (proxier *Proxier) netshIpv4AddressDeleteArgs(destIP net.IP) []string {
|
||||
intName := proxier.netsh.GetInterfaceToAddIP()
|
||||
args := []string{
|
||||
"interface", "ipv4", "delete", "address",
|
||||
"name=" + intName,
|
||||
"address=" + destIP.String(),
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
855
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier_test.go
generated
vendored
Normal file
|
@ -0,0 +1,855 @@
|
|||
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
	netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
)

const (
	udpIdleTimeoutForTest = 250 * time.Millisecond
)
|
||||
func joinHostPort(host string, port int) string {
|
||||
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
|
||||
}
|
||||
|
||||
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
|
||||
for i := 0; i < 50; i++ {
|
||||
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
conn.Close()
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("port %d still open", proxyPort)
|
||||
}
|
||||
|
||||
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
|
||||
for i := 0; i < 50; i++ {
|
||||
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
|
||||
// To detect a closed UDP port write, then read.
|
||||
_, err = conn.Write([]byte("x"))
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
var buf [4]byte
|
||||
_, err = conn.Read(buf[0:])
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
conn.Close()
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("port %d still open", proxyPort)
|
||||
}
|
||||
|
||||
var tcpServerPort int32
|
||||
var udpServerPort int32
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Don't handle panics
|
||||
runtime.ReallyCrash = true
|
||||
|
||||
// TCP setup.
|
||||
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(r.URL.Path[1:]))
|
||||
}))
|
||||
defer tcp.Close()
|
||||
|
||||
u, err := url.Parse(tcp.URL)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
_, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
tcpServerPortValue, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
||||
}
|
||||
tcpServerPort = int32(tcpServerPortValue)
|
||||
|
||||
// UDP setup.
|
||||
udp, err := newUDPEchoServer()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
|
||||
}
|
||||
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
||||
}
|
||||
udpServerPortValue, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
||||
}
|
||||
udpServerPort = int32(udpServerPortValue)
|
||||
go udp.Loop()
|
||||
|
||||
ret := m.Run()
|
||||
// it should be safe to call Close() multiple times.
|
||||
tcp.Close()
|
||||
os.Exit(ret)
|
||||
}
|
||||
|
||||
func testEchoTCP(t *testing.T, address string, port int) {
|
||||
path := "aaaaa"
|
||||
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to server: %v", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("error reading data: %v %v", err, string(data))
|
||||
}
|
||||
if string(data) != path {
|
||||
t.Errorf("expected: %s, got %s", path, string(data))
|
||||
}
|
||||
}
|
||||
|
||||
func testEchoUDP(t *testing.T, address string, port int) {
|
||||
data := "abc123"
|
||||
|
||||
conn, err := net.Dial("udp", joinHostPort(address, port))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to server: %v", err)
|
||||
}
|
||||
if _, err := conn.Write([]byte(data)); err != nil {
|
||||
t.Fatalf("error sending to server: %v", err)
|
||||
}
|
||||
var resp [1024]byte
|
||||
n, err := conn.Read(resp[0:])
|
||||
if err != nil {
|
||||
t.Errorf("error receiving data: %v", err)
|
||||
}
|
||||
if string(resp[0:n]) != data {
|
||||
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
|
||||
}
|
||||
}
|
||||
|
||||
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
|
||||
var got int32
|
||||
for i := 0; i < 600; i++ {
|
||||
got = atomic.LoadInt32(&p.numProxyLoops)
|
||||
if got == want {
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
|
||||
}
|
||||
|
||||
func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) {
|
||||
var got int
|
||||
now := time.Now()
|
||||
deadline := now.Add(timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
s.activeClients.mu.Lock()
|
||||
got = len(s.activeClients.clients)
|
||||
s.activeClients.mu.Unlock()
|
||||
if got == want {
|
||||
return
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
t.Errorf("expected %d ProxyClients live, got %d", want, got)
|
||||
}
|
||||
|
||||
func TestTCPProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyTimeout(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// When connecting to a UDP service endpoint, there should be a Conn for proxy.
|
||||
waitForNumProxyClients(t, svcInfo, 1, time.Second)
|
||||
// If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout.
|
||||
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
|
||||
}
|
||||
|
||||
func TestMultiPortProxy(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
|
||||
}},
|
||||
}, {
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
|
||||
}},
|
||||
}})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfoP.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
svcInfoQ, err := p.addServiceOnPort(serviceQ, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfoQ.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 2)
|
||||
}
|
||||
|
||||
func TestMultiPortOnServiceUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
|
||||
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 80,
|
||||
Protocol: "TCP",
|
||||
}, {
|
||||
Name: "q",
|
||||
Port: 81,
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
waitForNumProxyLoops(t, p, 2)
|
||||
svcInfo, exists := p.getServiceInfo(serviceP)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", serviceP)
|
||||
}
|
||||
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" {
|
||||
t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
|
||||
}
|
||||
|
||||
svcInfo, exists = p.getServiceInfo(serviceQ)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", serviceQ)
|
||||
}
|
||||
if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" {
|
||||
t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
|
||||
}
|
||||
|
||||
svcInfo, exists = p.getServiceInfo(serviceX)
|
||||
if exists {
|
||||
t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper: Stops the proxy for the named service.
|
||||
func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
|
||||
info, found := proxier.getServiceInfo(service)
|
||||
if !found {
|
||||
return fmt.Errorf("unknown service: %s", service)
|
||||
}
|
||||
return proxier.stopProxy(service, info)
|
||||
}
|
||||
|
||||
func TestTCPProxyStop(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
if !svcInfo.isAlive() {
|
||||
t.Fatalf("wrong value for isAlive(): expected true")
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
stopProxyByName(p, service)
|
||||
if svcInfo.isAlive() {
|
||||
t.Fatalf("wrong value for isAlive(): expected false")
|
||||
}
|
||||
// Wait for the port to really close.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestUDPProxyStop(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
stopProxyByName(p, service)
|
||||
// Wait for the port to really close.
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdateDelete(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdateDelete(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
// need to add the endpoint here because it got cleaned up during the service delete
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo for %s", service)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting to proxy: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{})
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
// need to add the endpoint here because it got cleaned up during the service delete
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestTCPProxyUpdatePort(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 99,
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// This is a bit async, but this should be sufficient.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestUDPProxyUpdatePort(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: udpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: 99,
|
||||
Protocol: "UDP",
|
||||
}}},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestProxyUpdatePublicIPs(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
},
|
||||
})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{
|
||||
Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.portal.port),
|
||||
Protocol: "TCP",
|
||||
}},
|
||||
ClusterIP: svcInfo.portal.ip.String(),
|
||||
ExternalIPs: []string{"4.3.2.1"},
|
||||
},
|
||||
}})
|
||||
// Wait for the socket to actually get free.
|
||||
if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
svcInfo, exists := p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("can't find serviceInfo")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
// This is a bit async, but this should be sufficient.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
func TestProxyUpdatePortal(t *testing.T) {
|
||||
lb := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
||||
endpoint := api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
||||
}},
|
||||
}
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
|
||||
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
waitForNumProxyLoops(t, p, 0)
|
||||
|
||||
svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("error adding new service: %#v", err)
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
_, exists := p.getServiceInfo(service)
|
||||
if exists {
|
||||
t.Fatalf("service with empty ClusterIP should not be included in the proxy")
|
||||
}
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
_, exists = p.getServiceInfo(service)
|
||||
if exists {
|
||||
t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
|
||||
}
|
||||
|
||||
p.OnServiceUpdate([]api.Service{{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{
|
||||
Name: "p",
|
||||
Port: int32(svcInfo.proxyPort),
|
||||
Protocol: "TCP",
|
||||
}}},
|
||||
}})
|
||||
lb.OnEndpointsUpdate([]api.Endpoints{endpoint})
|
||||
svcInfo, exists = p.getServiceInfo(service)
|
||||
if !exists {
|
||||
t.Fatalf("service with ClusterIP set not found in the proxy")
|
||||
}
|
||||
testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort)
|
||||
waitForNumProxyLoops(t, p, 1)
|
||||
}
|
||||
|
||||
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in
|
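All of the tests above share one harness: a round-robin load balancer seeded with synthetic Endpoints, and a Proxier built against the fake netsh implementation, so nothing touches the host network. A condensed sketch of that shared setup inside a test, with names taken verbatim from the tests above:

	lb := NewLoadBalancerRR()
	svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsUpdate([]api.Endpoints{{
		ObjectMeta: api.ObjectMeta{Name: svc.Name, Namespace: svc.Namespace},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []api.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}})

	// The fake netsh stands in for the real Windows tooling.
	p, err := createProxier(lb, net.ParseIP("0.0.0.0"), netshtest.NewFake(), net.ParseIP("127.0.0.1"), nil, time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	// Proxy port 0 lets the OS pick a free port for the proxy socket.
	svcInfo, err := p.addServiceOnPort(svc, "TCP", 0, time.Second)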
300 vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go generated vendored Normal file
@@ -0,0 +1,300 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
)

// Abstraction over TCP/UDP sockets which are proxied.
type proxySocket interface {
	// Addr gets the net.Addr for a proxySocket.
	Addr() net.Addr
	// Close stops the proxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service proxy.ServicePortName, info *serviceInfo, proxier *Proxier)
	// ListenPort returns the host port that the proxySocket is listening on
	ListenPort() int
}

func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}
||||
|
||||
// Dial timeouts tried, in order, when connecting to a backend endpoint.
|
||||
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
|
||||
|
||||
// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
|
||||
// no new connections are allowed but existing connections are left untouched.
|
||||
type tcpProxySocket struct {
|
||||
net.Listener
|
||||
port int
|
||||
}
|
||||
|
||||
func (tcp *tcpProxySocket) ListenPort() int {
|
||||
return tcp.port
|
||||
}
|
||||
|
||||
func tryConnect(service proxy.ServicePortName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
|
||||
sessionAffinityReset := false
|
||||
for _, dialTimeout := range endpointDialTimeout {
|
||||
endpoint, err := proxier.loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
|
||||
return nil, err
|
||||
}
|
||||
glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
|
||||
// TODO: This could spin up a new goroutine to make the outbound connection,
|
||||
// and keep accepting inbound traffic.
|
||||
outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
|
||||
if err != nil {
|
||||
if isTooManyFDsError(err) {
|
||||
panic("Dial failed: " + err.Error())
|
||||
}
|
||||
glog.Errorf("Dial failed: %v", err)
|
||||
sessionAffinityReset = true
|
||||
continue
|
||||
}
|
||||
return outConn, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to connect to an endpoint.")
|
||||
}
|
||||
|
||||
func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
|
||||
for {
|
||||
if !myInfo.isAlive() {
|
||||
// The service port was closed or replaced.
|
||||
return
|
||||
}
|
||||
// Block until a connection is made.
|
||||
inConn, err := tcp.Accept()
|
||||
if err != nil {
|
||||
if isTooManyFDsError(err) {
|
||||
panic("Accept failed: " + err.Error())
|
||||
}
|
||||
|
||||
if isClosedError(err) {
|
||||
return
|
||||
}
|
||||
if !myInfo.isAlive() {
|
||||
// Then the service port was just closed so the accept failure is to be expected.
|
||||
return
|
||||
}
|
||||
glog.Errorf("Accept failed: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
|
||||
outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to connect to balancer: %v", err)
|
||||
inConn.Close()
|
||||
continue
|
||||
}
|
||||
// Spin up an async copy loop.
|
||||
go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
|
||||
}
|
||||
}
|
||||
|
||||
// proxyTCP proxies data bi-directionally between in and out.
|
||||
func proxyTCP(in, out *net.TCPConn) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
|
||||
in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
|
||||
go copyBytes("from backend", in, out, &wg)
|
||||
go copyBytes("to backend", out, in, &wg)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
|
||||
n, err := io.Copy(dest, src)
|
||||
if err != nil {
|
||||
if !isClosedError(err) {
|
||||
glog.Errorf("I/O error: %v", err)
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
|
||||
dest.Close()
|
||||
src.Close()
|
||||
}
|
||||
|
||||
// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
|
||||
// no new connections are allowed and existing connections are broken.
|
||||
// TODO: We could lame-duck this ourselves, if it becomes important.
|
||||
type udpProxySocket struct {
|
||||
*net.UDPConn
|
||||
port int
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) ListenPort() int {
|
||||
return udp.port
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) Addr() net.Addr {
|
||||
return udp.LocalAddr()
|
||||
}
|
||||
|
||||
// Holds all the known UDP clients that have not timed out.
|
||||
type clientCache struct {
|
||||
mu sync.Mutex
|
||||
clients map[string]net.Conn // addr string -> connection
|
||||
}
|
||||
|
||||
func newClientCache() *clientCache {
|
||||
return &clientCache{clients: map[string]net.Conn{}}
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serviceInfo, proxier *Proxier) {
|
||||
var buffer [4096]byte // 4KiB should be enough for most whole-packets
|
||||
for {
|
||||
if !myInfo.isAlive() {
|
||||
// The service port was closed or replaced.
|
||||
break
|
||||
}
|
||||
|
||||
// Block until data arrives.
|
||||
// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
|
||||
n, cliAddr, err := udp.ReadFrom(buffer[0:])
|
||||
if err != nil {
|
||||
if e, ok := err.(net.Error); ok {
|
||||
if e.Temporary() {
|
||||
glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
|
||||
break
|
||||
}
|
||||
// If this is a client we know already, reuse the connection and goroutine.
|
||||
svrConn, err := udp.getBackendConn(myInfo.activeClients, cliAddr, proxier, service, myInfo.timeout)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// TODO: It would be nice to let the goroutine handle this write, but we don't
|
||||
// really want to copy the buffer. We could do a pool of buffers or something.
|
||||
_, err = svrConn.Write(buffer[0:n])
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("Write failed: %v", err)
|
||||
// TODO: Maybe tear down the goroutine for this client/server pair?
|
||||
}
|
||||
continue
|
||||
}
|
||||
err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
|
||||
if err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
|
||||
activeClients.mu.Lock()
|
||||
defer activeClients.mu.Unlock()
|
||||
|
||||
svrConn, found := activeClients.clients[cliAddr.String()]
|
||||
if !found {
|
||||
// TODO: This could spin up a new goroutine to make the outbound connection,
|
||||
// and keep accepting inbound traffic.
|
||||
glog.V(3).Infof("New UDP connection from %s", cliAddr)
|
||||
var err error
|
||||
svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
activeClients.clients[cliAddr.String()] = svrConn
|
||||
go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
|
||||
defer runtime.HandleCrash()
|
||||
udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
|
||||
}(cliAddr, svrConn, activeClients, timeout)
|
||||
}
|
||||
return svrConn, nil
|
||||
}
|
||||
|
||||
// This function is expected to be called as a goroutine.
|
||||
// TODO: Track and log bytes copied, like TCP
|
||||
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
|
||||
defer svrConn.Close()
|
||||
var buffer [4096]byte
|
||||
for {
|
||||
n, err := svrConn.Read(buffer[0:])
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("Read failed: %v", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
err = svrConn.SetDeadline(time.Now().Add(timeout))
|
||||
if err != nil {
|
||||
glog.Errorf("SetDeadline failed: %v", err)
|
||||
break
|
||||
}
|
||||
n, err = udp.WriteTo(buffer[0:n], cliAddr)
|
||||
if err != nil {
|
||||
if !logTimeout(err) {
|
||||
glog.Errorf("WriteTo failed: %v", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
activeClients.mu.Lock()
|
||||
delete(activeClients.clients, cliAddr.String())
|
||||
activeClients.mu.Unlock()
|
||||
}
|
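Taken together, the flow is: newProxySocket opens the listening socket for a service port, and the Proxier then runs ProxyLoop on it in a goroutine until the serviceInfo is no longer alive. A minimal sketch of that call pattern, assuming `service`, `info`, and `proxier` already exist in scope (roughly what addServiceOnPort does):

	// Port 0 asks the OS for any free port on the loopback address.
	sock, err := newProxySocket(api.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		return err
	}
	glog.V(2).Infof("Proxying %s on %s (port %d)", service, sock.Addr(), sock.ListenPort())
	go func() {
		defer runtime.HandleCrash()
		// Returns once info is marked dead or the socket is closed.
		sock.ProxyLoop(service, info, proxier)
	}()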
326 vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go generated vendored Normal file
@@ -0,0 +1,326 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/util/slice"
)

var (
	ErrMissingServiceEntry = errors.New("missing service entry")
	ErrMissingEndpoints    = errors.New("missing endpoints")
)
||||
|
||||
type affinityState struct {
|
||||
clientIP string
|
||||
//clientProtocol api.Protocol //not yet used
|
||||
//sessionCookie string //not yet used
|
||||
endpoint string
|
||||
lastUsed time.Time
|
||||
}
|
||||
|
||||
type affinityPolicy struct {
|
||||
affinityType api.ServiceAffinity
|
||||
affinityMap map[string]*affinityState // map client IP -> affinity info
|
||||
ttlMinutes int
|
||||
}
|
||||
|
||||
// LoadBalancerRR is a round-robin load balancer.
|
||||
type LoadBalancerRR struct {
|
||||
lock sync.RWMutex
|
||||
services map[proxy.ServicePortName]*balancerState
|
||||
}
|
||||
|
||||
// Ensure this implements LoadBalancer.
|
||||
var _ LoadBalancer = &LoadBalancerRR{}
|
||||
|
||||
type balancerState struct {
|
||||
endpoints []string // a list of "ip:port" style strings
|
||||
index int // current index into endpoints
|
||||
affinity affinityPolicy
|
||||
}
|
||||
|
||||
func newAffinityPolicy(affinityType api.ServiceAffinity, ttlMinutes int) *affinityPolicy {
|
||||
return &affinityPolicy{
|
||||
affinityType: affinityType,
|
||||
affinityMap: make(map[string]*affinityState),
|
||||
ttlMinutes: ttlMinutes,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLoadBalancerRR returns a new LoadBalancerRR.
|
||||
func NewLoadBalancerRR() *LoadBalancerRR {
|
||||
return &LoadBalancerRR{
|
||||
services: map[proxy.ServicePortName]*balancerState{},
|
||||
}
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) error {
|
||||
glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
lb.newServiceInternal(svcPort, affinityType, ttlMinutes)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This assumes that lb.lock is already held.
|
||||
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) *balancerState {
|
||||
if ttlMinutes == 0 {
|
||||
ttlMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimited instead????
|
||||
}
|
||||
|
||||
if _, exists := lb.services[svcPort]; !exists {
|
||||
lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlMinutes)}
|
||||
glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort)
|
||||
} else if affinityType != "" {
|
||||
lb.services[svcPort].affinity.affinityType = affinityType
|
||||
}
|
||||
return lb.services[svcPort]
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
|
||||
glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
delete(lb.services, svcPort)
|
||||
}
|
||||
|
||||
// return true if this service is using some form of session affinity.
|
||||
func isSessionAffinity(affinity *affinityPolicy) bool {
|
||||
// Should never be empty string, but checking for it to be safe.
|
||||
if affinity.affinityType == "" || affinity.affinityType == api.ServiceAffinityNone {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NextEndpoint returns a service endpoint.
|
||||
// The service endpoint is chosen using the round-robin algorithm.
|
||||
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
|
||||
// Coarse locking is simple. We can get more fine-grained if/when we
|
||||
// can prove it matters.
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists || state == nil {
|
||||
return "", ErrMissingServiceEntry
|
||||
}
|
||||
if len(state.endpoints) == 0 {
|
||||
return "", ErrMissingEndpoints
|
||||
}
|
||||
glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints)
|
||||
|
||||
sessionAffinityEnabled := isSessionAffinity(&state.affinity)
|
||||
|
||||
var ipaddr string
|
||||
if sessionAffinityEnabled {
|
||||
// Caution: don't shadow ipaddr
|
||||
var err error
|
||||
ipaddr, _, err = net.SplitHostPort(srcAddr.String())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
|
||||
}
|
||||
if !sessionAffinityReset {
|
||||
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
|
||||
if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Minutes()) < state.affinity.ttlMinutes {
|
||||
// Affinity wins.
|
||||
endpoint := sessionAffinity.endpoint
|
||||
sessionAffinity.lastUsed = time.Now()
|
||||
glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
|
||||
return endpoint, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Take the next endpoint.
|
||||
endpoint := state.endpoints[state.index]
|
||||
state.index = (state.index + 1) % len(state.endpoints)
|
||||
|
||||
if sessionAffinityEnabled {
|
||||
var affinity *affinityState
|
||||
affinity = state.affinity.affinityMap[ipaddr]
|
||||
if affinity == nil {
|
||||
affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
|
||||
state.affinity.affinityMap[ipaddr] = affinity
|
||||
}
|
||||
affinity.lastUsed = time.Now()
|
||||
affinity.endpoint = endpoint
|
||||
affinity.clientIP = ipaddr
|
||||
glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
type hostPortPair struct {
|
||||
host string
|
||||
port int
|
||||
}
|
||||
|
||||
func isValidEndpoint(hpp *hostPortPair) bool {
|
||||
return hpp.host != "" && hpp.port > 0
|
||||
}
|
||||
|
||||
func flattenValidEndpoints(endpoints []hostPortPair) []string {
|
||||
// Convert Endpoint objects into strings for easier use later. Ignore
|
||||
// the protocol field - we'll get that from the Service objects.
|
||||
var result []string
|
||||
for i := range endpoints {
|
||||
hpp := &endpoints[i]
|
||||
if isValidEndpoint(hpp) {
|
||||
result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port)))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Remove any session affinity records associated to a particular endpoint (for example when a pod goes down).
|
||||
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
|
||||
for _, affinity := range state.affinity.affinityMap {
|
||||
if affinity.endpoint == endpoint {
|
||||
glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort)
|
||||
delete(state.affinity.affinityMap, affinity.clientIP)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
|
||||
// Then remove any session affinity records that are not in both lists.
|
||||
// This assumes the lb.lock is held.
|
||||
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
|
||||
allEndpoints := map[string]int{}
|
||||
for _, newEndpoint := range newEndpoints {
|
||||
allEndpoints[newEndpoint] = 1
|
||||
}
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
for _, existingEndpoint := range state.endpoints {
|
||||
allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
|
||||
}
|
||||
for mKey, mVal := range allEndpoints {
|
||||
if mVal == 1 {
|
||||
glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort)
|
||||
removeSessionAffinityByEndpoint(state, svcPort, mKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnEndpointsUpdate manages the registered service endpoints.
|
||||
// Registered endpoints are updated if found in the update set or
|
||||
// unregistered if missing from the update set.
|
||||
func (lb *LoadBalancerRR) OnEndpointsUpdate(allEndpoints []api.Endpoints) {
|
||||
registeredEndpoints := make(map[proxy.ServicePortName]bool)
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
// Update endpoints for services.
|
||||
for i := range allEndpoints {
|
||||
svcEndpoints := &allEndpoints[i]
|
||||
|
||||
// We need to build a map of portname -> all ip:ports for that
|
||||
// portname. Explode Endpoints.Subsets[*] into this structure.
|
||||
portsToEndpoints := map[string][]hostPortPair{}
|
||||
for i := range svcEndpoints.Subsets {
|
||||
ss := &svcEndpoints.Subsets[i]
|
||||
for i := range ss.Ports {
|
||||
port := &ss.Ports[i]
|
||||
for i := range ss.Addresses {
|
||||
addr := &ss.Addresses[i]
|
||||
portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, int(port.Port)})
|
||||
// Ignore the protocol field - we'll get that from the Service objects.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for portname := range portsToEndpoints {
|
||||
svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: svcEndpoints.Namespace, Name: svcEndpoints.Name}, Port: portname}
|
||||
state, exists := lb.services[svcPort]
|
||||
curEndpoints := []string{}
|
||||
if state != nil {
|
||||
curEndpoints = state.endpoints
|
||||
}
|
||||
newEndpoints := flattenValidEndpoints(portsToEndpoints[portname])
|
||||
|
||||
if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) {
|
||||
glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints)
|
||||
lb.updateAffinityMap(svcPort, newEndpoints)
|
||||
// OnEndpointsUpdate can be called without NewService being called externally.
|
||||
// To be safe we will call it here. A new service will only be created
|
||||
// if one does not already exist. The affinity will be updated
|
||||
// later, once NewService is called.
|
||||
state = lb.newServiceInternal(svcPort, api.ServiceAffinity(""), 0)
|
||||
state.endpoints = slice.ShuffleStrings(newEndpoints)
|
||||
|
||||
// Reset the round-robin index.
|
||||
state.index = 0
|
||||
}
|
||||
registeredEndpoints[svcPort] = true
|
||||
}
|
||||
}
|
||||
// Remove endpoints missing from the update.
|
||||
for k := range lb.services {
|
||||
if _, exists := registeredEndpoints[k]; !exists {
|
||||
glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", k)
|
||||
// Reset but don't delete.
|
||||
state := lb.services[k]
|
||||
state.endpoints = []string{}
|
||||
state.index = 0
|
||||
state.affinity.affinityMap = map[string]*affinityState{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests whether two slices are equivalent. This sorts both slices in-place.
|
||||
func slicesEquiv(lhs, rhs []string) bool {
|
||||
if len(lhs) != len(rhs) {
|
||||
return false
|
||||
}
|
||||
if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
|
||||
lb.lock.Lock()
|
||||
defer lb.lock.Unlock()
|
||||
|
||||
state, exists := lb.services[svcPort]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
for ip, affinity := range state.affinity.affinityMap {
|
||||
if int(time.Now().Sub(affinity.lastUsed).Minutes()) >= state.affinity.ttlMinutes {
|
||||
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
|
||||
delete(state.affinity.affinityMap, ip)
|
||||
}
|
||||
}
|
||||
}
|
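A minimal usage sketch of the round-robin balancer above, assuming an api.Endpoints value `ep` for the service has already been built (as in the tests that follow):

	lb := NewLoadBalancerRR()
	svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "svc"}, Port: "p"}

	// Optional: turn on client-IP session affinity with a 30 minute TTL.
	lb.NewService(svcPort, api.ServiceAffinityClientIP, 30)

	// Feed the current endpoints; services missing from the update are reset.
	lb.OnEndpointsUpdate([]api.Endpoints{ep})

	// Pick the next backend for a given client address.
	endpoint, err := lb.NextEndpoint(svcPort, &net.TCPAddr{IP: net.ParseIP("10.0.0.5"), Port: 51234}, false)
	if err != nil {
		// ErrMissingServiceEntry or ErrMissingEndpoints
		return err
	}
	glog.V(4).Infof("chose endpoint %s", endpoint)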
727 vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin_test.go generated vendored Normal file
@@ -0,0 +1,727 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"net"
	"testing"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
)
|
||||
func TestValidateWorks(t *testing.T) {
|
||||
if isValidEndpoint(&hostPortPair{}) {
|
||||
t.Errorf("Didn't fail for empty set")
|
||||
}
|
||||
if isValidEndpoint(&hostPortPair{host: "foobar"}) {
|
||||
t.Errorf("Didn't fail with invalid port")
|
||||
}
|
||||
if isValidEndpoint(&hostPortPair{host: "foobar", port: -1}) {
|
||||
t.Errorf("Didn't fail with a negative port")
|
||||
}
|
||||
if !isValidEndpoint(&hostPortPair{host: "foobar", port: 8080}) {
|
||||
t.Errorf("Failed a valid config.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterWorks(t *testing.T) {
|
||||
endpoints := []hostPortPair{
|
||||
{host: "foobar", port: 1},
|
||||
{host: "foobar", port: 2},
|
||||
{host: "foobar", port: -1},
|
||||
{host: "foobar", port: 3},
|
||||
{host: "foobar", port: -2},
|
||||
}
|
||||
filtered := flattenValidEndpoints(endpoints)
|
||||
|
||||
if len(filtered) != 3 {
|
||||
t.Errorf("Failed to filter to the correct size")
|
||||
}
|
||||
if filtered[0] != "foobar:1" {
|
||||
t.Errorf("Index zero is not foobar:1")
|
||||
}
|
||||
if filtered[1] != "foobar:2" {
|
||||
t.Errorf("Index one is not foobar:2")
|
||||
}
|
||||
if filtered[2] != "foobar:3" {
|
||||
t.Errorf("Index two is not foobar:3")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
var endpoints []api.Endpoints
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
if len(endpoint) != 0 {
|
||||
t.Errorf("Got an endpoint")
|
||||
}
|
||||
}
|
||||
|
||||
func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
|
||||
}
|
||||
if endpoint != expected {
|
||||
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
|
||||
}
|
||||
if endpoint != expected {
|
||||
t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 40}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
|
||||
}
|
||||
|
||||
func stringsInSlice(haystack []string, needles ...string) bool {
|
||||
for _, needle := range needles {
|
||||
found := false
|
||||
for i := range haystack {
|
||||
if haystack[i] == needle {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found == false {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
|
||||
}},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint2"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints := loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
|
||||
},
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint5"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceP].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
|
||||
|
||||
shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
|
||||
if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
|
||||
t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
|
||||
|
||||
// Clear endpoints
|
||||
endpoints[0] = api.Endpoints{ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
|
||||
barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
|
||||
endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
endpoints := make([]api.Endpoints, 2)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 123}},
|
||||
},
|
||||
},
|
||||
}
|
||||
endpoints[1] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
|
||||
Ports: []api.EndpointPort{{Name: "p", Port: 456}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
|
||||
|
||||
shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
|
||||
// Then update the configuration by removing foo
|
||||
loadBalancer.OnEndpointsUpdate(endpoints[1:])
|
||||
endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// but bar is still there, and we continue RR from where we left off.
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
|
||||
expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call OnEndpointsUpdate() before NewService()
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
|
||||
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
|
||||
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
client1Endpoint := shuffledEndpoints[0]
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
client2Endpoint := shuffledEndpoints[1]
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
||||
client3Endpoint := shuffledEndpoints[2]
|
||||
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
if client1Endpoint == "endpoint:3" {
|
||||
client1Endpoint = shuffledEndpoints[0]
|
||||
} else if client2Endpoint == "endpoint:3" {
|
||||
client2Endpoint = shuffledEndpoints[0]
|
||||
} else if client3Endpoint == "endpoint:3" {
|
||||
client3Endpoint = shuffledEndpoints[0]
|
||||
}
|
||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
||||
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
// Then update the configuration with one fewer endpoints, make sure
|
||||
// we start in the beginning again
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
||||
|
||||
// Clear endpoints
|
||||
endpoints[0] = api.Endpoints{ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
loadBalancer.NewService(fooService, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 2)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
||||
},
|
||||
},
|
||||
}
|
||||
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
|
||||
loadBalancer.NewService(barService, api.ServiceAffinityClientIP, 0)
|
||||
endpoints[1] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{
|
||||
Addresses: []api.EndpointAddress{{IP: "endpoint"}},
|
||||
Ports: []api.EndpointPort{{Port: 4}, {Port: 5}},
|
||||
},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
||||
|
||||
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
|
||||
// Then update the configuration by removing foo
|
||||
loadBalancer.OnEndpointsUpdate(endpoints[1:])
|
||||
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// but bar is still there, and we continue RR from where we left off.
|
||||
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
||||
}
|
||||
|
||||
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
|
||||
loadBalancer := NewLoadBalancerRR()
|
||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
||||
if err == nil || len(endpoint) != 0 {
|
||||
t.Errorf("Didn't fail with non-existent service")
|
||||
}
|
||||
|
||||
// Call NewService() before OnEndpointsUpdate()
|
||||
loadBalancer.NewService(service, api.ServiceAffinityClientIP, 0)
|
||||
endpoints := make([]api.Endpoints, 1)
|
||||
endpoints[0] = api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
||||
Subsets: []api.EndpointSubset{
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint1"}}, Ports: []api.EndpointPort{{Port: 1}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint2"}}, Ports: []api.EndpointPort{{Port: 2}}},
|
||||
{Addresses: []api.EndpointAddress{{IP: "endpoint3"}}, Ports: []api.EndpointPort{{Port: 3}}},
|
||||
},
|
||||
}
|
||||
loadBalancer.OnEndpointsUpdate(endpoints)
|
||||
|
||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
||||
|
||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
||||
if err != nil {
|
||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
||||
}
|
||||
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
||||
|
||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
||||
}
|
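The tests above pin down the behavior the userspace proxy depends on: endpoints are shuffled once when an update arrives, then handed out strictly round-robin, and with ClientIP affinity a client keeps its previous endpoint until that endpoint disappears or the affinity is explicitly reset. A minimal sketch of just the shuffle-then-rotate part, with invented names and no connection to the vendored types, might look like this:

package main

import (
	"fmt"
	"math/rand"
)

// rrState holds a shuffled endpoint list and the index of the next pick,
// loosely mirroring what the tests above read via loadBalancer.services[...].
type rrState struct {
	endpoints []string
	index     int
}

func newRRState(endpoints []string) *rrState {
	shuffled := append([]string(nil), endpoints...)
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
	return &rrState{endpoints: shuffled}
}

// next returns the current endpoint and advances the round-robin index.
func (s *rrState) next() string {
	ep := s.endpoints[s.index]
	s.index = (s.index + 1) % len(s.endpoints)
	return ep
}

func main() {
	s := newRRState([]string{"endpoint:1", "endpoint:2", "endpoint:3"})
	for i := 0; i < 4; i++ {
		fmt.Println(s.next()) // the 4th call wraps back to the first pick
	}
}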
47
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/udp_server.go
generated
vendored
Normal file
47
vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/udp_server.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package winuserspace
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
|
||||
type udpEchoServer struct {
|
||||
net.PacketConn
|
||||
}
|
||||
|
||||
func (r *udpEchoServer) Loop() {
|
||||
var buffer [4096]byte
|
||||
for {
|
||||
n, cliAddr, err := r.ReadFrom(buffer[0:])
|
||||
if err != nil {
|
||||
fmt.Printf("ReadFrom failed: %v\n", err)
|
||||
continue
|
||||
}
|
||||
r.WriteTo(buffer[0:n], cliAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func newUDPEchoServer() (*udpEchoServer, error) {
|
||||
packetconn, err := net.ListenPacket("udp", ":0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &udpEchoServer{packetconn}, nil
|
||||
}
|
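udpEchoServer above is only a test helper: it reads each datagram and writes it straight back to the sender. A small standalone sketch of the same pattern using just the standard library (separate from the vendored helper; names here are invented) could be:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Listen on an ephemeral UDP port and echo every datagram back to its sender.
	conn, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	go func() {
		buf := make([]byte, 4096)
		for {
			n, addr, err := conn.ReadFrom(buf)
			if err != nil {
				return // listener closed
			}
			conn.WriteTo(buf[:n], addr)
		}
	}()

	// Send one datagram to the echo server and read the reply.
	client, err := net.Dial("udp", conn.LocalAddr().String())
	if err != nil {
		panic(err)
	}
	defer client.Close()
	client.Write([]byte("ping"))

	reply := make([]byte, 16)
	n, err := client.Read(reply)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(reply[:n])) // "ping"
}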