*: initial update to kube 1.8

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-09-26 16:23:09 +02:00
parent 2453222695
commit d6e819133d
1237 changed files with 84117 additions and 564982 deletions

vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go

@ -28,13 +28,16 @@ import (
"github.com/golang/glog"
"github.com/renstrom/dedent"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
)
var nodeHealthzRetryInterval = 60 * time.Second
// Server serves HTTP endpoints for each service name, with results
// based on the endpoints. If there are 0 endpoints for a service, it returns a
// 503 "Service Unavailable" error (telling LBs not to use this node). If there
@ -156,12 +159,12 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err
if hcs.recorder != nil {
hcs.recorder.Eventf(
&clientv1.ObjectReference{
&v1.ObjectReference{
Kind: "Service",
Namespace: nsn.Namespace,
Name: nsn.Name,
UID: types.UID(nsn.String()),
}, api.EventTypeWarning, "FailedToStartHealthcheck", msg)
}, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg)
}
glog.Error(msg)
continue
@ -259,16 +262,18 @@ type HealthzServer struct {
addr string
port int32
healthTimeout time.Duration
recorder record.EventRecorder
nodeRef *v1.ObjectReference
lastUpdated atomic.Value
}
// NewDefaultHealthzServer returns a default healthz http server.
func NewDefaultHealthzServer(addr string, healthTimeout time.Duration) *HealthzServer {
return newHealthzServer(nil, nil, nil, addr, healthTimeout)
func NewDefaultHealthzServer(addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
return newHealthzServer(nil, nil, nil, addr, healthTimeout, recorder, nodeRef)
}
func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c clock.Clock, addr string, healthTimeout time.Duration) *HealthzServer {
func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder record.EventRecorder, nodeRef *v1.ObjectReference) *HealthzServer {
if listener == nil {
listener = stdNetListener{}
}
@ -284,6 +289,8 @@ func newHealthzServer(listener Listener, httpServerFactory HTTPServerFactory, c
clock: c,
addr: addr,
healthTimeout: healthTimeout,
recorder: recorder,
nodeRef: nodeRef,
}
}
@ -297,19 +304,26 @@ func (hs *HealthzServer) Run() {
serveMux := http.NewServeMux()
serveMux.Handle("/healthz", healthzHandler{hs: hs})
server := hs.httpFactory.New(hs.addr, serveMux)
listener, err := hs.listener.Listen(hs.addr)
if err != nil {
glog.Errorf("Failed to start healthz on %s: %v", hs.addr, err)
return
}
go func() {
go wait.Until(func() {
glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr)
if err := server.Serve(listener); err != nil {
glog.Errorf("Healhz closed: %v", err)
listener, err := hs.listener.Listen(hs.addr)
if err != nil {
msg := fmt.Sprintf("Failed to start node healthz on %s: %v", hs.addr, err)
if hs.recorder != nil {
hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg)
}
glog.Error(msg)
return
}
glog.Errorf("Unexpected healhz closed.")
}()
if err := server.Serve(listener); err != nil {
glog.Errorf("Healthz closed with error: %v", err)
return
}
glog.Errorf("Unexpected healthz closed.")
}, nodeHealthzRetryInterval, wait.NeverStop)
}
type healthzHandler struct {

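The change above turns HealthzServer.Run into a retry loop: instead of giving up when the healthz listener cannot be bound, kube-proxy now records a FailedToStartNodeHealthcheck event against the node and tries again every nodeHealthzRetryInterval (60 seconds) via wait.Until. A minimal standard-library sketch of that pattern follows; the address, interval, and function names are illustrative, not the vendored API.

package main

import (
	"log"
	"net"
	"net/http"
	"time"
)

// retryServe keeps trying to bind addr and serve mux, sleeping retry between
// attempts, mirroring the wait.Until loop added in the hunk above.
func retryServe(addr string, mux *http.ServeMux, retry time.Duration) {
	for {
		ln, err := net.Listen("tcp", addr)
		if err != nil {
			// The vendored code also emits a FailedToStartNodeHealthcheck
			// event against the node reference at this point.
			log.Printf("failed to start healthz on %s: %v", addr, err)
			time.Sleep(retry)
			continue
		}
		if err := http.Serve(ln, mux); err != nil {
			log.Printf("healthz closed with error: %v", err)
		}
		time.Sleep(retry)
	}
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	retryServe("0.0.0.0:10256", mux, 60*time.Second) // 10256 is kube-proxy's usual healthz port
}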
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go

@ -35,11 +35,11 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
@ -49,10 +49,10 @@ import (
"k8s.io/kubernetes/pkg/proxy/healthcheck"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/async"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilversion "k8s.io/kubernetes/pkg/util/version"
utilexec "k8s.io/utils/exec"
)
const (
@ -143,7 +143,7 @@ type serviceInfo struct {
nodePort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
stickyMaxAgeMinutes int
stickyMaxAgeSeconds int
externalIPs []string
loadBalancerSourceRanges []string
onlyNodeLocalEndpoints bool
@ -194,6 +194,11 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se
apiservice.RequestsOnlyLocalTraffic(service) {
onlyNodeLocalEndpoints = true
}
var stickyMaxAgeSeconds int
if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
info := &serviceInfo{
clusterIP: net.ParseIP(service.Spec.ClusterIP),
port: int(port.Port),
@ -202,16 +207,17 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se
// Deep-copy in case the service instance changes
loadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer),
sessionAffinityType: service.Spec.SessionAffinity,
stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API.
stickyMaxAgeSeconds: stickyMaxAgeSeconds,
externalIPs: make([]string, len(service.Spec.ExternalIPs)),
loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)),
onlyNodeLocalEndpoints: onlyNodeLocalEndpoints,
}
copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
copy(info.externalIPs, service.Spec.ExternalIPs)
if apiservice.NeedsHealthCheck(service) {
p := apiservice.GetServiceHealthCheckNodePort(service)
p := service.Spec.HealthCheckNodePort
if p == 0 {
glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
} else {
@ -369,7 +375,7 @@ type Proxier struct {
mu sync.Mutex // protects the following fields
serviceMap proxyServiceMap
endpointsMap proxyEndpointsMap
portsMap map[localPort]closeable
portsMap map[utilproxy.LocalPort]utilproxy.Closeable
// endpointsSynced and servicesSynced are set to true when corresponding
// objects are synced after startup. This is used to avoid updating iptables
// with some partial data after kube-proxy restart.
@ -386,7 +392,7 @@ type Proxier struct {
clusterCIDR string
hostname string
nodeIP net.IP
portMapper portOpener
portMapper utilproxy.PortOpener
recorder record.EventRecorder
healthChecker healthcheck.Server
healthzServer healthcheck.HealthzUpdater
@ -405,32 +411,11 @@ type Proxier struct {
natRules *bytes.Buffer
}
type localPort struct {
desc string
ip string
port int
protocol string
}
func (lp *localPort) String() string {
return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol)
}
type closeable interface {
Close() error
}
// portOpener is an interface around port opening/closing.
// Abstracted out for testing.
type portOpener interface {
OpenLocalPort(lp *localPort) (closeable, error)
}
// listenPortOpener opens ports by calling bind() and listen().
type listenPortOpener struct{}
// OpenLocalPort holds the given local port open.
func (l *listenPortOpener) OpenLocalPort(lp *localPort) (closeable, error) {
func (l *listenPortOpener) OpenLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) {
return openLocalPort(lp)
}
@ -491,7 +476,7 @@ func NewProxier(ipt utiliptables.Interface,
healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
proxier := &Proxier{
portsMap: make(map[localPort]closeable),
portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable),
serviceMap: make(proxyServiceMap),
serviceChanges: newServiceChangeMap(),
endpointsMap: make(proxyEndpointsMap),
@ -687,20 +672,6 @@ func (proxier *Proxier) OnServiceSynced() {
proxier.syncProxyRules()
}
func shouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
}
// <serviceMap> is updated by this function (based on the given changes).
// <changes> map is cleared after applying them.
func updateServiceMap(
@ -894,7 +865,7 @@ func serviceToServiceMap(service *api.Service) proxyServiceMap {
return nil
}
svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
if shouldSkipService(svcName, service) {
if utilproxy.ShouldSkipService(svcName, service) {
return nil
}
@ -959,8 +930,6 @@ func (esp *endpointServicePair) IPPart() string {
return esp.endpoint
}
const noConnectionToDelete = "0 flow entries have been deleted"
// After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
// risk sending more traffic to it, all of which will be lost (because UDP).
// This assumes the proxier mutex is held
@ -968,13 +937,9 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServ
for epSvcPair := range connectionMap {
if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP {
endpointIP := epSvcPair.endpoint[0:strings.Index(epSvcPair.endpoint, ":")]
glog.V(2).Infof("Deleting connection tracking state for service IP %s, endpoint IP %s", svcInfo.clusterIP.String(), endpointIP)
err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "--orig-dst", svcInfo.clusterIP.String(), "--dst-nat", endpointIP, "-p", "udp")
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
// is expensive to baby sit all udp connections to kubernetes services.
glog.Errorf("conntrack return with error: %v", err)
err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP)
if err != nil {
glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err)
}
}
}
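deleteEndpointConnections now delegates to the new utilproxy.ClearUDPConntrackForPeers helper. The comment above explains why: once a UDP endpoint is removed, existing conntrack entries would keep steering packets into a black hole, so they must be flushed. A rough standard-library sketch of the underlying conntrack invocation; the package and function names are illustrative, and the real helper inspects the wrapped error rather than the combined output.

// Package conntrackdemo is an illustrative stand-in, not the vendored helper.
package conntrackdemo

import (
	"fmt"
	"os/exec"
	"strings"
)

// flushUDPConntrack deletes conntrack entries for UDP flows that were DNAT'ed
// from svcIP to endpointIP so that new packets traverse the NAT table again.
// conntrack exits non-zero when nothing matched; that case is not an error.
func flushUDPConntrack(svcIP, endpointIP string) error {
	out, err := exec.Command("conntrack", "-D",
		"--orig-dst", svcIP, "--dst-nat", endpointIP, "-p", "udp").CombinedOutput()
	if err != nil && !strings.Contains(string(out), "0 flow entries have been deleted") {
		return fmt.Errorf("conntrack -D failed: %v: %s", err, out)
	}
	return nil
}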
@ -1146,7 +1111,7 @@ func (proxier *Proxier) syncProxyRules() {
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[localPort]closeable{}
replacementPortsMap := map[utilproxy.LocalPort]utilproxy.Closeable{}
// We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing:
@ -1217,14 +1182,14 @@ func (proxier *Proxier) syncProxyRules() {
// If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work).
if local, err := isLocalIP(externalIP); err != nil {
if local, err := utilproxy.IsLocalIP(externalIP); err != nil {
glog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local {
lp := localPort{
desc: "externalIP for " + svcNameString,
ip: externalIP,
port: svcInfo.port,
protocol: protocol,
lp := utilproxy.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
Port: svcInfo.port,
Protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
@ -1235,7 +1200,7 @@ func (proxier *Proxier) syncProxyRules() {
msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)
proxier.recorder.Eventf(
&clientv1.ObjectReference{
&v1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
@ -1357,11 +1322,11 @@ func (proxier *Proxier) syncProxyRules() {
if svcInfo.nodePort != 0 {
// Hold the local port open so no other process can open it
// (because the socket might open but it would never work).
lp := localPort{
desc: "nodePort for " + svcNameString,
ip: "",
port: svcInfo.nodePort,
protocol: protocol,
lp := utilproxy.LocalPort{
Description: "nodePort for " + svcNameString,
IP: "",
Port: svcInfo.nodePort,
Protocol: protocol,
}
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
@ -1372,8 +1337,15 @@ func (proxier *Proxier) syncProxyRules() {
glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
continue
}
if lp.protocol == "udp" {
proxier.clearUDPConntrackForPort(lp.port)
if lp.Protocol == "udp" {
// TODO: We might have multiple services using the same port, and this will clear conntrack for all of them.
// This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services.
// This only affects UDP connections, which are not common.
// See issue: https://github.com/kubernetes/kubernetes/issues/49881
err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
}
}
replacementPortsMap[lp] = socket
} // We're holding the port, so it's OK to install iptables rules.
@ -1453,7 +1425,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(svcChain),
"-m", "comment", "--comment", svcNameString,
"-m", "recent", "--name", string(endpointChain),
"--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeMinutes*60), "--reap",
"--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeSeconds), "--reap",
"-j", string(endpointChain))
}
}
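The hunk above is where the new stickyMaxAgeSeconds value reaches iptables: the -m recent --rcheck --seconds argument now comes from the Service's SessionAffinityConfig.ClientIP.TimeoutSeconds instead of the previously hard-coded 180 minutes. A sketch of how those rule arguments are assembled; the chain names and comment are placeholders, and 10800 stands in for the default three-hour ClientIP timeout.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Placeholder chain names; real chains are KUBE-SVC-/KUBE-SEP- plus a hash.
	svcChain := "KUBE-SVC-EXAMPLE"
	endpointChain := "KUBE-SEP-EXAMPLE"
	stickyMaxAgeSeconds := 10800 // assumed default ClientIP timeout (3 hours)

	args := []string{
		"-A", svcChain,
		"-m", "comment", "--comment", "default/demo:http",
		"-m", "recent", "--name", endpointChain,
		"--rcheck", "--seconds", strconv.Itoa(stickyMaxAgeSeconds), "--reap",
		"-j", endpointChain,
	}
	fmt.Println(strings.Join(args, " "))
}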
@ -1599,9 +1571,23 @@ func (proxier *Proxier) syncProxyRules() {
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
glog.Errorf("Failed to execute iptables-restore: %v", err)
glog.V(2).Infof("Rules:\n%s", proxier.iptablesData.Bytes())
// ~rough approximation, assume ~100 chars per line
// we log first 1000 bytes, but full list at higher levels
rules := proxier.iptablesData.Bytes()
if len(rules) > 1000 {
abridgedRules := rules[:1000]
if glog.V(4) {
glog.V(4).Infof("Rules:\n%s", rules)
} else {
glog.V(2).Infof("Rules (abridged):\n%s", abridgedRules)
}
} else {
glog.V(2).Infof("Rules:\n%s", rules)
}
// Revert new local ports.
revertPorts(replacementPortsMap, proxier.portsMap)
glog.V(2).Infof("Closing local ports after iptables-restore failure")
utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap)
return
}
@ -1629,26 +1615,13 @@ func (proxier *Proxier) syncProxyRules() {
}
// Finish housekeeping.
// TODO: these and clearUDPConntrackForPort() could be made more consistent.
utilproxy.DeleteServiceConnections(proxier.exec, staleServices.List())
proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints)
}
// Clear UDP conntrack for port or all conntrack entries when port equal zero.
// When a packet arrives, it will not go through NAT table again, because it is not "the first" packet.
// The solution is clearing the conntrack. Known issus:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func (proxier *Proxier) clearUDPConntrackForPort(port int) {
glog.V(2).Infof("Deleting conntrack entries for udp connections")
if port > 0 {
err := utilproxy.ExecConntrackTool(proxier.exec, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
glog.Errorf("conntrack return with error: %v", err)
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.List() {
if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
} else {
glog.Errorf("Wrong port number. The port number must be greater than zero")
}
proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints)
}
// Join all words with spaces, terminate with newline and write to buf.
@ -1664,24 +1637,7 @@ func writeLine(buf *bytes.Buffer, words ...string) {
}
}
func isLocalIP(ip string) (bool, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return false, err
}
for i := range addrs {
intf, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return false, err
}
if net.ParseIP(ip).Equal(intf) {
return true, nil
}
}
return false, nil
}
func openLocalPort(lp *localPort) (closeable, error) {
func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) {
// For ports on node IPs, open the actual port and hold it, even though we
// use iptables to redirect traffic.
// This ensures a) that it's safe to use that port and b) that (a) stays
@ -1694,16 +1650,16 @@ func openLocalPort(lp *localPort) (closeable, error) {
// it. Tools like 'ss' and 'netstat' do not show sockets that are
// bind()ed but not listen()ed, and at least the default debian netcat
// has no way to avoid about 10 seconds of retries.
var socket closeable
switch lp.protocol {
var socket utilproxy.Closeable
switch lp.Protocol {
case "tcp":
listener, err := net.Listen("tcp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port)))
listener, err := net.Listen("tcp", net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)))
if err != nil {
return nil, err
}
socket = listener
case "udp":
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port)))
addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)))
if err != nil {
return nil, err
}
@ -1713,20 +1669,8 @@ func openLocalPort(lp *localPort) (closeable, error) {
}
socket = conn
default:
return nil, fmt.Errorf("unknown protocol %q", lp.protocol)
return nil, fmt.Errorf("unknown protocol %q", lp.Protocol)
}
glog.V(2).Infof("Opened local port %s", lp.String())
return socket, nil
}
// revertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only
// closes the ports opened in this sync.
func revertPorts(replacementPortsMap, originalPortsMap map[localPort]closeable) {
for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil {
glog.V(2).Infof("Closing local port %s after iptables-restore failure", k.String())
v.Close()
}
}
}
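openLocalPort, rewritten above around utilproxy.LocalPort, reserves nodePorts and local externalIPs by actually opening the socket even though iptables redirects the traffic; for TCP it listen()s rather than only bind()s so the reservation is visible to tools like ss and netstat. A compact standard-library sketch of the same idea; the package and function names are illustrative.

// Package portdemo is an illustrative stand-in for the vendored port handling.
package portdemo

import (
	"fmt"
	"io"
	"net"
	"strconv"
)

// holdPort opens ip:port for "tcp" or "udp" and returns the socket; keeping it
// open reserves the port on the node even though iptables handles the traffic.
func holdPort(ip string, port int, protocol string) (io.Closer, error) {
	hostPort := net.JoinHostPort(ip, strconv.Itoa(port))
	switch protocol {
	case "tcp":
		// listen(), not just bind(), so the held port shows up in ss/netstat.
		return net.Listen("tcp", hostPort)
	case "udp":
		addr, err := net.ResolveUDPAddr("udp", hostPort)
		if err != nil {
			return nil, err
		}
		return net.ListenUDP("udp", addr)
	default:
		return nil, fmt.Errorf("unknown protocol %q", protocol)
	}
}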

vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go

@ -18,30 +18,27 @@ package util
import (
"fmt"
"strconv"
"strings"
"k8s.io/kubernetes/pkg/util/exec"
"github.com/golang/glog"
"k8s.io/utils/exec"
)
// Utilities for dealing with conntrack
const noConnectionToDelete = "0 flow entries have been deleted"
// DeleteServiceConnection uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the given service IPs
func DeleteServiceConnections(execer exec.Interface, svcIPs []string) {
for _, ip := range svcIPs {
glog.V(2).Infof("Deleting connection tracking state for service IP %s", ip)
err := ExecConntrackTool(execer, "-D", "--orig-dst", ip, "-p", "udp")
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
// is expensive to baby-sit all udp connections to kubernetes services.
glog.Errorf("conntrack returned error: %v", err)
}
// ClearUDPConntrackForIP uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the given service IP
func ClearUDPConntrackForIP(execer exec.Interface, ip string) error {
err := ExecConntrackTool(execer, "-D", "--orig-dst", ip, "-p", "udp")
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
// is expensive to baby-sit all udp connections to kubernetes services.
return fmt.Errorf("error deleting connection tracking state for UDP service IP: %s, error: %v", ip, err)
}
return nil
}
// ExecConntrackTool executes the conntrack tool using the given parameters
@ -56,3 +53,33 @@ func ExecConntrackTool(execer exec.Interface, parameters ...string) error {
}
return nil
}
// ClearUDPConntrackForPort uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the port.
// When a packet arrives, it will not go through NAT table again, because it is not "the first" packet.
// The solution is clearing the conntrack. Known issues:
// https://github.com/docker/docker/issues/8795
// https://github.com/kubernetes/kubernetes/issues/31983
func ClearUDPConntrackForPort(execer exec.Interface, port int) error {
if port <= 0 {
return fmt.Errorf("Wrong port number. The port number must be greater than zero")
}
err := ExecConntrackTool(execer, "-D", "-p", "udp", "--dport", strconv.Itoa(port))
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err)
}
return nil
}
// ClearUDPConntrackForPeers uses the conntrack tool to delete the conntrack entries
// for the UDP connections specified by the {origin, dest} IP pair.
func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error {
err := ExecConntrackTool(execer, "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp")
if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) {
// TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed.
// These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it
// is expensive to baby sit all udp connections to kubernetes services.
return fmt.Errorf("error deleting conntrack entries for UDP peer {%s, %s}, error: %v", origin, dest, err)
}
return nil
}
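The three exported helpers above replace conntrack handling that was previously open-coded in the iptables proxier; all of them treat conntrack's "0 flow entries have been deleted" message as success. A hedged usage sketch: the IPs and port are made up, and the import paths are the in-tree equivalents of the vendored packages.

package main

import (
	"log"

	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	utilexec "k8s.io/utils/exec"
)

func main() {
	execer := utilexec.New()

	// Flush all UDP entries whose original destination is a stale service IP.
	if err := utilproxy.ClearUDPConntrackForIP(execer, "10.96.0.10"); err != nil {
		log.Print(err)
	}
	// Flush UDP entries for a NodePort.
	if err := utilproxy.ClearUDPConntrackForPort(execer, 30053); err != nil {
		log.Print(err)
	}
	// Flush UDP entries DNAT'ed from a service IP to a removed endpoint.
	if err := utilproxy.ClearUDPConntrackForPeers(execer, "10.96.0.10", "10.244.1.5"); err != nil {
		log.Print(err)
	}
}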

vendor/k8s.io/kubernetes/pkg/proxy/util/port.go (generated, vendored; new file, 64 lines)

@ -0,0 +1,64 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"github.com/golang/glog"
)
// LocalPort describes a port on specific IP address and protocol
type LocalPort struct {
// Description is the identity message of a given local port.
Description string
// IP is the IP address part of a given local port.
// If this string is empty, the port binds to all local IP addresses.
IP string
// Port is the port part of a given local port.
Port int
// Protocol is the protocol part of a given local port.
// The value is assumed to be lower-case. For example, "udp" not "UDP", "tcp" not "TCP".
Protocol string
}
func (lp *LocalPort) String() string {
return fmt.Sprintf("%q (%s:%d/%s)", lp.Description, lp.IP, lp.Port, lp.Protocol)
}
// Closeable is an interface around closing an port.
type Closeable interface {
Close() error
}
// PortOpener is an interface around port opening/closing.
// Abstracted out for testing.
type PortOpener interface {
OpenLocalPort(lp *LocalPort) (Closeable, error)
}
// RevertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only
// closes the ports opened in this sync.
func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable) {
for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil {
glog.V(2).Infof("Closing local port %s", k.String())
v.Close()
}
}
}
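PortOpener is split out as an interface so tests can substitute a fake, and RevertPorts closes only the ports that were newly opened during a failed sync. An illustrative fake, not the real test helper, that compiles alongside the file above:

package util

// fakeSocket and fakePortOpener are illustrative stand-ins for test doubles.
type fakeSocket struct{ closed bool }

func (s *fakeSocket) Close() error { s.closed = true; return nil }

type fakePortOpener struct{ opened []*LocalPort }

var _ PortOpener = &fakePortOpener{}

func (f *fakePortOpener) OpenLocalPort(lp *LocalPort) (Closeable, error) {
	f.opened = append(f.opened, lp)
	return &fakeSocket{}, nil
}

func exampleRevert() {
	opener := &fakePortOpener{}
	original := map[LocalPort]Closeable{}    // ports held before this sync
	replacement := map[LocalPort]Closeable{} // ports held by this sync

	lp := LocalPort{Description: "nodePort for demo", IP: "", Port: 30080, Protocol: "tcp"}
	socket, _ := opener.OpenLocalPort(&lp)
	replacement[lp] = socket

	// If iptables-restore fails, only the newly opened port gets closed.
	RevertPorts(replacement, original)
}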

vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go (generated, vendored; new file, 58 lines)

@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/helper"
"github.com/golang/glog"
)
func IsLocalIP(ip string) (bool, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return false, err
}
for i := range addrs {
intf, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return false, err
}
if net.ParseIP(ip).Equal(intf) {
return true, nil
}
}
return false, nil
}
func ShouldSkipService(svcName types.NamespacedName, service *api.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == api.ServiceTypeExternalName {
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
return true
}
return false
}
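The new utils.go collects two checks the iptables proxier used to define locally: ShouldSkipService (headless and ExternalName services are not proxied) and IsLocalIP (whether the address is bound to one of the node's interfaces). A hedged usage sketch; the namespace, name, and address are made up, and pkg/api is the internal API package the vendored code itself imports.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/api"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
)

func main() {
	// A headless service (ClusterIP "None") is skipped by the proxier.
	headless := &api.Service{Spec: api.ServiceSpec{ClusterIP: "None"}}
	name := types.NamespacedName{Namespace: "default", Name: "demo"}
	fmt.Println(utilproxy.ShouldSkipService(name, headless)) // true

	local, err := utilproxy.IsLocalIP("192.168.1.10")
	fmt.Println(local, err)
}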