vendor: bump to Kube 1.9/master
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
parent 7076c73172
commit 7a675ccd92
202 changed files with 8543 additions and 7270 deletions
vendor/k8s.io/kubernetes/pkg/proxy/iptables/metrics.go (generated, vendored): 50 changed lines
@@ -1,50 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package iptables
-
-import (
-	"sync"
-	"time"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-const kubeProxySubsystem = "kubeproxy"
-
-var (
-	SyncProxyRulesLatency = prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Subsystem: kubeProxySubsystem,
-			Name:      "sync_proxy_rules_latency_microseconds",
-			Help:      "SyncProxyRules latency",
-			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
-		},
-	)
-)
-
-var registerMetricsOnce sync.Once
-
-func RegisterMetrics() {
-	registerMetricsOnce.Do(func() {
-		prometheus.MustRegister(SyncProxyRulesLatency)
-	})
-}
-
-// Gets the time since the specified start in microseconds.
-func sinceInMicroseconds(start time.Time) float64 {
-	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
-}
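The file is removed outright: these metrics now live in the shared k8s.io/kubernetes/pkg/proxy/metrics package, which proxier.go imports below and uses via metrics.SyncProxyRulesLatency and metrics.SinceInMicroseconds. A rough sketch of the relocated package, assuming it simply exports what this file used to define (the sketch is not part of this diff and the vendored source may differ):

// Hypothetical sketch of k8s.io/kubernetes/pkg/proxy/metrics, inferred from
// the identifiers proxier.go now references.
package metrics

import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// SyncProxyRulesLatency is the histogram formerly defined in pkg/proxy/iptables.
var SyncProxyRulesLatency = prometheus.NewHistogram(
	prometheus.HistogramOpts{
		Subsystem: "kubeproxy",
		Name:      "sync_proxy_rules_latency_microseconds",
		Help:      "SyncProxyRules latency",
		Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
	},
)

var registerMetricsOnce sync.Once

// RegisterMetrics registers the proxy metrics exactly once.
func RegisterMetrics() {
	registerMetricsOnce.Do(func() { prometheus.MustRegister(SyncProxyRulesLatency) })
}

// SinceInMicroseconds is the exported counterpart of the old sinceInMicroseconds helper.
func SinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}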
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go (generated, vendored): 71 changed lines
@@ -47,6 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
+	"k8s.io/kubernetes/pkg/proxy/metrics"
 	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
 	"k8s.io/kubernetes/pkg/util/async"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
@@ -166,12 +167,9 @@ type endpointsInfo struct {
 	chainName utiliptables.Chain
 }
 
-// Returns just the IP part of the endpoint.
+// IPPart returns just the IP part of the endpoint.
 func (e *endpointsInfo) IPPart() string {
-	if index := strings.Index(e.endpoint, ":"); index != -1 {
-		return e.endpoint[0:index]
-	}
-	return e.endpoint
+	return utilproxy.IPPart(e.endpoint)
 }
 
 // Returns the endpoint chain name for a given endpointsInfo.
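Both IPPart methods in this file now delegate to a shared utilproxy.IPPart helper from k8s.io/kubernetes/pkg/proxy/util. Its source is not part of this diff; a minimal sketch, assuming it splits an "ip:port" endpoint string and, as the new comment in getLocalIPs below states, logs an error and returns an empty string for a malformed endpoint:

package util

import (
	"net"

	"github.com/golang/glog"
)

// IPPart returns just the IP part of an "ip:port" (or "[ipv6]:port") endpoint
// string. This is a sketch; the vendored helper may differ in detail.
func IPPart(s string) string {
	if ip := net.ParseIP(s); ip != nil {
		// Already a bare IP, return it unchanged.
		return s
	}
	host, _, err := net.SplitHostPort(s) // strips the port, handles IPv6 brackets
	if err != nil {
		glog.Errorf("Error parsing '%s': %v", s, err)
		return ""
	}
	return host
}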
@@ -320,12 +318,14 @@ func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previo
 func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String {
 	existingPorts := sets.NewString()
 	for svcPortName, info := range other {
+		port := strconv.Itoa(info.port)
+		clusterIPPort := net.JoinHostPort(info.clusterIP.String(), port)
 		existingPorts.Insert(svcPortName.Port)
 		_, exists := (*sm)[svcPortName]
 		if !exists {
-			glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
+			glog.V(1).Infof("Adding new service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol)
 		} else {
-			glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
+			glog.V(1).Infof("Updating existing service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol)
 		}
 		(*sm)[svcPortName] = info
 	}
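The switch from hand-rolled "%s:%d" formatting to net.JoinHostPort matters for IPv6 cluster IPs, which need brackets in host:port form. A small standalone illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.JoinHostPort adds the brackets an IPv6 literal needs in "host:port" form.
	fmt.Println(net.JoinHostPort("10.0.0.1", "53")) // 10.0.0.1:53
	fmt.Println(net.JoinHostPort("fd00::1", "53"))  // [fd00::1]:53
}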
@@ -798,11 +798,15 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S
 	for svcPortName := range endpointsMap {
 		for _, ep := range endpointsMap[svcPortName] {
 			if ep.isLocal {
-				nsn := svcPortName.NamespacedName
-				if localIPs[nsn] == nil {
-					localIPs[nsn] = sets.NewString()
+				// If the endpoint has a bad format, ipPart() will log an
+				// error and ep.IPPart() will return a null string.
+				if ip := ep.IPPart(); ip != "" {
+					nsn := svcPortName.NamespacedName
+					if localIPs[nsn] == nil {
+						localIPs[nsn] = sets.NewString()
+					}
+					localIPs[nsn].Insert(ip)
 				}
-				localIPs[nsn].Insert(ep.IPPart()) // just the IP part
 			}
 		}
 	}
@@ -924,10 +928,7 @@ type endpointServicePair struct {
 }
 
 func (esp *endpointServicePair) IPPart() string {
-	if index := strings.Index(esp.endpoint, ":"); index != -1 {
-		return esp.endpoint[0:index]
-	}
-	return esp.endpoint
+	return utilproxy.IPPart(esp.endpoint)
 }
 
 // After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
@@ -936,7 +937,7 @@ func (esp *endpointServicePair) IPPart() string {
 func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) {
 	for epSvcPair := range connectionMap {
 		if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP {
-			endpointIP := epSvcPair.endpoint[0:strings.Index(epSvcPair.endpoint, ":")]
+			endpointIP := utilproxy.IPPart(epSvcPair.endpoint)
 			err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP)
 			if err != nil {
 				glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err)
@@ -954,7 +955,7 @@ func (proxier *Proxier) syncProxyRules() {
 
 	start := time.Now()
 	defer func() {
-		SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
+		metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))
 		glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
 	}()
 	// don't sync rules till we've received services and endpoints
@@ -1162,7 +1163,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString),
 				"-m", protocol, "-p", protocol,
-				"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
+				"-d", utilproxy.ToCIDR(svcInfo.clusterIP),
 				"--dport", strconv.Itoa(svcInfo.port),
 			)
 			if proxier.masqueradeAll {
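This and the following hunks replace every hard-coded "%s/32" destination match with utilproxy.ToCIDR, so the generated iptables rules stay valid for IPv6 service addresses as well (a /32 prefix only makes sense for IPv4). The helper itself is not shown in this diff; a plausible sketch, assuming it emits a full-length prefix for either address family:

package util

import (
	"fmt"
	"net"
)

// ToCIDR returns the IP as a full-length CIDR: /32 for IPv4, /128 for IPv6.
// (Sketch of the utilproxy helper referenced in the diff; the vendored code
// may differ.)
func ToCIDR(ip net.IP) string {
	bits := 32
	if ip.To4() == nil {
		bits = 128
	}
	return fmt.Sprintf("%s/%d", ip.String(), bits)
}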
@@ -1216,7 +1217,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
 				"-m", protocol, "-p", protocol,
-				"-d", fmt.Sprintf("%s/32", externalIP),
+				"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
 				"--dport", strconv.Itoa(svcInfo.port),
 			)
 			// We have to SNAT packets to external IPs.
@@ -1242,7 +1243,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
 				"-m", protocol, "-p", protocol,
-				"-d", fmt.Sprintf("%s/32", externalIP),
+				"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)),
 				"--dport", strconv.Itoa(svcInfo.port),
 				"-j", "REJECT",
 			)
@@ -1268,7 +1269,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
 				"-m", protocol, "-p", protocol,
-				"-d", fmt.Sprintf("%s/32", ingress.IP),
+				"-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)),
 				"--dport", strconv.Itoa(svcInfo.port),
 			)
 			// jump to service firewall chain
@@ -1306,7 +1307,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
 				// Need to add the following rule to allow request on host.
 				if allowFromNode {
-					writeLine(proxier.natRules, append(args, "-s", fmt.Sprintf("%s/32", ingress.IP), "-j", string(chosenChain))...)
+					writeLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), "-j", string(chosenChain))...)
 				}
 			}
 
@@ -1342,7 +1343,8 @@ func (proxier *Proxier) syncProxyRules() {
 				// This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services.
 				// This only affects UDP connections, which are not common.
 				// See issue: https://github.com/kubernetes/kubernetes/issues/49881
-				err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port)
+				isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP)
+				err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port, isIPv6)
 				if err != nil {
 					glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
 				}
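ClearUDPConntrackForPort now takes an address-family flag derived from utilproxy.IsIPv6, presumably so the conntrack flush can target IPv6 entries when the service has an IPv6 cluster IP. IsIPv6 is not shown in this diff; a one-line sketch of the usual check:

package util

import "net"

// IsIPv6 reports whether ip is an IPv6 address; a nil ip is not IPv6.
// (Sketch of the utilproxy helper; the vendored code may differ.)
func IsIPv6(ip net.IP) bool {
	return ip != nil && ip.To4() == nil
}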
@@ -1389,7 +1391,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
 				"-m", protocol, "-p", protocol,
-				"-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()),
+				"-d", utilproxy.ToCIDR(svcInfo.clusterIP),
 				"--dport", strconv.Itoa(svcInfo.port),
 				"-j", "REJECT",
 			)
@@ -1433,6 +1435,11 @@ func (proxier *Proxier) syncProxyRules() {
 		// Now write loadbalancing & DNAT rules.
 		n := len(endpointChains)
 		for i, endpointChain := range endpointChains {
+			epIP := endpoints[i].IPPart()
+			if epIP == "" {
+				// Error parsing this endpoint has been logged. Skip to next endpoint.
+				continue
+			}
 			// Balancing rules in the per-service chain.
 			args = append(args[:0], []string{
 				"-A", string(svcChain),
@@ -1456,7 +1463,7 @@ func (proxier *Proxier) syncProxyRules() {
 			)
 			// Handle traffic that loops back to the originator with SNAT.
 			writeLine(proxier.natRules, append(args,
-				"-s", fmt.Sprintf("%s/32", endpoints[i].IPPart()),
+				"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
 				"-j", string(KubeMarkMasqChain))...)
 			// Update client-affinity lists.
 			if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP {
@@ -1571,20 +1578,6 @@ func (proxier *Proxier) syncProxyRules() {
 	err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
 	if err != nil {
 		glog.Errorf("Failed to execute iptables-restore: %v", err)
-		// ~rough approximation, assume ~100 chars per line
-		// we log first 1000 bytes, but full list at higher levels
-		rules := proxier.iptablesData.Bytes()
-		if len(rules) > 1000 {
-			abridgedRules := rules[:1000]
-			if glog.V(4) {
-				glog.V(4).Infof("Rules:\n%s", rules)
-			} else {
-				glog.V(2).Infof("Rules (abridged):\n%s", abridgedRules)
-			}
-		} else {
-			glog.V(2).Infof("Rules:\n%s", rules)
-		}
-
 		// Revert new local ports.
 		glog.V(2).Infof("Closing local ports after iptables-restore failure")
 		utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap)