Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
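For context on the vendoring switch: dep reads its constraints from a Gopkg.toml manifest at the repository root, records the resolved revisions in Gopkg.lock, and materializes the dependency tree under vendor/ (which is what produces the vendored files below). The snippet that follows is only an illustrative sketch of such a manifest; the constraint name, branch, and prune settings are assumptions for illustration and are not taken from this commit.

    [[constraint]]
      name = "k8s.io/kubernetes"
      branch = "master"

    [prune]
      go-tests = true
      unused-packages = true
      non-go = true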
63 vendor/k8s.io/kubernetes/pkg/kubelet/network/BUILD generated vendored Normal file
@@ -0,0 +1,63 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "network.go",
        "plugins.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/sysctl:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/validation",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["plugins_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/kubelet/network/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/network/cni:all-srcs",
        "//pkg/kubelet/network/hairpin:all-srcs",
        "//pkg/kubelet/network/hostport:all-srcs",
        "//pkg/kubelet/network/kubenet:all-srcs",
        "//pkg/kubelet/network/mock_network:all-srcs",
        "//pkg/kubelet/network/testing:all-srcs",
    ],
    tags = ["automanaged"],
)
61 vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD generated vendored Normal file
@@ -0,0 +1,61 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["cni.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/network:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//vendor:github.com/containernetworking/cni/libcni",
        "//vendor:github.com/containernetworking/cni/pkg/types",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["cni_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/network:go_default_library",
        "//pkg/kubelet/network/cni/testing:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/testing:go_default_library",
        "//vendor:github.com/containernetworking/cni/pkg/types",
        "//vendor:github.com/stretchr/testify/mock",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/network/cni/testing:all-srcs",
    ],
    tags = ["automanaged"],
)
296 vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go generated vendored Normal file
@@ -0,0 +1,296 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cni

import (
	"errors"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/containernetworking/cni/libcni"
	cnitypes "github.com/containernetworking/cni/pkg/types"
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/network"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
)

const (
	CNIPluginName        = "cni"
	DefaultNetDir        = "/etc/cni/net.d"
	DefaultCNIDir        = "/opt/cni/bin"
	VendorCNIDirTemplate = "%s/opt/%s/bin"
)

type cniNetworkPlugin struct {
	network.NoopNetworkPlugin

	loNetwork *cniNetwork

	sync.RWMutex
	defaultNetwork *cniNetwork

	host               network.Host
	execer             utilexec.Interface
	nsenterPath        string
	pluginDir          string
	binDir             string
	vendorCNIDirPrefix string
}

type cniNetwork struct {
	name          string
	NetworkConfig *libcni.NetworkConfig
	CNIConfig     libcni.CNI
}

func probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, binDir, vendorCNIDirPrefix string) []network.NetworkPlugin {
	if binDir == "" {
		binDir = DefaultCNIDir
	}
	plugin := &cniNetworkPlugin{
		defaultNetwork:     nil,
		loNetwork:          getLoNetwork(binDir, vendorCNIDirPrefix),
		execer:             utilexec.New(),
		pluginDir:          pluginDir,
		binDir:             binDir,
		vendorCNIDirPrefix: vendorCNIDirPrefix,
	}

	// sync NetworkConfig in best effort during probing.
	plugin.syncNetworkConfig()
	return []network.NetworkPlugin{plugin}
}

func ProbeNetworkPlugins(pluginDir, binDir string) []network.NetworkPlugin {
	return probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, binDir, "")
}

func getDefaultCNINetwork(pluginDir, binDir, vendorCNIDirPrefix string) (*cniNetwork, error) {
	if pluginDir == "" {
		pluginDir = DefaultNetDir
	}
	files, err := libcni.ConfFiles(pluginDir)
	switch {
	case err != nil:
		return nil, err
	case len(files) == 0:
		return nil, fmt.Errorf("No networks found in %s", pluginDir)
	}

	sort.Strings(files)
	for _, confFile := range files {
		conf, err := libcni.ConfFromFile(confFile)
		if err != nil {
			glog.Warningf("Error loading CNI config file %s: %v", confFile, err)
			continue
		}
		// Search for vendor-specific plugins as well as default plugins in the CNI codebase.
		vendorDir := vendorCNIDir(vendorCNIDirPrefix, conf.Network.Type)
		cninet := &libcni.CNIConfig{
			Path: []string{binDir, vendorDir},
		}
		network := &cniNetwork{name: conf.Network.Name, NetworkConfig: conf, CNIConfig: cninet}
		return network, nil
	}
	return nil, fmt.Errorf("No valid networks found in %s", pluginDir)
}

func vendorCNIDir(prefix, pluginType string) string {
	return fmt.Sprintf(VendorCNIDirTemplate, prefix, pluginType)
}

func getLoNetwork(binDir, vendorDirPrefix string) *cniNetwork {
	loConfig, err := libcni.ConfFromBytes([]byte(`{
  "cniVersion": "0.1.0",
  "name": "cni-loopback",
  "type": "loopback"
}`))
	if err != nil {
		// The hardcoded config above should always be valid and unit tests will
		// catch this
		panic(err)
	}
	cninet := &libcni.CNIConfig{
		Path: []string{vendorCNIDir(vendorDirPrefix, loConfig.Network.Type), binDir},
	}
	loNetwork := &cniNetwork{
		name:          "lo",
		NetworkConfig: loConfig,
		CNIConfig:     cninet,
	}

	return loNetwork
}

func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
	var err error
	plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
	if err != nil {
		return err
	}

	plugin.host = host

	// sync network config from pluginDir periodically to detect network config updates
	go wait.Forever(func() {
		plugin.syncNetworkConfig()
	}, 10*time.Second)
	return nil
}

func (plugin *cniNetworkPlugin) syncNetworkConfig() {
	network, err := getDefaultCNINetwork(plugin.pluginDir, plugin.binDir, plugin.vendorCNIDirPrefix)
	if err != nil {
		glog.Errorf("error updating cni config: %s", err)
		return
	}
	plugin.setDefaultNetwork(network)
}

func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
	plugin.RLock()
	defer plugin.RUnlock()
	return plugin.defaultNetwork
}

func (plugin *cniNetworkPlugin) setDefaultNetwork(n *cniNetwork) {
	plugin.Lock()
	defer plugin.Unlock()
	plugin.defaultNetwork = n
}

func (plugin *cniNetworkPlugin) checkInitialized() error {
	if plugin.getDefaultNetwork() == nil {
		return errors.New("cni config unintialized")
	}
	return nil
}

func (plugin *cniNetworkPlugin) Name() string {
	return CNIPluginName
}

func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error {
	if err := plugin.checkInitialized(); err != nil {
		return err
	}
	netnsPath, err := plugin.host.GetNetNS(id.ID)
	if err != nil {
		return fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
	}

	_, err = plugin.loNetwork.addToNetwork(name, namespace, id, netnsPath)
	if err != nil {
		glog.Errorf("Error while adding to cni lo network: %s", err)
		return err
	}

	_, err = plugin.getDefaultNetwork().addToNetwork(name, namespace, id, netnsPath)
	if err != nil {
		glog.Errorf("Error while adding to cni network: %s", err)
		return err
	}

	return err
}

func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
	if err := plugin.checkInitialized(); err != nil {
		return err
	}
	netnsPath, err := plugin.host.GetNetNS(id.ID)
	if err != nil {
		return fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
	}

	return plugin.getDefaultNetwork().deleteFromNetwork(name, namespace, id, netnsPath)
}

// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls
func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
	netnsPath, err := plugin.host.GetNetNS(id.ID)
	if err != nil {
		return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
	}

	ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
	if err != nil {
		return nil, err
	}

	return &network.PodNetworkStatus{IP: ip}, nil
}

func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*cnitypes.Result, error) {
	rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath)
	if err != nil {
		glog.Errorf("Error adding network: %v", err)
		return nil, err
	}

	netconf, cninet := network.NetworkConfig, network.CNIConfig
	glog.V(4).Infof("About to run with conf.Network.Type=%v", netconf.Network.Type)
	res, err := cninet.AddNetwork(netconf, rt)
	if err != nil {
		glog.Errorf("Error adding network: %v", err)
		return nil, err
	}

	return res, nil
}

func (network *cniNetwork) deleteFromNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) error {
	rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath)
	if err != nil {
		glog.Errorf("Error deleting network: %v", err)
		return err
	}

	netconf, cninet := network.NetworkConfig, network.CNIConfig
	glog.V(4).Infof("About to run with conf.Network.Type=%v", netconf.Network.Type)
	err = cninet.DelNetwork(netconf, rt)
	if err != nil {
		glog.Errorf("Error deleting network: %v", err)
		return err
	}
	return nil
}

func buildCNIRuntimeConf(podName string, podNs string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*libcni.RuntimeConf, error) {
	glog.V(4).Infof("Got netns path %v", podNetnsPath)
	glog.V(4).Infof("Using netns path %v", podNs)

	rt := &libcni.RuntimeConf{
		ContainerID: podInfraContainerID.ID,
		NetNS:       podNetnsPath,
		IfName:      network.DefaultInterfaceName,
		Args: [][2]string{
			{"IgnoreUnknown", "1"},
			{"K8S_POD_NAMESPACE", podNs},
			{"K8S_POD_NAME", podName},
			{"K8S_POD_INFRA_CONTAINER_ID", podInfraContainerID.ID},
		},
	}

	return rt, nil
}
259 vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_test.go generated vendored Normal file
@@ -0,0 +1,259 @@
// +build linux

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cni

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"path"
	"testing"
	"text/template"

	cnitypes "github.com/containernetworking/cni/pkg/types"
	"github.com/stretchr/testify/mock"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/network"
	"k8s.io/kubernetes/pkg/kubelet/network/cni/testing"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
)

func installPluginUnderTest(t *testing.T, testVendorCNIDirPrefix, testNetworkConfigPath, vendorName string, plugName string) {
	pluginDir := path.Join(testNetworkConfigPath, plugName)
	err := os.MkdirAll(pluginDir, 0777)
	if err != nil {
		t.Fatalf("Failed to create plugin config dir: %v", err)
	}
	pluginConfig := path.Join(pluginDir, plugName+".conf")
	f, err := os.Create(pluginConfig)
	if err != nil {
		t.Fatalf("Failed to install plugin")
	}
	networkConfig := fmt.Sprintf("{ \"name\": \"%s\", \"type\": \"%s\" }", plugName, vendorName)

	_, err = f.WriteString(networkConfig)
	if err != nil {
		t.Fatalf("Failed to write network config file (%v)", err)
	}
	f.Close()

	vendorCNIDir := fmt.Sprintf(VendorCNIDirTemplate, testVendorCNIDirPrefix, vendorName)
	err = os.MkdirAll(vendorCNIDir, 0777)
	if err != nil {
		t.Fatalf("Failed to create plugin dir: %v", err)
	}
	pluginExec := path.Join(vendorCNIDir, vendorName)
	f, err = os.Create(pluginExec)

	const execScriptTempl = `#!/bin/bash
read ignore
env > {{.OutputEnv}}
echo "%@" >> {{.OutputEnv}}
export $(echo ${CNI_ARGS} | sed 's/;/ /g') &> /dev/null
mkdir -p {{.OutputDir}} &> /dev/null
echo -n "$CNI_COMMAND $CNI_NETNS $K8S_POD_NAMESPACE $K8S_POD_NAME $K8S_POD_INFRA_CONTAINER_ID" >& {{.OutputFile}}
echo -n "{ \"ip4\": { \"ip\": \"10.1.0.23/24\" } }"
`
	execTemplateData := &map[string]interface{}{
		"OutputFile": path.Join(pluginDir, plugName+".out"),
		"OutputEnv":  path.Join(pluginDir, plugName+".env"),
		"OutputDir":  pluginDir,
	}

	tObj := template.Must(template.New("test").Parse(execScriptTempl))
	buf := &bytes.Buffer{}
	if err := tObj.Execute(buf, *execTemplateData); err != nil {
		t.Fatalf("Error in executing script template - %v", err)
	}
	execScript := buf.String()
	_, err = f.WriteString(execScript)
	if err != nil {
		t.Fatalf("Failed to write plugin exec - %v", err)
	}

	err = f.Chmod(0777)
	if err != nil {
		t.Fatalf("Failed to set exec perms on plugin")
	}

	f.Close()
}

func tearDownPlugin(tmpDir string) {
	err := os.RemoveAll(tmpDir)
	if err != nil {
		fmt.Printf("Error in cleaning up test: %v", err)
	}
}

type fakeNetworkHost struct {
	kubeClient clientset.Interface
	runtime    kubecontainer.Runtime
}

func NewFakeHost(kubeClient clientset.Interface, pods []*containertest.FakePod) *fakeNetworkHost {
	host := &fakeNetworkHost{
		kubeClient: kubeClient,
		runtime: &containertest.FakeRuntime{
			AllPodList: pods,
		},
	}
	return host
}

func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
	return nil, false
}

func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
	return fnh.kubeClient
}

func (fnh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime {
	return fnh.runtime
}

func (fnh *fakeNetworkHost) GetNetNS(containerID string) (string, error) {
	return fnh.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "test", ID: containerID})
}

func (fnh *fakeNetworkHost) SupportsLegacyFeatures() bool {
	return true
}

func TestCNIPlugin(t *testing.T) {
	// install some random plugin
	pluginName := fmt.Sprintf("test%d", rand.Intn(1000))
	vendorName := fmt.Sprintf("test_vendor%d", rand.Intn(1000))

	podIP := "10.0.0.2"
	podIPOutput := fmt.Sprintf("4: eth0 inet %s/24 scope global dynamic eth0\\ valid_lft forever preferred_lft forever", podIP)
	fakeCmds := []utilexec.FakeCommandAction{
		func(cmd string, args ...string) utilexec.Cmd {
			return utilexec.InitFakeCmd(&utilexec.FakeCmd{
				CombinedOutputScript: []utilexec.FakeCombinedOutputAction{
					func() ([]byte, error) {
						return []byte(podIPOutput), nil
					},
				},
			}, cmd, args...)
		},
	}

	fexec := &utilexec.FakeExec{
		CommandScript: fakeCmds,
		LookPathFunc: func(file string) (string, error) {
			return fmt.Sprintf("/fake-bin/%s", file), nil
		},
	}

	mockLoCNI := &mock_cni.MockCNI{}
	// TODO mock for the test plugin too

	tmpDir := utiltesting.MkTmpdirOrDie("cni-test")
	testNetworkConfigPath := path.Join(tmpDir, "plugins", "net", "cni")
	testVendorCNIDirPrefix := tmpDir
	defer tearDownPlugin(tmpDir)
	installPluginUnderTest(t, testVendorCNIDirPrefix, testNetworkConfigPath, vendorName, pluginName)

	containerID := kubecontainer.ContainerID{Type: "test", ID: "test_infra_container"}
	pods := []*containertest.FakePod{{
		Pod: &kubecontainer.Pod{
			Containers: []*kubecontainer.Container{
				{ID: containerID},
			},
		},
		NetnsPath: "/proc/12345/ns/net",
	}}

	plugins := probeNetworkPluginsWithVendorCNIDirPrefix(path.Join(testNetworkConfigPath, pluginName), "", testVendorCNIDirPrefix)
	if len(plugins) != 1 {
		t.Fatalf("Expected only one network plugin, got %d", len(plugins))
	}
	if plugins[0].Name() != "cni" {
		t.Fatalf("Expected CNI network plugin, got %q", plugins[0].Name())
	}

	cniPlugin, ok := plugins[0].(*cniNetworkPlugin)
	if !ok {
		t.Fatalf("Not a CNI network plugin!")
	}
	cniPlugin.execer = fexec
	cniPlugin.loNetwork.CNIConfig = mockLoCNI

	mockLoCNI.On("AddNetwork", cniPlugin.loNetwork.NetworkConfig, mock.AnythingOfType("*libcni.RuntimeConf")).Return(&cnitypes.Result{IP4: &cnitypes.IPConfig{IP: net.IPNet{IP: []byte{127, 0, 0, 1}}}}, nil)

	plug, err := network.InitNetworkPlugin(plugins, "cni", NewFakeHost(nil, pods), componentconfig.HairpinNone, "10.0.0.0/8", network.UseDefaultMTU)
	if err != nil {
		t.Fatalf("Failed to select the desired plugin: %v", err)
	}

	// Set up the pod
	err = plug.SetUpPod("podNamespace", "podName", containerID)
	if err != nil {
		t.Errorf("Expected nil: %v", err)
	}
	outputEnv := path.Join(testNetworkConfigPath, pluginName, pluginName+".env")
	eo, eerr := ioutil.ReadFile(outputEnv)
	outputFile := path.Join(testNetworkConfigPath, pluginName, pluginName+".out")
	output, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Errorf("Failed to read output file %s: %v (env %s err %v)", outputFile, err, eo, eerr)
	}
	expectedOutput := "ADD /proc/12345/ns/net podNamespace podName test_infra_container"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}

	// Get its IP address
	status, err := plug.GetPodNetworkStatus("podNamespace", "podName", containerID)
	if err != nil {
		t.Errorf("Failed to read pod network status: %v", err)
	}
	if status.IP.String() != podIP {
		t.Errorf("Expected pod IP %q but got %q", podIP, status.IP.String())
	}

	// Tear it down
	err = plug.TearDownPod("podNamespace", "podName", containerID)
	if err != nil {
		t.Errorf("Expected nil: %v", err)
	}
	output, err = ioutil.ReadFile(path.Join(testNetworkConfigPath, pluginName, pluginName+".out"))
	expectedOutput = "DEL /proc/12345/ns/net podNamespace podName test_infra_container"
	if string(output) != expectedOutput {
		t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output))
	}

	mockLoCNI.AssertExpectations(t)
}

func TestLoNetNonNil(t *testing.T) {
	if conf := getLoNetwork("", ""); conf == nil {
		t.Error("Expected non-nil lo network")
	}
}
32 vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/testing/BUILD generated vendored Normal file
@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["mock_cni.go"],
    tags = ["automanaged"],
    deps = [
        "//vendor:github.com/containernetworking/cni/libcni",
        "//vendor:github.com/containernetworking/cni/pkg/types",
        "//vendor:github.com/stretchr/testify/mock",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
39 vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/testing/mock_cni.go generated vendored Normal file
@@ -0,0 +1,39 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// mock_cni is a mock of the `libcni.CNI` interface. It's a handwritten mock
// because there are only two functions to deal with.
package mock_cni

import (
	"github.com/containernetworking/cni/libcni"
	"github.com/containernetworking/cni/pkg/types"
	"github.com/stretchr/testify/mock"
)

type MockCNI struct {
	mock.Mock
}

func (m *MockCNI) AddNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) (*types.Result, error) {
	args := m.Called(net, rt)
	return args.Get(0).(*types.Result), args.Error(1)
}

func (m *MockCNI) DelNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) error {
	args := m.Called(net, rt)
	return args.Error(0)
}
40 vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD generated vendored Normal file
@@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["hairpin.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/util/exec:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["hairpin_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = ["//pkg/util/exec:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
110 vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin.go generated vendored Normal file
@@ -0,0 +1,110 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hairpin

import (
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"regexp"
	"strconv"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/exec"
)

const (
	sysfsNetPath            = "/sys/devices/virtual/net"
	brportRelativePath      = "brport"
	hairpinModeRelativePath = "hairpin_mode"
	hairpinEnable           = "1"
)

var (
	ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)")
)

func SetUpContainerPid(containerPid int, containerInterfaceName string) error {
	pidStr := fmt.Sprintf("%d", containerPid)
	nsenterArgs := []string{"-t", pidStr, "-n"}
	return setUpContainerInternal(containerInterfaceName, pidStr, nsenterArgs)
}

func SetUpContainerPath(netnsPath string, containerInterfaceName string) error {
	if netnsPath[0] != '/' {
		return fmt.Errorf("netnsPath path '%s' was invalid", netnsPath)
	}
	nsenterArgs := []string{"-n", netnsPath}
	return setUpContainerInternal(containerInterfaceName, netnsPath, nsenterArgs)
}

func setUpContainerInternal(containerInterfaceName, containerDesc string, nsenterArgs []string) error {
	e := exec.New()
	hostIfName, err := findPairInterfaceOfContainerInterface(e, containerInterfaceName, containerDesc, nsenterArgs)
	if err != nil {
		return err
	}
	return setUpInterface(hostIfName)
}

func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {
	nsenterPath, err := e.LookPath("nsenter")
	if err != nil {
		return "", err
	}
	ethtoolPath, err := e.LookPath("ethtool")
	if err != nil {
		return "", err
	}

	nsenterArgs = append(nsenterArgs, "-F", "--", ethtoolPath, "--statistics", containerInterfaceName)
	output, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("Unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output))
	}
	// look for peer_ifindex
	match := ethtoolOutputRegex.FindSubmatch(output)
	if match == nil {
		return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc)
	}
	peerIfIndex, err := strconv.Atoi(string(match[1]))
	if err != nil { // seems impossible (\d+ not numeric)
		return "", fmt.Errorf("peer_ifindex wasn't numeric: %s: %v", match[1], err)
	}
	iface, err := net.InterfaceByIndex(peerIfIndex)
	if err != nil {
		return "", err
	}
	return iface.Name, nil
}

func setUpInterface(ifName string) error {
	glog.V(3).Infof("Enabling hairpin on interface %s", ifName)
	ifPath := path.Join(sysfsNetPath, ifName)
	if _, err := os.Stat(ifPath); err != nil {
		return err
	}
	brportPath := path.Join(ifPath, brportRelativePath)
	if _, err := os.Stat(brportPath); err != nil && os.IsNotExist(err) {
		// Device is not on a bridge, so doesn't need hairpin mode
		return nil
	}
	hairpinModeFile := path.Join(brportPath, hairpinModeRelativePath)
	return ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644)
}
108 vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin_test.go generated vendored Normal file
@@ -0,0 +1,108 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hairpin

import (
	"errors"
	"fmt"
	"net"
	"os"
	"strings"
	"testing"

	"k8s.io/kubernetes/pkg/util/exec"
)

func TestFindPairInterfaceOfContainerInterface(t *testing.T) {
	// there should be at least "lo" on any system
	interfaces, _ := net.Interfaces()
	validOutput := fmt.Sprintf("garbage\n peer_ifindex: %d", interfaces[0].Index)
	invalidOutput := fmt.Sprintf("garbage\n unknown: %d", interfaces[0].Index)

	tests := []struct {
		output       string
		err          error
		expectedName string
		expectErr    bool
	}{
		{
			output:       validOutput,
			expectedName: interfaces[0].Name,
		},
		{
			output:    invalidOutput,
			expectErr: true,
		},
		{
			output:    validOutput,
			err:       errors.New("error"),
			expectErr: true,
		},
	}
	for _, test := range tests {
		fcmd := exec.FakeCmd{
			CombinedOutputScript: []exec.FakeCombinedOutputAction{
				func() ([]byte, error) { return []byte(test.output), test.err },
			},
		}
		fexec := exec.FakeExec{
			CommandScript: []exec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return exec.InitFakeCmd(&fcmd, cmd, args...)
				},
			},
			LookPathFunc: func(file string) (string, error) {
				return fmt.Sprintf("/fake-bin/%s", file), nil
			},
		}
		nsenterArgs := []string{"-t", "123", "-n"}
		name, err := findPairInterfaceOfContainerInterface(&fexec, "eth0", "123", nsenterArgs)
		if test.expectErr {
			if err == nil {
				t.Errorf("unexpected non-error")
			}
		} else {
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		}
		if name != test.expectedName {
			t.Errorf("unexpected name: %s (expected: %s)", name, test.expectedName)
		}
	}
}

func TestSetUpInterfaceNonExistent(t *testing.T) {
	err := setUpInterface("non-existent")
	if err == nil {
		t.Errorf("unexpected non-error")
	}
	deviceDir := fmt.Sprintf("%s/%s", sysfsNetPath, "non-existent")
	if !strings.Contains(fmt.Sprintf("%v", err), deviceDir) {
		t.Errorf("should have tried to open %s", deviceDir)
	}
}

func TestSetUpInterfaceNotBridged(t *testing.T) {
	err := setUpInterface("lo")
	if err != nil {
		if os.IsNotExist(err) {
			t.Skipf("'lo' device does not exist??? (%v)", err)
		}
		t.Errorf("unexpected error: %v", err)
	}
}
55 vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD generated vendored Normal file
@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "fake_iptables.go",
        "hostport.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/proxy/iptables:go_default_library",
        "//pkg/util/dbus:go_default_library",
        "//pkg/util/exec:go_default_library",
        "//pkg/util/iptables:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["hostport_test.go"],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/util/iptables:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/network/hostport/testing:all-srcs",
    ],
    tags = ["automanaged"],
)
335 vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go generated vendored Normal file
@@ -0,0 +1,335 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hostport

import (
	"bytes"
	"fmt"
	"net"
	"strings"

	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

type fakeChain struct {
	name  utiliptables.Chain
	rules []string
}

type fakeTable struct {
	name   utiliptables.Table
	chains map[string]*fakeChain
}

type fakeIPTables struct {
	tables map[string]*fakeTable
}

func NewFakeIPTables() *fakeIPTables {
	return &fakeIPTables{
		tables: make(map[string]*fakeTable, 0),
	}
}

func (f *fakeIPTables) GetVersion() (string, error) {
	return "1.4.21", nil
}

func (f *fakeIPTables) getTable(tableName utiliptables.Table) (*fakeTable, error) {
	table, ok := f.tables[string(tableName)]
	if !ok {
		return nil, fmt.Errorf("Table %s does not exist", tableName)
	}
	return table, nil
}

func (f *fakeIPTables) getChain(tableName utiliptables.Table, chainName utiliptables.Chain) (*fakeTable, *fakeChain, error) {
	table, err := f.getTable(tableName)
	if err != nil {
		return nil, nil, err
	}

	chain, ok := table.chains[string(chainName)]
	if !ok {
		return table, nil, fmt.Errorf("Chain %s/%s does not exist", tableName, chainName)
	}

	return table, chain, nil
}

func (f *fakeIPTables) ensureChain(tableName utiliptables.Table, chainName utiliptables.Chain) (bool, *fakeChain) {
	table, chain, err := f.getChain(tableName, chainName)
	if err != nil {
		// either table or table+chain don't exist yet
		if table == nil {
			table = &fakeTable{
				name:   tableName,
				chains: make(map[string]*fakeChain),
			}
			f.tables[string(tableName)] = table
		}
		chain := &fakeChain{
			name:  chainName,
			rules: make([]string, 0),
		}
		table.chains[string(chainName)] = chain
		return false, chain
	}
	return true, chain
}

func (f *fakeIPTables) EnsureChain(tableName utiliptables.Table, chainName utiliptables.Chain) (bool, error) {
	existed, _ := f.ensureChain(tableName, chainName)
	return existed, nil
}

func (f *fakeIPTables) FlushChain(tableName utiliptables.Table, chainName utiliptables.Chain) error {
	_, chain, err := f.getChain(tableName, chainName)
	if err != nil {
		return err
	}
	chain.rules = make([]string, 0)
	return nil
}

func (f *fakeIPTables) DeleteChain(tableName utiliptables.Table, chainName utiliptables.Chain) error {
	table, _, err := f.getChain(tableName, chainName)
	if err != nil {
		return err
	}
	delete(table.chains, string(chainName))
	return nil
}

// Returns index of rule in array; < 0 if rule is not found
func findRule(chain *fakeChain, rule string) int {
	for i, candidate := range chain.rules {
		if rule == candidate {
			return i
		}
	}
	return -1
}

func (f *fakeIPTables) ensureRule(position utiliptables.RulePosition, tableName utiliptables.Table, chainName utiliptables.Chain, rule string) (bool, error) {
	_, chain, err := f.getChain(tableName, chainName)
	if err != nil {
		_, chain = f.ensureChain(tableName, chainName)
	}

	rule, err = normalizeRule(rule)
	if err != nil {
		return false, err
	}
	ruleIdx := findRule(chain, rule)
	if ruleIdx >= 0 {
		return true, nil
	}

	if position == utiliptables.Prepend {
		chain.rules = append([]string{rule}, chain.rules...)
	} else if position == utiliptables.Append {
		chain.rules = append(chain.rules, rule)
	} else {
		return false, fmt.Errorf("Unknown position argument %q", position)
	}

	return false, nil
}

func normalizeRule(rule string) (string, error) {
	normalized := ""
	remaining := strings.TrimSpace(rule)
	for {
		var end int

		if strings.HasPrefix(remaining, "--to-destination=") {
			remaining = strings.Replace(remaining, "=", " ", 1)
		}

		if remaining[0] == '"' {
			end = strings.Index(remaining[1:], "\"")
			if end < 0 {
				return "", fmt.Errorf("Invalid rule syntax: mismatched quotes")
			}
			end += 2
		} else {
			end = strings.Index(remaining, " ")
			if end < 0 {
				end = len(remaining)
			}
		}
		arg := remaining[:end]

		// Normalize un-prefixed IP addresses like iptables does
		if net.ParseIP(arg) != nil {
			arg = arg + "/32"
		}

		if len(normalized) > 0 {
			normalized += " "
		}
		normalized += strings.TrimSpace(arg)
		if len(remaining) == end {
			break
		}
		remaining = remaining[end+1:]
	}
	return normalized, nil
}

func (f *fakeIPTables) EnsureRule(position utiliptables.RulePosition, tableName utiliptables.Table, chainName utiliptables.Chain, args ...string) (bool, error) {
	ruleArgs := make([]string, 0)
	for _, arg := range args {
		// quote args with internal spaces (like comments)
		if strings.Index(arg, " ") >= 0 {
			arg = fmt.Sprintf("\"%s\"", arg)
		}
		ruleArgs = append(ruleArgs, arg)
	}
	return f.ensureRule(position, tableName, chainName, strings.Join(ruleArgs, " "))
}

func (f *fakeIPTables) DeleteRule(tableName utiliptables.Table, chainName utiliptables.Chain, args ...string) error {
	_, chain, err := f.getChain(tableName, chainName)
	if err == nil {
		rule := strings.Join(args, " ")
		ruleIdx := findRule(chain, rule)
		if ruleIdx < 0 {
			return nil
		}
		chain.rules = append(chain.rules[:ruleIdx], chain.rules[ruleIdx+1:]...)
	}
	return nil
}

func (f *fakeIPTables) IsIpv6() bool {
	return false
}

func saveChain(chain *fakeChain, data *bytes.Buffer) {
	for _, rule := range chain.rules {
		data.WriteString(fmt.Sprintf("-A %s %s\n", chain.name, rule))
	}
}

func (f *fakeIPTables) Save(tableName utiliptables.Table) ([]byte, error) {
	table, err := f.getTable(tableName)
	if err != nil {
		return nil, err
	}

	data := bytes.NewBuffer(nil)
	data.WriteString(fmt.Sprintf("*%s\n", table.name))

	rules := bytes.NewBuffer(nil)
	for _, chain := range table.chains {
		data.WriteString(fmt.Sprintf(":%s - [0:0]\n", string(chain.name)))
		saveChain(chain, rules)
	}
	data.Write(rules.Bytes())
	data.WriteString("COMMIT\n")
	return data.Bytes(), nil
}

func (f *fakeIPTables) SaveAll() ([]byte, error) {
	data := bytes.NewBuffer(nil)
	for _, table := range f.tables {
		tableData, err := f.Save(table.name)
		if err != nil {
			return nil, err
		}
		if _, err = data.Write(tableData); err != nil {
			return nil, err
		}
	}
	return data.Bytes(), nil
}

func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error {
	buf := bytes.NewBuffer(data)
	var tableName utiliptables.Table
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			break
		}
		if line[0] == '#' {
			continue
		}

		line = strings.TrimSuffix(line, "\n")
		if strings.HasPrefix(line, "*") {
			tableName = utiliptables.Table(line[1:])
		}
		if tableName != "" {
			if restoreTableName != "" && restoreTableName != tableName {
				continue
			}
			if strings.HasPrefix(line, ":") {
				chainName := utiliptables.Chain(strings.Split(line[1:], " ")[0])
				if flush == utiliptables.FlushTables {
					table, chain, _ := f.getChain(tableName, chainName)
					if chain != nil {
						delete(table.chains, string(chainName))
					}
				}
				_, _ = f.ensureChain(tableName, chainName)
			} else if strings.HasPrefix(line, "-A") {
				parts := strings.Split(line, " ")
				if len(parts) < 3 {
					return fmt.Errorf("Invalid iptables rule '%s'", line)
				}
				chainName := utiliptables.Chain(parts[1])
				rule := strings.TrimPrefix(line, fmt.Sprintf("-A %s ", chainName))
				_, err := f.ensureRule(utiliptables.Append, tableName, chainName, rule)
				if err != nil {
					return err
				}
			} else if strings.HasPrefix(line, "-X") {
				parts := strings.Split(line, " ")
				if len(parts) < 3 {
					return fmt.Errorf("Invalid iptables rule '%s'", line)
				}
				if err := f.DeleteChain(tableName, utiliptables.Chain(parts[1])); err != nil {
					return err
				}
			} else if line == "COMMIT" {
				if restoreTableName == tableName {
					return nil
				}
				tableName = ""
			}
		}
	}

	return nil
}

func (f *fakeIPTables) Restore(tableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag, counters utiliptables.RestoreCountersFlag) error {
	return f.restore(tableName, data, flush)
}

func (f *fakeIPTables) RestoreAll(data []byte, flush utiliptables.FlushFlag, counters utiliptables.RestoreCountersFlag) error {
	return f.restore("", data, flush)
}

func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) {
}

func (f *fakeIPTables) Destroy() {
}
386 vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport.go generated vendored Normal file
@@ -0,0 +1,386 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hostport

import (
	"bytes"
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
	utildbus "k8s.io/kubernetes/pkg/util/dbus"
	utilexec "k8s.io/kubernetes/pkg/util/exec"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

const (
	// the hostport chain
	kubeHostportsChain utiliptables.Chain = "KUBE-HOSTPORTS"
	// prefix for hostport chains
	kubeHostportChainPrefix string = "KUBE-HP-"
)

type HostportHandler interface {
	OpenPodHostportsAndSync(newPod *ActivePod, natInterfaceName string, activePods []*ActivePod) error
	SyncHostports(natInterfaceName string, activePods []*ActivePod) error
}

type ActivePod struct {
	Pod *v1.Pod
	IP  net.IP
}

type hostportOpener func(*hostport) (closeable, error)

type handler struct {
	hostPortMap map[hostport]closeable
	iptables    utiliptables.Interface
	portOpener  hostportOpener
}

func NewHostportHandler() HostportHandler {
	iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4)
	return &handler{
		hostPortMap: make(map[hostport]closeable),
		iptables:    iptInterface,
		portOpener:  openLocalPort,
	}
}

type closeable interface {
	Close() error
}

type hostport struct {
	port     int32
	protocol string
}

type targetPod struct {
	podFullName string
	podIP       string
}

func (hp *hostport) String() string {
	return fmt.Sprintf("%s:%d", hp.protocol, hp.port)
}

//openPodHostports opens all hostport for pod and returns the map of hostport and socket
func (h *handler) openHostports(pod *v1.Pod) error {
	var retErr error
	ports := make(map[hostport]closeable)
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.HostPort <= 0 {
				// Ignore
				continue
			}
			hp := hostport{
				port:     port.HostPort,
				protocol: strings.ToLower(string(port.Protocol)),
			}
			socket, err := h.portOpener(&hp)
			if err != nil {
				retErr = fmt.Errorf("Cannot open hostport %d for pod %s: %v", port.HostPort, kubecontainer.GetPodFullName(pod), err)
				break
			}
			ports[hp] = socket
		}
		if retErr != nil {
			break
		}
	}
	// If encounter any error, close all hostports that just got opened.
	if retErr != nil {
		for hp, socket := range ports {
			if err := socket.Close(); err != nil {
				glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, kubecontainer.GetPodFullName(pod), err)
			}
		}
		return retErr
	}

	for hostPort, socket := range ports {
		h.hostPortMap[hostPort] = socket
	}

	return nil
}

// gatherAllHostports returns all hostports that should be presented on node,
// given the list of pods running on that node and ignoring host network
// pods (which don't need hostport <-> container port mapping).
func gatherAllHostports(activePods []*ActivePod) (map[v1.ContainerPort]targetPod, error) {
	podHostportMap := make(map[v1.ContainerPort]targetPod)
	for _, r := range activePods {
		if r.IP.To4() == nil {
			return nil, fmt.Errorf("Invalid or missing pod %s IP", kubecontainer.GetPodFullName(r.Pod))
		}

		// should not handle hostports for hostnetwork pods
		if r.Pod.Spec.HostNetwork {
			continue
		}

		for _, container := range r.Pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.HostPort != 0 {
					podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(r.Pod), podIP: r.IP.String()}
				}
			}
		}
	}
	return podHostportMap, nil
}

// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

//hostportChainName takes containerPort for a pod and returns associated iptables chain.
// This is computed by hashing (sha256)
// then encoding to base32 and truncating with the prefix "KUBE-SVC-". We do
// this because IPTables Chain Names must be <= 28 chars long, and the longer
// they are the harder they are to read.
func hostportChainName(cp v1.ContainerPort, podFullName string) utiliptables.Chain {
	hash := sha256.Sum256([]byte(string(cp.HostPort) + string(cp.Protocol) + podFullName))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}

// OpenPodHostportsAndSync opens hostports for a new pod, gathers all hostports on
// node, sets up iptables rules enable them. And finally clean up stale hostports.
// 'newPod' must also be present in 'activePods'.
func (h *handler) OpenPodHostportsAndSync(newPod *ActivePod, natInterfaceName string, activePods []*ActivePod) error {
	// try to open pod host port if specified
	if err := h.openHostports(newPod.Pod); err != nil {
		return err
	}

	// Add the new pod to active pods if it's not present.
	var found bool
	for _, p := range activePods {
		if p.Pod.UID == newPod.Pod.UID {
			found = true
			break
		}
	}
	if !found {
		activePods = append(activePods, newPod)
	}

	return h.SyncHostports(natInterfaceName, activePods)
}

// SyncHostports gathers all hostports on node and setup iptables rules enable them. And finally clean up stale hostports
func (h *handler) SyncHostports(natInterfaceName string, activePods []*ActivePod) error {
	start := time.Now()
	defer func() {
		glog.V(4).Infof("syncHostportsRules took %v", time.Since(start))
	}()

	containerPortMap, err := gatherAllHostports(activePods)
	if err != nil {
		return err
	}

	glog.V(4).Info("Ensuring kubelet hostport chains")
	// Ensure kubeHostportChain
	if _, err := h.iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil {
		return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err)
	}
	tableChainsNeedJumpServices := []struct {
		table utiliptables.Table
		chain utiliptables.Chain
	}{
		{utiliptables.TableNAT, utiliptables.ChainOutput},
		{utiliptables.TableNAT, utiliptables.ChainPrerouting},
	}
	args := []string{"-m", "comment", "--comment", "kube hostport portals",
		"-m", "addrtype", "--dst-type", "LOCAL",
		"-j", string(kubeHostportsChain)}
	for _, tc := range tableChainsNeedJumpServices {
		if _, err := h.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil {
			return fmt.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err)
		}
	}
	// Need to SNAT traffic from localhost
	args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"}
	if _, err := h.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
		return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
	}

	// Get iptables-save output so we can check for existing chains and rules.
	// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
	existingNATChains := make(map[utiliptables.Chain]string)
	iptablesSaveRaw, err := h.iptables.Save(utiliptables.TableNAT)
	if err != nil { // if we failed to get any rules
		glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
	} else { // otherwise parse the output
		existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesSaveRaw)
	}

	natChains := bytes.NewBuffer(nil)
	natRules := bytes.NewBuffer(nil)
	writeLine(natChains, "*nat")
	// Make sure we keep stats for the top-level chains, if they existed
	// (which most should have because we created them above).
	if chain, ok := existingNATChains[kubeHostportsChain]; ok {
		writeLine(natChains, chain)
	} else {
		writeLine(natChains, utiliptables.MakeChainLine(kubeHostportsChain))
	}

	// Accumulate NAT chains to keep.
	activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set

	for containerPort, target := range containerPortMap {
		protocol := strings.ToLower(string(containerPort.Protocol))
		hostportChain := hostportChainName(containerPort, target.podFullName)
		if chain, ok := existingNATChains[hostportChain]; ok {
			writeLine(natChains, chain)
		} else {
			writeLine(natChains, utiliptables.MakeChainLine(hostportChain))
		}

		activeNATChains[hostportChain] = true

		// Redirect to hostport chain
		args := []string{
			"-A", string(kubeHostportsChain),
			"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
			"-m", protocol, "-p", protocol,
			"--dport", fmt.Sprintf("%d", containerPort.HostPort),
			"-j", string(hostportChain),
		}
		writeLine(natRules, args...)

		// Assuming kubelet is syncing iptables KUBE-MARK-MASQ chain
|
||||
// If the request comes from the pod that is serving the hostport, then SNAT
|
||||
args = []string{
|
||||
"-A", string(hostportChain),
|
||||
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
|
||||
"-s", target.podIP, "-j", string(iptablesproxy.KubeMarkMasqChain),
|
||||
}
|
||||
writeLine(natRules, args...)
|
||||
|
||||
// Create hostport chain to DNAT traffic to final destination
|
||||
// iptables will maintain the stats for this chain
|
||||
args = []string{
|
||||
"-A", string(hostportChain),
|
||||
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
|
||||
"-m", protocol, "-p", protocol,
|
||||
"-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", target.podIP, containerPort.ContainerPort),
|
||||
}
|
||||
writeLine(natRules, args...)
|
||||
}
|
||||
|
||||
// Delete chains no longer in use.
|
||||
for chain := range existingNATChains {
|
||||
if !activeNATChains[chain] {
|
||||
chainString := string(chain)
|
||||
if !strings.HasPrefix(chainString, kubeHostportChainPrefix) {
|
||||
// Ignore chains that aren't ours.
|
||||
continue
|
||||
}
|
||||
// We must (as per iptables) write a chain-line for it, which has
|
||||
// the nice effect of flushing the chain. Then we can remove the
|
||||
// chain.
|
||||
writeLine(natChains, existingNATChains[chain])
|
||||
writeLine(natRules, "-X", chainString)
|
||||
}
|
||||
}
|
||||
writeLine(natRules, "COMMIT")
|
||||
|
||||
natLines := append(natChains.Bytes(), natRules.Bytes()...)
|
||||
glog.V(3).Infof("Restoring iptables rules: %s", natLines)
|
||||
err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to execute iptables-restore: %v", err)
|
||||
}
|
||||
|
||||
h.cleanupHostportMap(containerPortMap)
|
||||
return nil
|
||||
}
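// For illustration (assumed output, not part of the vendored source): for a pod
// "web_default" at 10.1.1.2 with host port 8080 -> container port 80/TCP, the
// iptables-restore payload built above looks roughly like (chain hash abbreviated):
//
//	*nat
//	:KUBE-HOSTPORTS - [0:0]
//	:KUBE-HP-XXXXXXXXXXXXXXXX - [0:0]
//	-A KUBE-HOSTPORTS -m comment --comment "web_default hostport 8080" -m tcp -p tcp --dport 8080 -j KUBE-HP-XXXXXXXXXXXXXXXX
//	-A KUBE-HP-XXXXXXXXXXXXXXXX -m comment --comment "web_default hostport 8080" -s 10.1.1.2 -j KUBE-MARK-MASQ
//	-A KUBE-HP-XXXXXXXXXXXXXXXX -m comment --comment "web_default hostport 8080" -m tcp -p tcp -j DNAT --to-destination=10.1.1.2:80
//	COMMIT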
|
||||
|
||||
func openLocalPort(hp *hostport) (closeable, error) {
|
||||
// For ports on node IPs, open the actual port and hold it, even though we
|
||||
// use iptables to redirect traffic.
|
||||
// This ensures a) that it's safe to use that port and b) that (a) stays
|
||||
// true. The risk is that some process on the node (e.g. sshd or kubelet)
|
||||
// is using a port and we give that same port out to a Service. That would
|
||||
// be bad because iptables would silently claim the traffic but the process
|
||||
// would never know.
|
||||
// NOTE: We should not need to have a real listen()ing socket - bind()
|
||||
// should be enough, but I can't figure out a way to e2e test without
|
||||
// it. Tools like 'ss' and 'netstat' do not show sockets that are
|
||||
// bind()ed but not listen()ed, and at least the default debian netcat
|
||||
// has no way to avoid about 10 seconds of retries.
|
||||
var socket closeable
|
||||
switch hp.protocol {
|
||||
case "tcp":
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", hp.port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
socket = listener
|
||||
case "udp":
|
||||
addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", hp.port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
socket = conn
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown protocol %q", hp.protocol)
|
||||
}
|
||||
glog.V(3).Infof("Opened local port %s", hp.String())
|
||||
return socket, nil
|
||||
}
|
||||
|
||||
// cleanupHostportMap closes obsolete hostports
|
||||
func (h *handler) cleanupHostportMap(containerPortMap map[v1.ContainerPort]targetPod) {
|
||||
// compute hostports that are supposed to be open
|
||||
currentHostports := make(map[hostport]bool)
|
||||
for containerPort := range containerPortMap {
|
||||
hp := hostport{
|
||||
port: containerPort.HostPort,
|
||||
protocol: strings.ToLower(string(containerPort.Protocol)),
|
||||
}
|
||||
currentHostports[hp] = true
|
||||
}
|
||||
|
||||
// close and delete obsolete hostports
|
||||
for hp, socket := range h.hostPortMap {
|
||||
if _, ok := currentHostports[hp]; !ok {
|
||||
socket.Close()
|
||||
glog.V(3).Infof("Closed local port %s", hp.String())
|
||||
delete(h.hostPortMap, hp)
|
||||
}
|
||||
}
|
||||
}
|
254
vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_test.go
generated
vendored
Normal file
|
@ -0,0 +1,254 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package hostport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
|
||||
)
|
||||
|
||||
type fakeSocket struct {
|
||||
port int32
|
||||
protocol string
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (f *fakeSocket) Close() error {
|
||||
if f.closed {
|
||||
return fmt.Errorf("Socket %q.%s already closed!", f.port, f.protocol)
|
||||
}
|
||||
f.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func openFakeSocket(hp *hostport) (closeable, error) {
|
||||
return &fakeSocket{hp.port, hp.protocol, false}, nil
|
||||
}
|
||||
|
||||
type ruleMatch struct {
|
||||
hostport int
|
||||
chain string
|
||||
match string
|
||||
}
|
||||
|
||||
func TestOpenPodHostports(t *testing.T) {
|
||||
fakeIPTables := NewFakeIPTables()
|
||||
|
||||
h := &handler{
|
||||
hostPortMap: make(map[hostport]closeable),
|
||||
iptables: fakeIPTables,
|
||||
portOpener: openFakeSocket,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
ip string
|
||||
matches []*ruleMatch
|
||||
}{
|
||||
// New pod that we are going to add
|
||||
{
|
||||
&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "test-pod",
|
||||
Namespace: v1.NamespaceDefault,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Ports: []v1.ContainerPort{{
|
||||
HostPort: 4567,
|
||||
ContainerPort: 80,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
}, {
|
||||
HostPort: 5678,
|
||||
ContainerPort: 81,
|
||||
Protocol: v1.ProtocolUDP,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
"10.1.1.2",
|
||||
[]*ruleMatch{
|
||||
{
|
||||
-1,
|
||||
"KUBE-HOSTPORTS",
|
||||
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp --dport 4567",
|
||||
},
|
||||
{
|
||||
4567,
|
||||
"",
|
||||
"-m comment --comment \"test-pod_default hostport 4567\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
|
||||
},
|
||||
{
|
||||
4567,
|
||||
"",
|
||||
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80",
|
||||
},
|
||||
{
|
||||
-1,
|
||||
"KUBE-HOSTPORTS",
|
||||
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp --dport 5678",
|
||||
},
|
||||
{
|
||||
5678,
|
||||
"",
|
||||
"-m comment --comment \"test-pod_default hostport 5678\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
|
||||
},
|
||||
{
|
||||
5678,
|
||||
"",
|
||||
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81",
|
||||
},
|
||||
},
|
||||
},
|
||||
// Already running pod
|
||||
{
|
||||
&v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "another-test-pod",
|
||||
Namespace: v1.NamespaceDefault,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Ports: []v1.ContainerPort{{
|
||||
HostPort: 123,
|
||||
ContainerPort: 654,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
"10.1.1.5",
|
||||
[]*ruleMatch{
|
||||
{
|
||||
-1,
|
||||
"KUBE-HOSTPORTS",
|
||||
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp --dport 123",
|
||||
},
|
||||
{
|
||||
123,
|
||||
"",
|
||||
"-m comment --comment \"another-test-pod_default hostport 123\" -s 10.1.1.5/32 -j KUBE-MARK-MASQ",
|
||||
},
|
||||
{
|
||||
123,
|
||||
"",
|
||||
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.5:654",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
activePods := make([]*ActivePod, 0)
|
||||
|
||||
// Fill in any match rules missing chain names
|
||||
for _, test := range tests {
|
||||
for _, match := range test.matches {
|
||||
if match.hostport >= 0 {
|
||||
found := false
|
||||
for _, c := range test.pod.Spec.Containers {
|
||||
for _, cp := range c.Ports {
|
||||
if int(cp.HostPort) == match.hostport {
|
||||
match.chain = string(hostportChainName(cp, kubecontainer.GetPodFullName(test.pod)))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Failed to find ContainerPort for match %d/'%s'", match.hostport, match.match)
|
||||
}
|
||||
}
|
||||
}
|
||||
activePods = append(activePods, &ActivePod{
|
||||
Pod: test.pod,
|
||||
IP: net.ParseIP(test.ip),
|
||||
})
|
||||
}
|
||||
|
||||
// Already running pod's host port
|
||||
hp := hostport{
|
||||
tests[1].pod.Spec.Containers[0].Ports[0].HostPort,
|
||||
strings.ToLower(string(tests[1].pod.Spec.Containers[0].Ports[0].Protocol)),
|
||||
}
|
||||
h.hostPortMap[hp] = &fakeSocket{
|
||||
tests[1].pod.Spec.Containers[0].Ports[0].HostPort,
|
||||
strings.ToLower(string(tests[1].pod.Spec.Containers[0].Ports[0].Protocol)),
|
||||
false,
|
||||
}
|
||||
|
||||
err := h.OpenPodHostportsAndSync(&ActivePod{Pod: tests[0].pod, IP: net.ParseIP(tests[0].ip)}, "br0", activePods)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to OpenPodHostportsAndSync: %v", err)
|
||||
}
|
||||
|
||||
// Generic rules
|
||||
genericRules := []*ruleMatch{
|
||||
{-1, "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o br0 -s 127.0.0.0/8 -j MASQUERADE"},
|
||||
{-1, "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
|
||||
{-1, "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
|
||||
}
|
||||
|
||||
for _, rule := range genericRules {
|
||||
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(rule.chain))
|
||||
if err != nil {
|
||||
t.Fatalf("Expected NAT chain %s did not exist", rule.chain)
|
||||
}
|
||||
if !matchRule(chain, rule.match) {
|
||||
t.Fatalf("Expected %s chain rule match '%s' not found", rule.chain, rule.match)
|
||||
}
|
||||
}
|
||||
|
||||
// Pod rules
|
||||
for _, test := range tests {
|
||||
for _, match := range test.matches {
|
||||
// Ensure chain exists
|
||||
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(match.chain))
|
||||
if err != nil {
|
||||
t.Fatalf("Expected NAT chain %s did not exist", match.chain)
|
||||
}
|
||||
if !matchRule(chain, match.match) {
|
||||
t.Fatalf("Expected NAT chain %s rule containing '%s' not found", match.chain, match.match)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Socket
|
||||
hostPortMap := map[hostport]closeable{
|
||||
hostport{123, "tcp"}: &fakeSocket{123, "tcp", false},
|
||||
hostport{4567, "tcp"}: &fakeSocket{4567, "tcp", false},
|
||||
hostport{5678, "udp"}: &fakeSocket{5678, "udp", false},
|
||||
}
|
||||
if !reflect.DeepEqual(hostPortMap, h.hostPortMap) {
|
||||
t.Fatalf("Mismatch in expected hostPortMap. Expected '%v', got '%v'", hostPortMap, h.hostPortMap)
|
||||
}
|
||||
}
|
||||
|
||||
func matchRule(chain *fakeChain, match string) bool {
|
||||
for _, rule := range chain.rules {
|
||||
if strings.Contains(rule, match) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
31
vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/testing/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fake.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/network/hostport:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
44
vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/testing/fake.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
|
||||
)
|
||||
|
||||
type fakeHandler struct{}
|
||||
|
||||
func NewFakeHostportHandler() hostport.HostportHandler {
|
||||
return &fakeHandler{}
|
||||
}
|
||||
|
||||
func (h *fakeHandler) OpenPodHostportsAndSync(newPod *hostport.ActivePod, natInterfaceName string, activePods []*hostport.ActivePod) error {
|
||||
return h.SyncHostports(natInterfaceName, activePods)
|
||||
}
|
||||
|
||||
func (h *fakeHandler) SyncHostports(natInterfaceName string, activePods []*hostport.ActivePod) error {
|
||||
for _, r := range activePods {
|
||||
if r.IP.To4() == nil {
|
||||
return fmt.Errorf("Invalid or missing pod %s IP", kubecontainer.GetPodFullName(r.Pod))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
72
vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kubenet.go",
|
||||
"kubenet_linux.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/hostport:go_default_library",
|
||||
"//pkg/util/bandwidth:go_default_library",
|
||||
"//pkg/util/dbus:go_default_library",
|
||||
"//pkg/util/ebtables:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/iptables:go_default_library",
|
||||
"//pkg/util/sysctl:go_default_library",
|
||||
"//vendor:github.com/containernetworking/cni/libcni",
|
||||
"//vendor:github.com/containernetworking/cni/pkg/types",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/vishvananda/netlink",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/net",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["kubenet_linux_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/cni/testing:go_default_library",
|
||||
"//pkg/kubelet/network/hostport/testing:go_default_library",
|
||||
"//pkg/kubelet/network/testing:go_default_library",
|
||||
"//pkg/util/bandwidth:go_default_library",
|
||||
"//pkg/util/exec:go_default_library",
|
||||
"//pkg/util/iptables/testing:go_default_library",
|
||||
"//pkg/util/sysctl/testing:go_default_library",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:github.com/stretchr/testify/mock",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
21
vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubenet
|
||||
|
||||
const (
|
||||
KubenetPluginName = "kubenet"
|
||||
)
|
819
vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,819 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubenet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containernetworking/cni/libcni"
|
||||
cnitypes "github.com/containernetworking/cni/pkg/types"
|
||||
"github.com/golang/glog"
|
||||
"github.com/vishvananda/netlink"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
utilsets "k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
|
||||
"k8s.io/kubernetes/pkg/util/bandwidth"
|
||||
utildbus "k8s.io/kubernetes/pkg/util/dbus"
|
||||
utilebtables "k8s.io/kubernetes/pkg/util/ebtables"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
|
||||
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
|
||||
)
|
||||
|
||||
const (
|
||||
BridgeName = "cbr0"
|
||||
DefaultCNIDir = "/opt/cni/bin"
|
||||
|
||||
sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
|
||||
|
||||
// fallbackMTU is used if an MTU is not specified, and we cannot determine the MTU
|
||||
fallbackMTU = 1460
|
||||
|
||||
// private mac prefix safe to use
|
||||
// Universally administered and locally administered addresses are distinguished by setting the second-least-significant
|
||||
// bit of the first octet of the address. If it is 1, the address is locally administered. For example, for address 0a:00:00:00:00:00,
|
||||
// the first octet is 0a (hex), the binary form of which is 00001010, where the second-least-significant bit is 1.
|
||||
privateMACPrefix = "0a:58"
|
||||
|
||||
// ebtables Chain to store dedup rules
|
||||
dedupChain = utilebtables.Chain("KUBE-DEDUP")
|
||||
|
||||
// defaultIPAMDir is the default location for the checkpoint files stored by host-local ipam
|
||||
// https://github.com/containernetworking/cni/tree/master/plugins/ipam/host-local#backends
|
||||
defaultIPAMDir = "/var/lib/cni/networks"
|
||||
)
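// Sketch (not part of the vendored source): the "locally administered" property
// described above can be checked by testing the second-least-significant bit of
// the first octet, which is set for every MAC built from privateMACPrefix.
func isLocallyAdministeredMAC(mac net.HardwareAddr) bool {
	return len(mac) > 0 && mac[0]&0x02 != 0
}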
|
||||
|
||||
// CNI plugins required by kubenet in /opt/cni/bin or vendor directory
|
||||
var requiredCNIPlugins = [...]string{"bridge", "host-local", "loopback"}
|
||||
|
||||
type kubenetNetworkPlugin struct {
|
||||
network.NoopNetworkPlugin
|
||||
|
||||
host network.Host
|
||||
netConfig *libcni.NetworkConfig
|
||||
loConfig *libcni.NetworkConfig
|
||||
cniConfig libcni.CNI
|
||||
bandwidthShaper bandwidth.BandwidthShaper
|
||||
mu sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization
|
||||
podIPs map[kubecontainer.ContainerID]string
|
||||
mtu int
|
||||
execer utilexec.Interface
|
||||
nsenterPath string
|
||||
hairpinMode componentconfig.HairpinMode
|
||||
hostportHandler hostport.HostportHandler
|
||||
iptables utiliptables.Interface
|
||||
sysctl utilsysctl.Interface
|
||||
ebtables utilebtables.Interface
|
||||
// vendorDir is passed by kubelet network-plugin-dir parameter.
|
||||
// kubenet will search for cni binaries in DefaultCNIDir first, then continue to vendorDir.
|
||||
vendorDir string
|
||||
nonMasqueradeCIDR string
|
||||
podCidr string
|
||||
gateway net.IP
|
||||
}
|
||||
|
||||
func NewPlugin(networkPluginDir string) network.NetworkPlugin {
|
||||
protocol := utiliptables.ProtocolIpv4
|
||||
execer := utilexec.New()
|
||||
dbus := utildbus.New()
|
||||
sysctl := utilsysctl.New()
|
||||
iptInterface := utiliptables.New(execer, dbus, protocol)
|
||||
return &kubenetNetworkPlugin{
|
||||
podIPs: make(map[kubecontainer.ContainerID]string),
|
||||
execer: utilexec.New(),
|
||||
iptables: iptInterface,
|
||||
sysctl: sysctl,
|
||||
vendorDir: networkPluginDir,
|
||||
hostportHandler: hostport.NewHostportHandler(),
|
||||
nonMasqueradeCIDR: "10.0.0.0/8",
|
||||
}
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
|
||||
plugin.host = host
|
||||
plugin.hairpinMode = hairpinMode
|
||||
plugin.nonMasqueradeCIDR = nonMasqueradeCIDR
|
||||
plugin.cniConfig = &libcni.CNIConfig{
|
||||
Path: []string{DefaultCNIDir, plugin.vendorDir},
|
||||
}
|
||||
|
||||
if mtu == network.UseDefaultMTU {
|
||||
if link, err := findMinMTU(); err == nil {
|
||||
plugin.mtu = link.MTU
|
||||
glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU)
|
||||
} else {
|
||||
plugin.mtu = fallbackMTU
|
||||
glog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err)
|
||||
}
|
||||
} else {
|
||||
plugin.mtu = mtu
|
||||
}
|
||||
|
||||
// Since this plugin uses a Linux bridge, setting bridge-nf-call-iptables=1
|
||||
// is necessary to ensure kube-proxy functions correctly.
|
||||
//
|
||||
// This will return an error on older kernel versions (< 3.18) where the module
|
||||
// is built-in; we simply ignore the error here. A better thing to do is
|
||||
// to check the kernel version in the future.
|
||||
plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput()
|
||||
err := plugin.sysctl.SetSysctl(sysctlBridgeCallIPTables, 1)
|
||||
if err != nil {
|
||||
glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
|
||||
}
|
||||
|
||||
plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{
|
||||
"cniVersion": "0.1.0",
|
||||
"name": "kubenet-loopback",
|
||||
"type": "loopback"
|
||||
}`))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to generate loopback config: %v", err)
|
||||
}
|
||||
|
||||
plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to find nsenter binary: %v", err)
|
||||
}
|
||||
|
||||
// Need to SNAT outbound traffic from cluster
|
||||
if err = plugin.ensureMasqRule(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
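// For reference (assumed equivalent, not part of the vendored source), the
// bridge-netfilter setup performed in Init corresponds to running:
//
//	modprobe br-netfilter
//	sysctl -w net.bridge.bridge-nf-call-iptables=1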
|
||||
|
||||
// TODO: move this logic into the cni bridge plugin and remove this from kubenet
|
||||
func (plugin *kubenetNetworkPlugin) ensureMasqRule() error {
|
||||
if _, err := plugin.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting,
|
||||
"-m", "comment", "--comment", "kubenet: SNAT for outbound traffic from cluster",
|
||||
"-m", "addrtype", "!", "--dst-type", "LOCAL",
|
||||
"!", "-d", plugin.nonMasqueradeCIDR,
|
||||
"-j", "MASQUERADE"); err != nil {
|
||||
return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
|
||||
}
|
||||
return nil
|
||||
}
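// For illustration (assumed equivalent, not part of the vendored source): with the
// default nonMasqueradeCIDR of 10.0.0.0/8, the rule ensured above corresponds to:
//
//	iptables -t nat -A POSTROUTING \
//	  -m comment --comment "kubenet: SNAT for outbound traffic from cluster" \
//	  -m addrtype ! --dst-type LOCAL ! -d 10.0.0.0/8 -j MASQUERADE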
|
||||
|
||||
func findMinMTU() (*net.Interface, error) {
|
||||
intfs, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mtu := 999999
|
||||
defIntfIndex := -1
|
||||
for i, intf := range intfs {
|
||||
if ((intf.Flags & net.FlagUp) != 0) && (intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) == 0) {
|
||||
if intf.MTU < mtu {
|
||||
mtu = intf.MTU
|
||||
defIntfIndex = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if mtu >= 999999 || mtu < 576 || defIntfIndex < 0 {
|
||||
return nil, fmt.Errorf("no suitable interface: %v", BridgeName)
|
||||
}
|
||||
|
||||
return &intfs[defIntfIndex], nil
|
||||
}
|
||||
|
||||
const NET_CONFIG_TEMPLATE = `{
|
||||
"cniVersion": "0.1.0",
|
||||
"name": "kubenet",
|
||||
"type": "bridge",
|
||||
"bridge": "%s",
|
||||
"mtu": %d,
|
||||
"addIf": "%s",
|
||||
"isGateway": true,
|
||||
"ipMasq": false,
|
||||
"hairpinMode": %t,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "%s",
|
||||
"gateway": "%s",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
}`
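// Worked example (hypothetical values, not part of the vendored source): with
// BridgeName "cbr0", MTU 1460, hairpin disabled, pod CIDR 10.244.1.0/24 and
// gateway 10.244.1.1 (and assuming DefaultInterfaceName is "eth0"), the template
// renders to roughly:
//
//	{
//	  "cniVersion": "0.1.0",
//	  "name": "kubenet",
//	  "type": "bridge",
//	  "bridge": "cbr0",
//	  "mtu": 1460,
//	  "addIf": "eth0",
//	  "isGateway": true,
//	  "ipMasq": false,
//	  "hairpinMode": false,
//	  "ipam": {
//	    "type": "host-local",
//	    "subnet": "10.244.1.0/24",
//	    "gateway": "10.244.1.1",
//	    "routes": [ { "dst": "0.0.0.0/0" } ]
//	  }
//	}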
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) {
|
||||
if name != network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE {
|
||||
return
|
||||
}
|
||||
|
||||
plugin.mu.Lock()
|
||||
defer plugin.mu.Unlock()
|
||||
|
||||
podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
|
||||
if !ok {
|
||||
glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
|
||||
return
|
||||
}
|
||||
|
||||
if plugin.netConfig != nil {
|
||||
glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR)
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
|
||||
_, cidr, err := net.ParseCIDR(podCIDR)
|
||||
if err == nil {
|
||||
setHairpin := plugin.hairpinMode == componentconfig.HairpinVeth
|
||||
// Set bridge address to first address in IPNet
|
||||
cidr.IP.To4()[3] += 1
|
||||
|
||||
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String())
|
||||
glog.V(2).Infof("CNI network config set to %v", json)
|
||||
plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
|
||||
if err == nil {
|
||||
glog.V(5).Infof("CNI network config:\n%s", json)
|
||||
|
||||
// Ensure cbr0 has no conflicting addresses; CNI's 'bridge'
|
||||
// plugin will bail out if the bridge has an unexpected one
|
||||
plugin.clearBridgeAddressesExcept(cidr)
|
||||
}
|
||||
plugin.podCidr = podCIDR
|
||||
plugin.gateway = cidr.IP
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to generate CNI network config: %v", err)
|
||||
}
|
||||
}
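// Sketch (not part of the vendored source) of the gateway derivation used in
// Event above: the bridge address is the first address in the pod CIDR.
func exampleGatewayFromPodCIDR() {
	_, cidr, _ := net.ParseCIDR("10.244.1.0/24")
	cidr.IP.To4()[3] += 1
	// cidr.IP is now 10.244.1.1, which becomes both the cbr0 address and the
	// "gateway" field in the generated CNI config.
	_ = cidr
}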
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep *net.IPNet) {
|
||||
bridge, err := netlink.LinkByName(BridgeName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
addrs, err := netlink.AddrList(bridge, syscall.AF_INET)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
if !utilnet.IPNetEqual(addr.IPNet, keep) {
|
||||
glog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName)
|
||||
netlink.AddrDel(bridge, &addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Name() string {
|
||||
return KubenetPluginName
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int {
|
||||
return utilsets.NewInt(network.NET_PLUGIN_CAPABILITY_SHAPING)
|
||||
}
|
||||
|
||||
// setup sets up networking through CNI using the given ns/name and sandbox ID.
|
||||
// TODO: Don't pass the pod to this method, it only needs it for bandwidth
|
||||
// shaping and hostport management.
|
||||
func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, pod *v1.Pod) error {
|
||||
// Bring up container loopback interface
|
||||
if _, err := plugin.addContainerToNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Hook container up with our bridge
|
||||
res, err := plugin.addContainerToNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.IP4 == nil {
|
||||
return fmt.Errorf("CNI plugin reported no IPv4 address for container %v.", id)
|
||||
}
|
||||
ip4 := res.IP4.IP.IP.To4()
|
||||
if ip4 == nil {
|
||||
return fmt.Errorf("CNI plugin reported an invalid IPv4 address for container %v: %+v.", id, res.IP4)
|
||||
}
|
||||
|
||||
// Explicitly assign a MAC address to cbr0. If the bridge MAC address is not explicitly set, it will adopt the lowest MAC address of the attached veths.
|
||||
// TODO: Remove this once upstream cni bridge plugin handles this
|
||||
link, err := netlink.LinkByName(BridgeName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup %q: %v", BridgeName, err)
|
||||
}
|
||||
macAddr, err := generateHardwareAddr(plugin.gateway)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(3).Infof("Configure %q mac address to %v", BridgeName, macAddr)
|
||||
err = netlink.LinkSetHardwareAddr(link, macAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to configure %q mac address to %q: %v", BridgeName, macAddr, err)
|
||||
}
|
||||
|
||||
// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
|
||||
// TODO: Remove this once the kernel bug (#20096) is fixed.
|
||||
// TODO: check and set promiscuous mode with netlink once vishvananda/netlink supports it
|
||||
if plugin.hairpinMode == componentconfig.PromiscuousBridge {
|
||||
output, err := plugin.execer.Command("ip", "link", "show", "dev", BridgeName).CombinedOutput()
|
||||
if err != nil || strings.Index(string(output), "PROMISC") < 0 {
|
||||
_, err := plugin.execer.Command("ip", "link", "set", BridgeName, "promisc", "on").CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err)
|
||||
}
|
||||
}
|
||||
// configure the ebtables rules to eliminate duplicate packets by best effort
|
||||
plugin.syncEbtablesDedupRules(macAddr)
|
||||
}
|
||||
|
||||
plugin.podIPs[id] = ip4.String()
|
||||
|
||||
// The host can choose to not support "legacy" features. The remote
|
||||
// shim doesn't support it (#35457), but the kubelet does.
|
||||
if !plugin.host.SupportsLegacyFeatures() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The first SetUpPod call creates the bridge; get a shaper for the sake of
|
||||
// initialization
|
||||
shaper := plugin.shaper()
|
||||
|
||||
ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading pod bandwidth annotations: %v", err)
|
||||
}
|
||||
if egress != nil || ingress != nil {
|
||||
if err := shaper.ReconcileCIDR(fmt.Sprintf("%s/32", ip4.String()), egress, ingress); err != nil {
|
||||
return fmt.Errorf("Failed to add pod to shaper: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Open any hostports the pod's containers want
|
||||
activePods, err := plugin.getActivePods()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newPod := &hostport.ActivePod{Pod: pod, IP: ip4}
|
||||
if err := plugin.hostportHandler.OpenPodHostportsAndSync(newPod, BridgeName, activePods); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
plugin.mu.Lock()
|
||||
defer plugin.mu.Unlock()
|
||||
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name)
|
||||
}()
|
||||
|
||||
// TODO: Entire pod object only required for bw shaping and hostport.
|
||||
pod, ok := plugin.host.GetPodByName(namespace, name)
|
||||
if !ok {
|
||||
return fmt.Errorf("pod %q cannot be found", name)
|
||||
}
|
||||
|
||||
if err := plugin.Status(); err != nil {
|
||||
return fmt.Errorf("Kubenet cannot SetUpPod: %v", err)
|
||||
}
|
||||
|
||||
if err := plugin.setup(namespace, name, id, pod); err != nil {
|
||||
// Make sure everything gets cleaned up on errors
|
||||
podIP, _ := plugin.podIPs[id]
|
||||
if err := plugin.teardown(namespace, name, id, podIP); err != nil {
|
||||
// Not a hard error or warning
|
||||
glog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err)
|
||||
}
|
||||
|
||||
// TODO(#34278): Figure out if we need IP GC through the cri.
|
||||
// The cri should always send us teardown events for stale sandboxes,
|
||||
// this obviates the need for GC in the common case, for kubenet.
|
||||
if plugin.host.SupportsLegacyFeatures() {
|
||||
|
||||
// TODO: Remove this hack once we've figured out how to retrieve the netns
|
||||
// of an exited container. Currently, restarting docker will leak a bunch of
|
||||
// ips. This will exhaust available ip space unless we cleanup old ips. At the
|
||||
// same time we don't want to try GC'ing them periodically as that could lead
|
||||
// to a performance regression in starting pods. So on each setup failure, try
|
||||
// GC on the assumption that the kubelet is going to retry pod creation, and
|
||||
// when it does, there will be ips.
|
||||
plugin.ipamGarbageCollection()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Need to SNAT outbound traffic from cluster
|
||||
if err := plugin.ensureMasqRule(); err != nil {
|
||||
glog.Errorf("Failed to ensure MASQ rule: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tears down as much of a pod's network as it can even if errors occur. Returns
|
||||
// an aggregate error composed of all errors encountered during the teardown.
|
||||
func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id kubecontainer.ContainerID, podIP string) error {
|
||||
errList := []error{}
|
||||
|
||||
if podIP != "" {
|
||||
glog.V(5).Infof("Removing pod IP %s from shaper", podIP)
|
||||
// shaper wants /32
|
||||
if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil {
|
||||
// Possible bandwidth shaping wasn't enabled for this pod anyways
|
||||
glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err)
|
||||
}
|
||||
|
||||
delete(plugin.podIPs, id)
|
||||
}
|
||||
|
||||
if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil {
|
||||
// This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution.
|
||||
if podIP != "" {
|
||||
glog.Warningf("Failed to delete container from kubenet: %v", err)
|
||||
} else {
|
||||
errList = append(errList, err)
|
||||
}
|
||||
}
|
||||
|
||||
// The host can choose to not support "legacy" features. The remote
|
||||
// shim doesn't support it (#35457), but the kubelet does.
|
||||
if !plugin.host.SupportsLegacyFeatures() {
|
||||
return utilerrors.NewAggregate(errList)
|
||||
}
|
||||
|
||||
activePods, err := plugin.getActivePods()
|
||||
if err == nil {
|
||||
err = plugin.hostportHandler.SyncHostports(BridgeName, activePods)
|
||||
}
|
||||
if err != nil {
|
||||
errList = append(errList, err)
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(errList)
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
plugin.mu.Lock()
|
||||
defer plugin.mu.Unlock()
|
||||
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
glog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name)
|
||||
}()
|
||||
|
||||
if plugin.netConfig == nil {
|
||||
return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods")
|
||||
}
|
||||
|
||||
// no cached IP is Ok during teardown
|
||||
podIP, _ := plugin.podIPs[id]
|
||||
if err := plugin.teardown(namespace, name, id, podIP); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Need to SNAT outbound traffic from cluster
|
||||
if err := plugin.ensureMasqRule(); err != nil {
|
||||
glog.Errorf("Failed to ensure MASQ rule: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin.
|
||||
// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls
|
||||
func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
|
||||
plugin.mu.Lock()
|
||||
defer plugin.mu.Unlock()
|
||||
// Assuming the pod's IP does not change, try to retrieve it from the kubenet map first.
|
||||
if podIP, ok := plugin.podIPs[id]; ok {
|
||||
return &network.PodNetworkStatus{IP: net.ParseIP(podIP)}, nil
|
||||
}
|
||||
|
||||
netnsPath, err := plugin.host.GetNetNS(id.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
|
||||
}
|
||||
ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
plugin.podIPs[id] = ip.String()
|
||||
return &network.PodNetworkStatus{IP: ip}, nil
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Status() error {
|
||||
// Can't set up pods if we don't have a PodCIDR yet
|
||||
if plugin.netConfig == nil {
|
||||
return fmt.Errorf("Kubenet does not have netConfig. This is most likely due to lack of PodCIDR")
|
||||
}
|
||||
|
||||
if !plugin.checkCNIPlugin() {
|
||||
return fmt.Errorf("could not locate kubenet required CNI plugins %v at %q or %q", requiredCNIPlugins, DefaultCNIDir, plugin.vendorDir)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkCNIPlugin returns whether all CNI plugins required by kubenet can be found at /opt/cni/bin or the user-specified NetworkPluginDir.
|
||||
func (plugin *kubenetNetworkPlugin) checkCNIPlugin() bool {
|
||||
if plugin.checkCNIPluginInDir(DefaultCNIDir) || plugin.checkCNIPluginInDir(plugin.vendorDir) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkCNIPluginInDir returns whether all required CNI plugins are present in dir.
|
||||
func (plugin *kubenetNetworkPlugin) checkCNIPluginInDir(dir string) bool {
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, cniPlugin := range requiredCNIPlugins {
|
||||
found := false
|
||||
for _, file := range files {
|
||||
if strings.TrimSpace(file.Name()) == cniPlugin {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getNonExitedPods returns a list of pods that have at least one running container.
|
||||
func (plugin *kubenetNetworkPlugin) getNonExitedPods() ([]*kubecontainer.Pod, error) {
|
||||
ret := []*kubecontainer.Pod{}
|
||||
pods, err := plugin.host.GetRuntime().GetPods(true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to retrieve pods from runtime: %v", err)
|
||||
}
|
||||
for _, p := range pods {
|
||||
if podIsExited(p) {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, p)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Returns a list of pods running or ready to run on this node and each pod's IP address.
|
||||
// Assumes PodSpecs retrieved from the runtime include the name and ID of containers in
|
||||
// each pod.
|
||||
func (plugin *kubenetNetworkPlugin) getActivePods() ([]*hostport.ActivePod, error) {
|
||||
pods, err := plugin.getNonExitedPods()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
activePods := make([]*hostport.ActivePod, 0)
|
||||
for _, p := range pods {
|
||||
containerID, err := plugin.host.GetRuntime().GetPodContainerID(p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
ipString, ok := plugin.podIPs[containerID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
podIP := net.ParseIP(ipString)
|
||||
if podIP == nil {
|
||||
continue
|
||||
}
|
||||
if pod, ok := plugin.host.GetPodByName(p.Namespace, p.Name); ok {
|
||||
activePods = append(activePods, &hostport.ActivePod{
|
||||
Pod: pod,
|
||||
IP: podIP,
|
||||
})
|
||||
}
|
||||
}
|
||||
return activePods, nil
|
||||
}
|
||||
|
||||
// ipamGarbageCollection will release unused IPs.
|
||||
// kubenet uses the CNI bridge plugin, which stores allocated ips on file. Each
|
||||
// file created under defaultIPAMDir has the format: ip/container-hash. So this
|
||||
// routine looks for hashes that are not reported by the currently running docker,
|
||||
// and invokes DelNetwork on each one. Note that this will only work for the
|
||||
// current CNI bridge plugin, because we have no way of finding the NetNs.
|
||||
func (plugin *kubenetNetworkPlugin) ipamGarbageCollection() {
|
||||
glog.V(2).Infof("Starting IP garbage collection")
|
||||
|
||||
ipamDir := filepath.Join(defaultIPAMDir, KubenetPluginName)
|
||||
files, err := ioutil.ReadDir(ipamDir)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to list files in %q: %v", ipamDir, err)
|
||||
return
|
||||
}
|
||||
|
||||
// gather containerIDs for allocated ips
|
||||
ipContainerIdMap := make(map[string]string)
|
||||
for _, file := range files {
|
||||
// skip non checkpoint file
|
||||
if ip := net.ParseIP(file.Name()); ip == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(filepath.Join(ipamDir, file.Name()))
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to read file %v: %v", file, err)
|
||||
}
|
||||
ipContainerIdMap[file.Name()] = strings.TrimSpace(string(content))
|
||||
}
|
||||
|
||||
// gather infra container IDs of current running Pods
|
||||
runningContainerIDs := utilsets.String{}
|
||||
pods, err := plugin.getNonExitedPods()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get pods: %v", err)
|
||||
return
|
||||
}
|
||||
for _, pod := range pods {
|
||||
containerID, err := plugin.host.GetRuntime().GetPodContainerID(pod)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to get infra containerID of %q/%q: %v", pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
runningContainerIDs.Insert(strings.TrimSpace(containerID.ID))
|
||||
}
|
||||
|
||||
// release leaked ips
|
||||
for ip, containerID := range ipContainerIdMap {
|
||||
// if the container is not running, release IP
|
||||
if runningContainerIDs.Has(containerID) {
|
||||
continue
|
||||
}
|
||||
// CNI requires all config to be presented, although only containerID is needed in this case
|
||||
rt := &libcni.RuntimeConf{
|
||||
ContainerID: containerID,
|
||||
IfName: network.DefaultInterfaceName,
|
||||
// TODO: How do we find the NetNs of an exited container? docker inspect
|
||||
// doesn't show us the pid, so we probably need to checkpoint
|
||||
NetNS: "",
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Releasing IP %q allocated to %q.", ip, containerID)
|
||||
// CNI bridge plugin should try to release IP and then return
|
||||
if err := plugin.cniConfig.DelNetwork(plugin.netConfig, rt); err != nil {
|
||||
glog.Errorf("Error while releasing IP: %v", err)
|
||||
}
|
||||
}
|
||||
}
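// For illustration (assumed layout, not part of the vendored source), the
// host-local IPAM checkpoint directory scanned above looks roughly like:
//
//	/var/lib/cni/networks/kubenet/10.244.1.5   (content: infra container ID)
//	/var/lib/cni/networks/kubenet/10.244.1.6   (content: infra container ID)
//
// Any IP whose recorded container ID is no longer reported by the runtime is
// released via DelNetwork.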
|
||||
|
||||
// podIsExited returns true if the pod is exited (all containers inside are exited).
|
||||
func podIsExited(p *kubecontainer.Pod) bool {
|
||||
for _, c := range p.Containers {
|
||||
if c.State != kubecontainer.ContainerStateExited {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, c := range p.Sandboxes {
|
||||
if c.State != kubecontainer.ContainerStateExited {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID) (*libcni.RuntimeConf, error) {
|
||||
netnsPath, err := plugin.host.GetNetNS(id.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
|
||||
}
|
||||
|
||||
return &libcni.RuntimeConf{
|
||||
ContainerID: id.ID,
|
||||
NetNS: netnsPath,
|
||||
IfName: ifName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) (*cnitypes.Result, error) {
|
||||
rt, err := plugin.buildCNIRuntimeConf(ifName, id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error building CNI config: %v", err)
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
|
||||
res, err := plugin.cniConfig.AddNetwork(config, rt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error adding container to network: %v", err)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) error {
|
||||
rt, err := plugin.buildCNIRuntimeConf(ifName, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error building CNI config: %v", err)
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
|
||||
if err := plugin.cniConfig.DelNetwork(config, rt); err != nil {
|
||||
return fmt.Errorf("Error removing container from network: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// shaper retrieves the bandwidth shaper and, if it hasn't been fetched before,
|
||||
// initializes it and ensures the bridge is appropriately configured
|
||||
// This function should only be called while holding the `plugin.mu` lock
|
||||
func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper {
|
||||
if plugin.bandwidthShaper == nil {
|
||||
plugin.bandwidthShaper = bandwidth.NewTCShaper(BridgeName)
|
||||
plugin.bandwidthShaper.ReconcileInterface()
|
||||
}
|
||||
return plugin.bandwidthShaper
|
||||
}
|
||||
|
||||
// TODO: make this into a goroutine and rectify the dedup rules periodically
|
||||
func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr) {
|
||||
if plugin.ebtables == nil {
|
||||
plugin.ebtables = utilebtables.New(plugin.execer)
|
||||
glog.V(3).Infof("Flushing dedup chain")
|
||||
if err := plugin.ebtables.FlushChain(utilebtables.TableFilter, dedupChain); err != nil {
|
||||
glog.Errorf("Failed to flush dedup chain: %v", err)
|
||||
}
|
||||
}
|
||||
_, err := plugin.ebtables.GetVersion()
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to get ebtables version. Skip syncing ebtables dedup rules: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr)
|
||||
_, err = plugin.ebtables.EnsureChain(utilebtables.TableFilter, dedupChain)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, utilebtables.ChainOutput, "-j", string(dedupChain))
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err)
|
||||
return
|
||||
}
|
||||
|
||||
commonArgs := []string{"-p", "IPv4", "-s", macAddr.String(), "-o", "veth+"}
|
||||
_, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.gateway.String(), "-j", "ACCEPT")...)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted")
|
||||
return
|
||||
|
||||
}
|
||||
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.podCidr, "-j", "DROP")...)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.")
|
||||
return
|
||||
}
|
||||
}
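// For illustration (assumed equivalent, not part of the vendored source): with a
// cbr0 MAC of 0a:58:0a:f4:01:01, gateway 10.244.1.1 and pod CIDR 10.244.1.0/24,
// the dedup rules ensured above correspond to:
//
//	ebtables -t filter -N KUBE-DEDUP
//	ebtables -t filter -A OUTPUT -j KUBE-DEDUP
//	ebtables -t filter -I KUBE-DEDUP -p IPv4 -s 0a:58:0a:f4:01:01 -o veth+ --ip-src 10.244.1.1 -j ACCEPT
//	ebtables -t filter -A KUBE-DEDUP -p IPv4 -s 0a:58:0a:f4:01:01 -o veth+ --ip-src 10.244.1.0/24 -j DROP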
|
||||
|
||||
// generateHardwareAddr generates a 48-bit virtual MAC address based on the input IP.
|
||||
func generateHardwareAddr(ip net.IP) (net.HardwareAddr, error) {
|
||||
if ip.To4() == nil {
|
||||
return nil, fmt.Errorf("generateHardwareAddr only support valid ipv4 address as input")
|
||||
}
|
||||
mac := privateMACPrefix
|
||||
sections := strings.Split(ip.String(), ".")
|
||||
for _, s := range sections {
|
||||
i, _ := strconv.Atoi(s)
|
||||
mac = mac + ":" + fmt.Sprintf("%02x", i)
|
||||
}
|
||||
hwAddr, err := net.ParseMAC(mac)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse mac address %s generated based on ip %s due to: %v", mac, ip, err)
|
||||
}
|
||||
return hwAddr, nil
|
||||
}
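// Worked example (not part of the vendored source): generateHardwareAddr maps an
// IPv4 address to a locally administered MAC by appending its four octets, in
// hex, to privateMACPrefix.
func exampleGenerateHardwareAddr() {
	mac, err := generateHardwareAddr(net.ParseIP("10.244.1.1"))
	if err != nil {
		return
	}
	// mac.String() == "0a:58:0a:f4:01:01"
	_ = mac
}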
|
268
vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux_test.go
generated
vendored
Normal file
|
@ -0,0 +1,268 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubenet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network/cni/testing"
|
||||
hostporttest "k8s.io/kubernetes/pkg/kubelet/network/hostport/testing"
|
||||
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
|
||||
"k8s.io/kubernetes/pkg/util/bandwidth"
|
||||
"k8s.io/kubernetes/pkg/util/exec"
|
||||
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
|
||||
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
|
||||
)
|
||||
|
||||
// test it fulfills the NetworkPlugin interface
|
||||
var _ network.NetworkPlugin = &kubenetNetworkPlugin{}
|
||||
|
||||
func newFakeKubenetPlugin(initMap map[kubecontainer.ContainerID]string, execer exec.Interface, host network.Host) *kubenetNetworkPlugin {
|
||||
return &kubenetNetworkPlugin{
|
||||
podIPs: initMap,
|
||||
execer: execer,
|
||||
mtu: 1460,
|
||||
host: host,
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPodNetworkStatus(t *testing.T) {
|
||||
podIPMap := make(map[kubecontainer.ContainerID]string)
|
||||
podIPMap[kubecontainer.ContainerID{ID: "1"}] = "10.245.0.2"
|
||||
podIPMap[kubecontainer.ContainerID{ID: "2"}] = "10.245.0.3"
|
||||
|
||||
testCases := []struct {
|
||||
id string
|
||||
expectError bool
|
||||
expectIP string
|
||||
}{
|
||||
//in podCIDR map
|
||||
{
|
||||
"1",
|
||||
false,
|
||||
"10.245.0.2",
|
||||
},
|
||||
{
|
||||
"2",
|
||||
false,
|
||||
"10.245.0.3",
|
||||
},
|
||||
//not in podCIDR map
|
||||
{
|
||||
"3",
|
||||
true,
|
||||
"",
|
||||
},
|
||||
//TODO: add test cases for retrieving ip inside container network namespace
|
||||
}
|
||||
|
||||
fakeCmds := make([]exec.FakeCommandAction, 0)
|
||||
for _, t := range testCases {
|
||||
// the fake commands return the IP from the given index, or an error
|
||||
fCmd := exec.FakeCmd{
|
||||
CombinedOutputScript: []exec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) {
|
||||
ip, ok := podIPMap[kubecontainer.ContainerID{ID: t.id}]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Pod IP %q not found", t.id)
|
||||
}
|
||||
return []byte(ip), nil
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
|
||||
return exec.InitFakeCmd(&fCmd, cmd, args...)
|
||||
})
|
||||
}
|
||||
fexec := exec.FakeExec{
|
||||
CommandScript: fakeCmds,
|
||||
LookPathFunc: func(file string) (string, error) {
|
||||
return fmt.Sprintf("/fake-bin/%s", file), nil
|
||||
},
|
||||
}
|
||||
|
||||
fhost := nettest.NewFakeHost(nil)
|
||||
fakeKubenet := newFakeKubenetPlugin(podIPMap, &fexec, fhost)
|
||||
|
||||
for i, tc := range testCases {
|
||||
out, err := fakeKubenet.GetPodNetworkStatus("", "", kubecontainer.ContainerID{ID: tc.id})
|
||||
if tc.expectError {
|
||||
if err == nil {
|
||||
t.Errorf("Test case %d expects error but got none", i)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Test case %d expects error but got error: %v", i, err)
|
||||
}
|
||||
}
|
||||
if tc.expectIP != out.IP.String() {
|
||||
t.Errorf("Test case %d expects ip %s but got %s", i, tc.expectIP, out.IP.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestTeardownCallsShaper tests that a `TearDown` call does call
|
||||
// `shaper.Reset`
|
||||
func TestTeardownCallsShaper(t *testing.T) {
|
||||
fexec := &exec.FakeExec{
|
||||
CommandScript: []exec.FakeCommandAction{},
|
||||
LookPathFunc: func(file string) (string, error) {
|
||||
return fmt.Sprintf("/fake-bin/%s", file), nil
|
||||
},
|
||||
}
|
||||
fhost := nettest.NewFakeHost(nil)
|
||||
fshaper := &bandwidth.FakeShaper{}
|
||||
mockcni := &mock_cni.MockCNI{}
|
||||
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
|
||||
kubenet.cniConfig = mockcni
|
||||
kubenet.iptables = ipttest.NewFake()
|
||||
kubenet.bandwidthShaper = fshaper
|
||||
kubenet.hostportHandler = hostporttest.NewFakeHostportHandler()
|
||||
|
||||
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
|
||||
|
||||
details := make(map[string]interface{})
|
||||
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
|
||||
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
|
||||
|
||||
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
|
||||
kubenet.podIPs[existingContainerID] = "10.0.0.1"
|
||||
|
||||
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
|
||||
t.Fatalf("Unexpected error in TearDownPod: %v", err)
|
||||
}
|
||||
assert.Equal(t, []string{"10.0.0.1/32"}, fshaper.ResetCIDRs, "shaper.Reset should have been called")
|
||||
|
||||
mockcni.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestInit_MTU tests that an `Init` call with an MTU sets the MTU
|
||||
func TestInit_MTU(t *testing.T) {
|
||||
var fakeCmds []exec.FakeCommandAction
|
||||
{
|
||||
// modprobe br-netfilter
|
||||
fCmd := exec.FakeCmd{
|
||||
CombinedOutputScript: []exec.FakeCombinedOutputAction{
|
||||
func() ([]byte, error) {
|
||||
return make([]byte, 0), nil
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
|
||||
return exec.InitFakeCmd(&fCmd, cmd, args...)
|
||||
})
|
||||
}
|
||||
|
||||
fexec := &exec.FakeExec{
|
||||
CommandScript: fakeCmds,
|
||||
LookPathFunc: func(file string) (string, error) {
|
||||
return fmt.Sprintf("/fake-bin/%s", file), nil
|
||||
},
|
||||
}
|
||||
|
||||
fhost := nettest.NewFakeHost(nil)
|
||||
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
|
||||
kubenet.iptables = ipttest.NewFake()
|
||||
|
||||
sysctl := sysctltest.NewFake()
|
||||
sysctl.Settings["net/bridge/bridge-nf-call-iptables"] = 0
|
||||
kubenet.sysctl = sysctl
|
||||
|
||||
if err := kubenet.Init(nettest.NewFakeHost(nil), componentconfig.HairpinNone, "10.0.0.0/8", 1234); err != nil {
|
||||
t.Fatalf("Unexpected error in Init: %v", err)
|
||||
}
|
||||
assert.Equal(t, 1234, kubenet.mtu, "kubenet.mtu should have been set")
|
||||
assert.Equal(t, 1, sysctl.Settings["net/bridge/bridge-nf-call-iptables"], "net/bridge/bridge-nf-call-iptables sysctl should have been set")
|
||||
}
|
||||
|
||||
func TestGenerateMacAddress(t *testing.T) {
|
||||
testCases := []struct {
|
||||
ip net.IP
|
||||
expectedMAC string
|
||||
}{
|
||||
{
|
||||
ip: net.ParseIP("10.0.0.2"),
|
||||
expectedMAC: privateMACPrefix + ":0a:00:00:02",
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("10.250.0.244"),
|
||||
expectedMAC: privateMACPrefix + ":0a:fa:00:f4",
|
||||
},
|
||||
{
|
||||
ip: net.ParseIP("172.17.0.2"),
|
||||
expectedMAC: privateMACPrefix + ":ac:11:00:02",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
mac, err := generateHardwareAddr(tc.ip)
|
||||
if err != nil {
|
||||
t.Errorf("Did not expect error: %v", err)
|
||||
}
|
||||
if mac.String() != tc.expectedMAC {
|
||||
t.Errorf("generated mac: %q, expecting: %q", mac.String(), tc.expectedMAC)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestTearDownWithoutRuntime invokes the plugin's TearDownPod without a runtime.
|
||||
// This is how kubenet is invoked from the CRI.
|
||||
func TestTearDownWithoutRuntime(t *testing.T) {
|
||||
fhost := nettest.NewFakeHost(nil)
|
||||
fhost.Legacy = false
|
||||
fhost.Runtime = nil
|
||||
mockcni := &mock_cni.MockCNI{}
|
||||
|
||||
fexec := &exec.FakeExec{
|
||||
CommandScript: []exec.FakeCommandAction{},
|
||||
LookPathFunc: func(file string) (string, error) {
|
||||
return fmt.Sprintf("/fake-bin/%s", file), nil
|
||||
},
|
||||
}
|
||||
|
||||
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
|
||||
kubenet.cniConfig = mockcni
|
||||
kubenet.iptables = ipttest.NewFake()
|
||||
|
||||
details := make(map[string]interface{})
|
||||
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
|
||||
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
|
||||
|
||||
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
|
||||
kubenet.podIPs[existingContainerID] = "10.0.0.1"
|
||||
|
||||
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
|
||||
|
||||
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
|
||||
t.Fatalf("Unexpected error in TearDownPod: %v", err)
|
||||
}
|
||||
// Assert that the CNI DelNetwork made it through and we didn't crash
|
||||
// without a runtime.
|
||||
mockcni.AssertExpectations(t)
|
||||
}
|
||||
|
||||
//TODO: add unit test for each implementation of network plugin interface
|
55
vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_unsupported.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
// +build !linux
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubenet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
)
|
||||
|
||||
type kubenetNetworkPlugin struct {
|
||||
network.NoopNetworkPlugin
|
||||
}
|
||||
|
||||
func NewPlugin(networkPluginDir string) network.NetworkPlugin {
|
||||
return &kubenetNetworkPlugin{}
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
|
||||
return fmt.Errorf("Kubenet is not supported in this build")
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) Name() string {
|
||||
return "kubenet"
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
return fmt.Errorf("Kubenet is not supported in this build")
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
return fmt.Errorf("Kubenet is not supported in this build")
|
||||
}
|
||||
|
||||
func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
|
||||
return nil, fmt.Errorf("Kubenet is not supported in this build")
|
||||
}
|
34
vendor/k8s.io/kubernetes/pkg/kubelet/network/mock_network/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["network_plugins.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//vendor:github.com/golang/mock/gomock",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
133
vendor/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go
generated
vendored
Normal file
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Generated code, generated via: `mockgen k8s.io/kubernetes/pkg/kubelet/network NetworkPlugin > $GOPATH/src/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go`
|
||||
// Edited by hand for boilerplate and gofmt.
|
||||
// TODO, this should be autogenerated/autoupdated by scripts.
|
||||
|
||||
package mock_network
|
||||
|
||||
import (
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
sets "k8s.io/apimachinery/pkg/util/sets"
|
||||
componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
container "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
network "k8s.io/kubernetes/pkg/kubelet/network"
|
||||
)
|
||||
|
||||
// Mock of NetworkPlugin interface
|
||||
type MockNetworkPlugin struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *_MockNetworkPluginRecorder
|
||||
}
|
||||
|
||||
// Recorder for MockNetworkPlugin (not exported)
|
||||
type _MockNetworkPluginRecorder struct {
|
||||
mock *MockNetworkPlugin
|
||||
}
|
||||
|
||||
func NewMockNetworkPlugin(ctrl *gomock.Controller) *MockNetworkPlugin {
|
||||
mock := &MockNetworkPlugin{ctrl: ctrl}
|
||||
mock.recorder = &_MockNetworkPluginRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) EXPECT() *_MockNetworkPluginRecorder {
|
||||
return _m.recorder
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Capabilities() sets.Int {
|
||||
ret := _m.ctrl.Call(_m, "Capabilities")
|
||||
ret0, _ := ret[0].(sets.Int)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Finish() {
|
||||
_m.ctrl.Finish()
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) Capabilities() *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Capabilities")
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Event(_param0 string, _param1 map[string]interface{}) {
|
||||
_m.ctrl.Call(_m, "Event", _param0, _param1)
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) Event(arg0, arg1 interface{}) *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Event", arg0, arg1)
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) GetPodNetworkStatus(_param0 string, _param1 string, _param2 container.ContainerID) (*network.PodNetworkStatus, error) {
|
||||
ret := _m.ctrl.Call(_m, "GetPodNetworkStatus", _param0, _param1, _param2)
|
||||
ret0, _ := ret[0].(*network.PodNetworkStatus)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) GetPodNetworkStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPodNetworkStatus", arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Init(_param0 network.Host, _param1 componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
|
||||
ret := _m.ctrl.Call(_m, "Init", _param0, _param1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) Init(arg0, arg1 interface{}) *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Init", arg0, arg1)
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Name() string {
|
||||
ret := _m.ctrl.Call(_m, "Name")
|
||||
ret0, _ := ret[0].(string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) Name() *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Name")
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) SetUpPod(_param0 string, _param1 string, _param2 container.ContainerID) error {
|
||||
ret := _m.ctrl.Call(_m, "SetUpPod", _param0, _param1, _param2)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) SetUpPod(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "SetUpPod", arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) Status() error {
|
||||
ret := _m.ctrl.Call(_m, "Status")
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) Status() *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Status")
|
||||
}
|
||||
|
||||
func (_m *MockNetworkPlugin) TearDownPod(_param0 string, _param1 string, _param2 container.ContainerID) error {
|
||||
ret := _m.ctrl.Call(_m, "TearDownPod", _param0, _param1, _param2)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
func (_mr *_MockNetworkPluginRecorder) TearDownPod(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "TearDownPod", arg0, arg1, arg2)
|
||||
}
|
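A minimal usage sketch of the generated mock, assuming the standard gomock flow (NewController, EXPECT, Return, Finish); the enclosing test function and the "fake-plugin" name are hypothetical:

ctrl := gomock.NewController(t)
defer ctrl.Finish()

plug := mock_network.NewMockNetworkPlugin(ctrl)
plug.EXPECT().Name().Return("fake-plugin")

// Code under test can consume plug as a network.NetworkPlugin.
if got := plug.Name(); got != "fake-plugin" {
	t.Errorf("unexpected plugin name %q", got)
}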
24
vendor/k8s.io/kubernetes/pkg/kubelet/network/network.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
|
||||
// TODO: Consider making this value configurable.
|
||||
const DefaultInterfaceName = "eth0"
|
||||
|
||||
// UseDefaultMTU is a marker value that indicates the plugin should determine its own MTU
|
||||
// It is the zero value, so a non-initialized value will mean "UseDefault"
|
||||
const UseDefaultMTU = 0
|
278
vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins.go
generated
vendored
Normal file
|
@ -0,0 +1,278 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilsets "k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
|
||||
)
|
||||
|
||||
const DefaultPluginName = "kubernetes.io/no-op"
|
||||
|
||||
// Called when the node's Pod CIDR is known when using the
|
||||
// controller manager's --allocate-node-cidrs=true option
|
||||
const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change"
|
||||
const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr"
|
||||
|
||||
// Plugin capabilities
|
||||
const (
|
||||
// Indicates the plugin handles Kubernetes bandwidth shaping annotations internally
|
||||
NET_PLUGIN_CAPABILITY_SHAPING int = 1
|
||||
)
|
||||
|
||||
// NetworkPlugin is an interface to network plugins for the kubelet
|
||||
type NetworkPlugin interface {
|
||||
// Init initializes the plugin. This will be called exactly once
|
||||
// before any other methods are called.
|
||||
Init(host Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error
|
||||
|
||||
// Called on various events like:
|
||||
// NET_PLUGIN_EVENT_POD_CIDR_CHANGE
|
||||
Event(name string, details map[string]interface{})
|
||||
|
||||
// Name returns the plugin's name. This will be used when searching
|
||||
// for a plugin by name.
|
||||
Name() string
|
||||
|
||||
// Returns a set of NET_PLUGIN_CAPABILITY_*
|
||||
Capabilities() utilsets.Int
|
||||
|
||||
// SetUpPod is the method called after the infra container of
|
||||
// the pod has been created but before the other containers of the
|
||||
// pod are launched.
|
||||
// TODO: rename podInfraContainerID to sandboxID
|
||||
SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) error
|
||||
|
||||
// TearDownPod is the method called before a pod's infra container will be deleted
|
||||
// TODO: rename podInfraContainerID to sandboxID
|
||||
TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) error
|
||||
|
||||
// GetPodNetworkStatus is the method called to obtain the IPv4 or IPv6 addresses of the container
|
||||
// TODO: rename podInfraContainerID to sandboxID
|
||||
GetPodNetworkStatus(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) (*PodNetworkStatus, error)
|
||||
|
||||
// NetworkStatus returns error if the network plugin is in error state
|
||||
Status() error
|
||||
}
|
||||
|
||||
// PodNetworkStatus stores the network status of a pod (currently just the primary IP address)
|
||||
// This struct represents version "v1beta1"
|
||||
type PodNetworkStatus struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that -
|
||||
// - kube expects to be reachable across the cluster
|
||||
// - service endpoints are constructed with
|
||||
// - will be reported in the PodStatus.PodIP field (will override the IP reported by docker)
|
||||
IP net.IP `json:"ip" description:"Primary IP address of the pod"`
|
||||
}
|
||||
|
||||
// LegacyHost implements the methods required by network plugins that
|
||||
// were directly invoked by the kubelet. Implementations of this interface
|
||||
// that do not wish to support these features can simply return false
|
||||
// to SupportsLegacyFeatures.
|
||||
type LegacyHost interface {
|
||||
// Get the pod structure by its name, namespace
|
||||
// Only used for hostport management and bw shaping
|
||||
GetPodByName(namespace, name string) (*v1.Pod, bool)
|
||||
|
||||
// GetKubeClient returns a client interface
|
||||
// Only used in testing
|
||||
GetKubeClient() clientset.Interface
|
||||
|
||||
// GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt)
|
||||
// Only used for hostport management
|
||||
GetRuntime() kubecontainer.Runtime
|
||||
|
||||
// SupportsLegacyFeatures returns true if this host can support hostports
|
||||
// and bandwidth shaping. Both will either get added to CNI or dropped,
|
||||
// so different implementations can choose to ignore them.
|
||||
SupportsLegacyFeatures() bool
|
||||
}
|
||||
|
||||
// Host is an interface that plugins can use to access the kubelet.
|
||||
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
|
||||
// the back channel is restricted to host-ports/testing, and restricted
|
||||
// to kubenet. No other network plugin wrapper needs it. Other plugins
|
||||
// only require a way to access namespace information, which they can do
|
||||
// directly through the embedded NamespaceGetter.
|
||||
type Host interface {
|
||||
// NamespaceGetter is a getter for sandbox namespace information.
|
||||
// It's the only part of this interface that isn't currently deprecated.
|
||||
NamespaceGetter
|
||||
|
||||
// LegacyHost contains methods that trap back into the Kubelet. Dependence on it is deprecated;
|
||||
// *do not* add more dependencies in this interface. In a post-cri world,
|
||||
// network plugins will be invoked by the runtime shim, and should only
|
||||
// require NamespaceGetter.
|
||||
LegacyHost
|
||||
}
|
||||
|
||||
// NamespaceGetter is an interface to retrieve namespace information for a given
|
||||
// sandboxID. Typically implemented by runtime shims that are closely coupled to
|
||||
// CNI plugin wrappers like kubenet.
|
||||
type NamespaceGetter interface {
|
||||
// GetNetNS returns network namespace information for the given containerID.
|
||||
GetNetNS(containerID string) (string, error)
|
||||
}
|
||||
|
||||
// InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names.
|
||||
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) (NetworkPlugin, error) {
|
||||
if networkPluginName == "" {
|
||||
// default to the no_op plugin
|
||||
plug := &NoopNetworkPlugin{}
|
||||
if err := plug.Init(host, hairpinMode, nonMasqueradeCIDR, mtu); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return plug, nil
|
||||
}
|
||||
|
||||
pluginMap := map[string]NetworkPlugin{}
|
||||
|
||||
allErrs := []error{}
|
||||
for _, plugin := range plugins {
|
||||
name := plugin.Name()
|
||||
if errs := validation.IsQualifiedName(name); len(errs) != 0 {
|
||||
allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
|
||||
continue
|
||||
}
|
||||
|
||||
if _, found := pluginMap[name]; found {
|
||||
allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name))
|
||||
continue
|
||||
}
|
||||
pluginMap[name] = plugin
|
||||
}
|
||||
|
||||
chosenPlugin := pluginMap[networkPluginName]
|
||||
if chosenPlugin != nil {
|
||||
err := chosenPlugin.Init(host, hairpinMode, nonMasqueradeCIDR, mtu)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err))
|
||||
} else {
|
||||
glog.V(1).Infof("Loaded network plugin %q", networkPluginName)
|
||||
}
|
||||
} else {
|
||||
allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName))
|
||||
}
|
||||
|
||||
return chosenPlugin, utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func UnescapePluginName(in string) string {
|
||||
return strings.Replace(in, "~", "/", -1)
|
||||
}
|
||||
|
||||
type NoopNetworkPlugin struct {
|
||||
}
|
||||
|
||||
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
|
||||
|
||||
func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
|
||||
// Set bridge-nf-call-iptables=1 to maintain compatibility with older
|
||||
// kubernetes versions to ensure the iptables-based kube proxy functions
|
||||
// correctly. Other plugins are responsible for setting this correctly
|
||||
// depending on whether or not they connect containers to Linux bridges
|
||||
// or use some other mechanism (ie, SDN vswitch).
|
||||
|
||||
// Ensure the netfilter module is loaded on kernel >= 3.18; previously
|
||||
// it was built-in.
|
||||
utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput()
|
||||
if err := utilsysctl.New().SetSysctl(sysctlBridgeCallIPTables, 1); err != nil {
|
||||
glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) Event(name string, details map[string]interface{}) {
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) Name() string {
|
||||
return DefaultPluginName
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) Capabilities() utilsets.Int {
|
||||
return utilsets.NewInt()
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*PodNetworkStatus, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (plugin *NoopNetworkPlugin) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceName, addrType string) (net.IP, error) {
|
||||
// Try to retrieve ip inside container network namespace
|
||||
output, err := execer.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
|
||||
"ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err)
|
||||
}
|
||||
|
||||
lines := strings.Split(string(output), "\n")
|
||||
if len(lines) < 1 {
|
||||
return nil, fmt.Errorf("Unexpected command output %s", output)
|
||||
}
|
||||
fields := strings.Fields(lines[0])
|
||||
if len(fields) < 4 {
|
||||
return nil, fmt.Errorf("Unexpected address output %s ", lines[0])
|
||||
}
|
||||
ip, _, err := net.ParseCIDR(fields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
// GetPodIP gets the IP of the pod by inspecting the network info inside the pod's network namespace.
|
||||
func GetPodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceName string) (net.IP, error) {
|
||||
ip, err := getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-4")
|
||||
if err != nil {
|
||||
// Fall back to IPv6 address if no IPv4 address is present
|
||||
ip, err = getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-6")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
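A minimal sketch of how a wrapper such as kubenet might call GetPodIP from inside this package (reusing the imports already present in plugins.go); the nsenter lookup and the netns path literal are placeholder assumptions, not values taken from this change:

execer := utilexec.New()
nsenterPath, err := execer.LookPath("nsenter")
if err != nil {
	return err
}
ip, err := GetPodIP(execer, nsenterPath, "/proc/1234/ns/net", DefaultInterfaceName)
if err != nil {
	return err
}
glog.V(4).Infof("pod IP: %s", ip)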
38
vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins_test.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
|
||||
)
|
||||
|
||||
func TestSelectDefaultPlugin(t *testing.T) {
|
||||
all_plugins := []NetworkPlugin{}
|
||||
plug, err := InitNetworkPlugin(all_plugins, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "10.0.0.0/8", UseDefaultMTU)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error in selecting default plugin: %v", err)
|
||||
}
|
||||
if plug == nil {
|
||||
t.Fatalf("Failed to select the default plugin.")
|
||||
}
|
||||
if plug.Name() != DefaultPluginName {
|
||||
t.Errorf("Failed to select the default plugin. Expected %s. Got %s", DefaultPluginName, plug.Name())
|
||||
}
|
||||
}
|
33
vendor/k8s.io/kubernetes/pkg/kubelet/network/testing/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["fake_host.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
63
vendor/k8s.io/kubernetes/pkg/kubelet/network/testing/fake_host.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
// helper for testing plugins
|
||||
// a fake host is created here that can be used by plugins for testing
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
)
|
||||
|
||||
type fakeNetworkHost struct {
|
||||
fakeNamespaceGetter
|
||||
kubeClient clientset.Interface
|
||||
Legacy bool
|
||||
Runtime *containertest.FakeRuntime
|
||||
}
|
||||
|
||||
func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost {
|
||||
host := &fakeNetworkHost{kubeClient: kubeClient, Legacy: true, Runtime: &containertest.FakeRuntime{}}
|
||||
return host
|
||||
}
|
||||
|
||||
func (fnh *fakeNetworkHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime {
|
||||
return nh.Runtime
|
||||
}
|
||||
|
||||
func (nh *fakeNetworkHost) SupportsLegacyFeatures() bool {
|
||||
return nh.Legacy
|
||||
}
|
||||
|
||||
type fakeNamespaceGetter struct {
|
||||
ns string
|
||||
}
|
||||
|
||||
func (nh *fakeNamespaceGetter) GetNetNS(containerID string) (string, error) {
|
||||
return nh.ns, nil
|
||||
}
|
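A minimal sketch of how a plugin test might drive the fake host above (imported as nettest elsewhere in this change); the container ID literal is arbitrary:

fhost := nettest.NewFakeHost(nil)
ns, err := fhost.GetNetNS("container-id")
if err != nil {
	t.Fatalf("unexpected error from fake GetNetNS: %v", err)
}
// The embedded fake namespace getter returns an empty namespace by default.
fmt.Printf("netns: %q\n", ns)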