*: initial update to kube 1.8

Signed-off-by: Antonio Murdaca <runcom@redhat.com>

parent 2453222695
commit d6e819133d

1237 changed files with 84117 additions and 564982 deletions
176 changes: vendor/k8s.io/kubernetes/pkg/volume/plugins.go (generated, vendored)
@@ -23,17 +23,26 @@ import (
     "sync"
 
     "github.com/golang/glog"
+    "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/validation"
-    "k8s.io/kubernetes/pkg/api/v1"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/util/io"
     "k8s.io/kubernetes/pkg/util/mount"
 )
 
+const (
+    // Common parameter which can be specified in StorageClass to specify the desired FSType
+    // Provisioners SHOULD implement support for this if they are block device based
+    // Must be a filesystem type supported by the host operating system.
+    // Ex. "ext4", "xfs", "ntfs". Default value depends on the provisioner
+    VolumeParameterFSType = "fstype"
+)
+
 // VolumeOptions contains option information about a volume.
 type VolumeOptions struct {
     // The attributes below are required by volume.Provisioner
@@ -42,6 +51,8 @@ type VolumeOptions struct {
 
     // Reclamation policy for a persistent volume
     PersistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy
+    // Mount options for a persistent volume
+    MountOptions []string
     // Suggested PV.Name of the PersistentVolume to provision.
     // This is a generated name guaranteed to be unique in Kubernetes cluster.
    // If you choose not to use it as volume name, ensure uniqueness by either
@@ -58,6 +69,19 @@ type VolumeOptions struct {
     CloudTags *map[string]string
     // Volume provisioning parameters from StorageClass
     Parameters map[string]string
+    // This flag helps identify whether kubelet is running in a container
+    Containerized bool
 }
 
+type DynamicPluginProber interface {
+    Init() error
+
+    // If an update has occurred since the last probe, updated = true
+    // and the list of probed plugins is returned.
+    // Otherwise, update = false and probedPlugins = nil.
+    //
+    // If an error occurs, updated and probedPlugins are undefined.
+    Probe() (updated bool, probedPlugins []VolumePlugin, err error)
+}
+
 // VolumePlugin is an interface to volume plugins that can be used on a
@@ -178,6 +202,14 @@ type AttachableVolumePlugin interface {
     GetDeviceMountRefs(deviceMountPath string) ([]string, error)
 }
 
+// ExpandableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that can be
+// expanded
+type ExpandableVolumePlugin interface {
+    VolumePlugin
+    ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error)
+    RequiresFSResize() bool
+}
+
 // VolumeHost is an interface that plugins can use to access the kubelet.
 type VolumeHost interface {
     // GetPluginDir returns the absolute path to a directory under which
@@ -216,7 +248,7 @@ type VolumeHost interface {
     GetCloudProvider() cloudprovider.Interface
 
     // Get mounter interface.
-    GetMounter() mount.Interface
+    GetMounter(pluginName string) mount.Interface
 
     // Get writer interface for writing data to disk.
     GetWriter() io.Writer
@@ -236,15 +268,20 @@ type VolumeHost interface {
     // Returns a function that returns a configmap.
     GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error)
 
+    // Returns an interface that should be used to execute any utilities in volume plugins
+    GetExec(pluginName string) mount.Exec
+
+    // Returns the labels on the node
+    GetNodeLabels() (map[string]string, error)
 }
 
 // VolumePluginMgr tracks registered plugins.
 type VolumePluginMgr struct {
-    mutex   sync.Mutex
-    plugins map[string]VolumePlugin
-    Host    VolumeHost
+    mutex         sync.Mutex
+    plugins       map[string]VolumePlugin
+    prober        DynamicPluginProber
+    probedPlugins []VolumePlugin
+    Host          VolumeHost
 }
 
 // Spec is an internal representation of a volume. All API volume types translate to Spec.
@@ -339,11 +376,24 @@ func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec {
 // InitPlugins initializes each plugin. All plugins must have unique names.
 // This must be called exactly once before any New* methods are called on any
 // plugins.
-func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost) error {
+func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPluginProber, host VolumeHost) error {
     pm.mutex.Lock()
     defer pm.mutex.Unlock()
 
     pm.Host = host
 
+    if prober == nil {
+        // Use a dummy prober to prevent nil deference.
+        pm.prober = &dummyPluginProber{}
+    } else {
+        pm.prober = prober
+    }
+    if err := pm.prober.Init(); err != nil {
+        // Prober init failure should not affect the initialization of other plugins.
+        glog.Errorf("Error initializing dynamic plugin prober: %s", err)
+        pm.prober = &dummyPluginProber{}
+    }
+
     if pm.plugins == nil {
         pm.plugins = map[string]VolumePlugin{}
     }
@@ -362,7 +412,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost)
         }
         err := plugin.Init(host)
         if err != nil {
-            glog.Errorf("Failed to load volume plugin %s, error: %s", plugin, err.Error())
+            glog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error())
             allErrs = append(allErrs, err)
             continue
         }
@@ -372,6 +422,21 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, host VolumeHost)
     return utilerrors.NewAggregate(allErrs)
 }
 
+func (pm *VolumePluginMgr) initProbedPlugin(probedPlugin VolumePlugin) error {
+    name := probedPlugin.GetPluginName()
+    if errs := validation.IsQualifiedName(name); len(errs) != 0 {
+        return fmt.Errorf("volume plugin has invalid name: %q: %s", name, strings.Join(errs, ";"))
+    }
+
+    err := probedPlugin.Init(pm.Host)
+    if err != nil {
+        return fmt.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error())
+    }
+
+    glog.V(1).Infof("Loaded volume plugin %q", name)
+    return nil
+}
+
 // FindPluginBySpec looks for a plugin that can support a given volume
 // specification. If no plugins can support or more than one plugin can
 // support it, return error.
@@ -379,19 +444,34 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
     pm.mutex.Lock()
     defer pm.mutex.Unlock()
 
-    matches := []string{}
+    if spec == nil {
+        return nil, fmt.Errorf("Could not find plugin because volume spec is nil")
+    }
+
+    matchedPluginNames := []string{}
+    matches := []VolumePlugin{}
     for k, v := range pm.plugins {
         if v.CanSupport(spec) {
-            matches = append(matches, k)
+            matchedPluginNames = append(matchedPluginNames, k)
+            matches = append(matches, v)
         }
     }
 
+    pm.refreshProbedPlugins()
+    for _, plugin := range pm.probedPlugins {
+        if plugin.CanSupport(spec) {
+            matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+            matches = append(matches, plugin)
+        }
+    }
+
     if len(matches) == 0 {
         return nil, fmt.Errorf("no volume plugin matched")
     }
     if len(matches) > 1 {
-        return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+        return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
     }
-    return pm.plugins[matches[0]], nil
+    return matches[0], nil
 }
 
 // FindPluginByName fetches a plugin by name or by legacy name. If no plugin
@@ -401,19 +481,52 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
     defer pm.mutex.Unlock()
 
     // Once we can get rid of legacy names we can reduce this to a map lookup.
-    matches := []string{}
+    matchedPluginNames := []string{}
+    matches := []VolumePlugin{}
     for k, v := range pm.plugins {
         if v.GetPluginName() == name {
-            matches = append(matches, k)
+            matchedPluginNames = append(matchedPluginNames, k)
+            matches = append(matches, v)
        }
     }
 
+    pm.refreshProbedPlugins()
+    for _, plugin := range pm.probedPlugins {
+        if plugin.GetPluginName() == name {
+            matchedPluginNames = append(matchedPluginNames, plugin.GetPluginName())
+            matches = append(matches, plugin)
+        }
+    }
+
     if len(matches) == 0 {
         return nil, fmt.Errorf("no volume plugin matched")
     }
     if len(matches) > 1 {
-        return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+        return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
     }
+    return matches[0], nil
+}
+
+// Check if probedPlugin cache update is required.
+// If it is, initialize all probed plugins and replace the cache with them.
+func (pm *VolumePluginMgr) refreshProbedPlugins() {
+    updated, plugins, err := pm.prober.Probe()
+    if err != nil {
+        glog.Errorf("Error dynamically probing plugins: %s", err)
+        return // Use cached plugins upon failure.
+    }
+
+    if updated {
+        pm.probedPlugins = []VolumePlugin{}
+        for _, plugin := range plugins {
+            if err := pm.initProbedPlugin(plugin); err != nil {
+                glog.Errorf("Error initializing dynamically probed plugin %s; error: %s",
+                    plugin.GetPluginName(), err)
+                continue
+            }
+            pm.probedPlugins = append(pm.probedPlugins, plugin)
+        }
+    }
-    return pm.plugins[matches[0]], nil
 }
 
 // FindPersistentPluginBySpec looks for a persistent volume plugin that can
@@ -538,6 +651,32 @@ func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVolumePlugin, error) {
     return nil, nil
 }
 
+// FindExpandablePluginBySpec fetches a persistent volume plugin by spec.
+func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVolumePlugin, error) {
+    volumePlugin, err := pm.FindPluginBySpec(spec)
+    if err != nil {
+        return nil, err
+    }
+
+    if expandableVolumePlugin, ok := volumePlugin.(ExpandableVolumePlugin); ok {
+        return expandableVolumePlugin, nil
+    }
+    return nil, nil
+}
+
+// FindExpandablePluginBySpec fetches a persistent volume plugin by name.
+func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVolumePlugin, error) {
+    volumePlugin, err := pm.FindPluginByName(name)
+    if err != nil {
+        return nil, err
+    }
+
+    if expandableVolumePlugin, ok := volumePlugin.(ExpandableVolumePlugin); ok {
+        return expandableVolumePlugin, nil
+    }
+    return nil, nil
+}
+
 // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
 // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests
 // for emptiness. Most attributes of the template will be correct for most
@@ -601,3 +740,8 @@ func ValidateRecyclerPodTemplate(pod *v1.Pod) error {
     }
     return nil
 }
+
+type dummyPluginProber struct{}
+
+func (*dummyPluginProber) Init() error { return nil }
+func (*dummyPluginProber) Probe() (bool, []VolumePlugin, error) { return false, nil, nil }
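Note on the InitPlugins change above: the new signature threads a DynamicPluginProber into the manager, so every caller has to be updated. A minimal sketch of an adapted caller, assuming hypothetical ProbeVolumePlugins and volumeHost helpers (neither is part of this diff):

    // Passing nil for the prober is safe: InitPlugins substitutes
    // dummyPluginProber, whose Probe() reports no dynamic plugins.
    pluginMgr := &volume.VolumePluginMgr{}
    if err := pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumeHost); err != nil {
        glog.Errorf("could not initialize volume plugins: %v", err)
    }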
89 changes: vendor/k8s.io/kubernetes/pkg/volume/util.go (generated, vendored)
@@ -20,11 +20,11 @@ import (
     "fmt"
     "reflect"
 
+    "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/kubernetes/pkg/api/v1"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    clientset "k8s.io/client-go/kubernetes"
 
     "hash/fnv"
     "math/rand"
@@ -180,7 +180,10 @@ func (c *realRecyclerClient) Event(eventtype, message string) {
 }
 
 func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
-    podSelector, _ := fields.ParseSelector("metadata.name=" + name)
+    podSelector, err := fields.ParseSelector("metadata.name=" + name)
+    if err != nil {
+        return nil, err
+    }
     options := metav1.ListOptions{
         FieldSelector: podSelector.String(),
         Watch:         true,
@@ -295,9 +298,52 @@ func GetPath(mounter Mounter) (string, error) {
 func ChooseZoneForVolume(zones sets.String, pvcName string) string {
     // We create the volume in a zone determined by the name
     // Eventually the scheduler will coordinate placement into an available zone
-    var hash uint32
-    var index uint32
+    hash, index := getPVCNameHashAndIndexOffset(pvcName)
+
+    // Zones.List returns zones in a consistent order (sorted)
+    // We do have a potential failure case where volumes will not be properly spread,
+    // if the set of zones changes during StatefulSet volume creation. However, this is
+    // probably relatively unlikely because we expect the set of zones to be essentially
+    // static for clusters.
+    // Hopefully we can address this problem if/when we do full scheduler integration of
+    // PVC placement (which could also e.g. avoid putting volumes in overloaded or
+    // unhealthy zones)
+    zoneSlice := zones.List()
+    zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
+
+    glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
+    return zone
+}
+
+// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks.
+func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
+    // We create the volume in a zone determined by the name
+    // Eventually the scheduler will coordinate placement into an available zone
+    hash, index := getPVCNameHashAndIndexOffset(pvcName)
+
+    // Zones.List returns zones in a consistent order (sorted)
+    // We do have a potential failure case where volumes will not be properly spread,
+    // if the set of zones changes during StatefulSet volume creation. However, this is
+    // probably relatively unlikely because we expect the set of zones to be essentially
+    // static for clusters.
+    // Hopefully we can address this problem if/when we do full scheduler integration of
+    // PVC placement (which could also e.g. avoid putting volumes in overloaded or
+    // unhealthy zones)
+    zoneSlice := zones.List()
+    replicaZones := sets.NewString()
+
+    startingIndex := index * numZones
+    for index = startingIndex; index < startingIndex+numZones; index++ {
+        zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
+        replicaZones.Insert(zone)
+    }
+
+    glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
+        pvcName, replicaZones.UnsortedList(), zoneSlice)
+    return replicaZones
+}
+
+func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
+    if pvcName == "" {
         // We should always be called with a name; this shouldn't happen
         glog.Warningf("No name defined during volume create; choosing random zone")
@@ -346,19 +392,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string {
         hash = h.Sum32()
     }
 
-    // Zones.List returns zones in a consistent order (sorted)
-    // We do have a potential failure case where volumes will not be properly spread,
-    // if the set of zones changes during StatefulSet volume creation. However, this is
-    // probably relatively unlikely because we expect the set of zones to be essentially
-    // static for clusters.
-    // Hopefully we can address this problem if/when we do full scheduler integration of
-    // PVC placement (which could also e.g. avoid putting volumes in overloaded or
-    // unhealthy zones)
-    zoneSlice := zones.List()
-    zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
-
-    glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
-    return zone
+    return hash, index
 }
 
 // UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
@@ -386,12 +420,17 @@ func MountOptionFromSpec(spec *Spec, options ...string) []string {
     pv := spec.PersistentVolume
 
     if pv != nil {
+        // Use beta annotation first
         if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
             moList := strings.Split(mo, ",")
             return JoinMountOptions(moList, options)
         }
+
+        if len(pv.Spec.MountOptions) > 0 {
+            return JoinMountOptions(pv.Spec.MountOptions, options)
+        }
     }
 
     return options
 }
@@ -411,20 +450,6 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string {
     return allMountOptions.UnsortedList()
 }
 
-// ZonesToSet converts a string containing a comma separated list of zones to set
-func ZonesToSet(zonesString string) (sets.String, error) {
-    zonesSlice := strings.Split(zonesString, ",")
-    zonesSet := make(sets.String)
-    for _, zone := range zonesSlice {
-        trimmedZone := strings.TrimSpace(zone)
-        if trimmedZone == "" {
-            return make(sets.String), fmt.Errorf("comma separated list of zones (%q) must not contain an empty zone", zonesString)
-        }
-        zonesSet.Insert(trimmedZone)
-    }
-    return zonesSet, nil
-}
-
 // ValidateZone returns:
 // - an error in case zone is an empty string or contains only any combination of spaces and tab characters
 // - nil otherwise
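The refactor above extracts the PVC-name hashing into getPVCNameHashAndIndexOffset so that ChooseZoneForVolume and the new ChooseZonesForVolume share it; the spreading itself is plain modular indexing over the sorted zone list. A toy illustration with invented hash/index values:

    // hash=7, index=2 might come from a StatefulSet PVC such as "data-web-2".
    zones := []string{"us-east-1a", "us-east-1b", "us-east-1c"}
    hash, index, numZones := uint32(7), uint32(2), uint32(2)
    startingIndex := index * numZones
    for i := startingIndex; i < startingIndex+numZones; i++ {
        fmt.Println(zones[(hash+i)%uint32(len(zones))]) // us-east-1c, then us-east-1a
    }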
10 changes: vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go (generated, vendored)
@@ -311,11 +311,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.String, error) {
     }
 
     relativePath := strings.TrimPrefix(path, w.targetDir)
-    if runtime.GOOS == "windows" {
-        relativePath = strings.TrimPrefix(relativePath, "\\")
-    } else {
-        relativePath = strings.TrimPrefix(relativePath, "/")
-    }
+    relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
     if strings.HasPrefix(relativePath, "..") {
         return nil
     }
@@ -339,7 +335,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.String, error) {
         for subPath := file; subPath != ""; {
             newPaths.Insert(subPath)
             subPath, _ = filepath.Split(subPath)
-            subPath = strings.TrimSuffix(subPath, "/")
+            subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
         }
     }
     glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
@@ -424,7 +420,7 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
         // Since filepath.Split leaves a trailing path separator, in this
         // example, dir = "foo/". In order to calculate the number of
         // subdirectories, we must subtract 1 from the number returned by split.
-        subDirs = len(strings.Split(dir, "/")) - 1
+        subDirs = len(strings.Split(dir, string(os.PathSeparator))) - 1
         err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm)
         if err != nil {
             return err
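All three hunks above replace hard-coded "/" separators (and the runtime.GOOS special case) with string(os.PathSeparator), which is '/' on Unix-like systems and '\\' on Windows, so one code path serves both. For example (paths invented):

    sep := string(os.PathSeparator)
    rel := strings.TrimPrefix("/var/lib/kubelet/pods/x", "/var/lib/kubelet")
    rel = strings.TrimPrefix(rel, sep) // "pods/x"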
7 changes: vendor/k8s.io/kubernetes/pkg/volume/util/fs.go (generated, vendored)
@@ -23,7 +23,8 @@ import (
     "fmt"
     "os/exec"
     "strings"
-    "syscall"
+
+    "golang.org/x/sys/unix"
 
     "k8s.io/apimachinery/pkg/api/resource"
 )
@@ -31,8 +32,8 @@ import (
 // FSInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
 // for the filesystem that path resides upon.
 func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
-    statfs := &syscall.Statfs_t{}
-    err := syscall.Statfs(path, statfs)
+    statfs := &unix.Statfs_t{}
+    err := unix.Statfs(path, statfs)
     if err != nil {
         return 0, 0, 0, 0, 0, 0, err
     }
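Moving from syscall.Statfs to unix.Statfs follows the Go project's guidance that the syscall package is frozen and new wrappers belong in golang.org/x/sys; the call shape is unchanged. A sketch of reading the result (path invented; field conversions as on Linux):

    var st unix.Statfs_t
    if err := unix.Statfs("/var/lib/kubelet", &st); err != nil {
        return err
    }
    available := int64(st.Bavail) * int64(st.Bsize) // bytes free for unprivileged users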
63 changes: vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+    "time"
+
+    "github.com/prometheus/client_golang/prometheus"
+)
+
+var storageOperationMetric = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Name: "storage_operation_duration_seconds",
+        Help: "Storage operation duration",
+    },
+    []string{"volume_plugin", "operation_name"},
+)
+
+var storageOperationErrorMetric = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Name: "storage_operation_errors_total",
+        Help: "Storage operation errors",
+    },
+    []string{"volume_plugin", "operation_name"},
+)
+
+func init() {
+    registerMetrics()
+}
+
+func registerMetrics() {
+    prometheus.MustRegister(storageOperationMetric)
+    prometheus.MustRegister(storageOperationErrorMetric)
+}
+
+// OperationCompleteHook returns a hook to call when an operation is completed
+func OperationCompleteHook(plugin, operationName string) func(error) {
+    requestTime := time.Now()
+    opComplete := func(err error) {
+        timeTaken := time.Since(requestTime).Seconds()
+        // Create metric with operation name and plugin name
+        if err != nil {
+            storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc()
+        } else {
+            storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken)
+        }
+    }
+    return opComplete
+}
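OperationCompleteHook captures the start time in a closure, so the caller invokes the returned function exactly once when the operation finishes: an error increments storage_operation_errors_total, while success records the elapsed seconds in the duration histogram (failed operations are not observed in the histogram). A usage sketch, with attachVolume as a hypothetical stand-in for the timed operation:

    opComplete := util.OperationCompleteHook("kubernetes.io/aws-ebs", "volume_attach")
    err := attachVolume() // hypothetical operation being timed
    opComplete(err)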
65 changes: vendor/k8s.io/kubernetes/pkg/volume/util/util.go (generated, vendored)
@@ -18,16 +18,23 @@ package util
 
 import (
     "fmt"
+    "io/ioutil"
     "os"
     "path"
-
     "strings"
 
     "github.com/golang/glog"
+    "k8s.io/api/core/v1"
+    storage "k8s.io/api/storage/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/sets"
+    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/kubernetes/pkg/api"
     v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
-    storage "k8s.io/kubernetes/pkg/apis/storage/v1"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/util/mount"
 )
@@ -211,3 +218,55 @@ func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
     }
     return nil
 }
+
+// LoadPodFromFile will read, decode, and return a Pod from a file.
+func LoadPodFromFile(filePath string) (*v1.Pod, error) {
+    if filePath == "" {
+        return nil, fmt.Errorf("file path not specified")
+    }
+    podDef, err := ioutil.ReadFile(filePath)
+    if err != nil {
+        return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err)
+    }
+    if len(podDef) == 0 {
+        return nil, fmt.Errorf("file was empty: %s", filePath)
+    }
+    pod := &v1.Pod{}
+
+    codec := api.Codecs.LegacyCodec(api.Registry.GroupOrDie(v1.GroupName).GroupVersion)
+    if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
+        return nil, fmt.Errorf("failed decoding file: %v", err)
+    }
+    return pod, nil
+}
+
+func ZonesSetToLabelValue(strSet sets.String) string {
+    return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
+}
+
+// ZonesToSet converts a string containing a comma separated list of zones to set
+func ZonesToSet(zonesString string) (sets.String, error) {
+    return stringToSet(zonesString, ",")
+}
+
+// LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set
+func LabelZonesToSet(labelZonesValue string) (sets.String, error) {
+    return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
+}
+
+// StringToSet converts a string containing list separated by specified delimiter to to a set
+func stringToSet(str, delimiter string) (sets.String, error) {
+    zonesSlice := strings.Split(str, delimiter)
+    zonesSet := make(sets.String)
+    for _, zone := range zonesSlice {
+        trimmedZone := strings.TrimSpace(zone)
+        if trimmedZone == "" {
+            return make(sets.String), fmt.Errorf(
+                "%q separated list (%q) must not contain an empty string",
+                delimiter,
+                str)
+        }
+        zonesSet.Insert(trimmedZone)
+    }
+    return zonesSet, nil
+}
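ZonesToSet reappears here as a thin wrapper: this commit deletes the standalone copy from pkg/volume/util.go (the -411,20 hunk earlier) and rebases it, together with the new LabelZonesToSet, on the shared stringToSet helper, which only varies the delimiter. For example:

    zones, err := util.ZonesToSet("us-east-1a,us-east-1b")
    if err != nil {
        return err
    }
    fmt.Println(zones.List()) // [us-east-1a us-east-1b]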
4 changes: vendor/k8s.io/kubernetes/pkg/volume/volume.go (generated, vendored)
@@ -19,10 +19,10 @@ package volume
 import (
     "time"
 
+    "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
-    "k8s.io/kubernetes/pkg/api/v1"
 )
 
 // Volume represents a directory used by pods or hosts on a node. All method
@@ -173,7 +173,7 @@ type Attacher interface {
     // node. If it successfully attaches, the path to the device
     // is returned. Otherwise, if the device does not attach after
     // the given timeout period, an error will be returned.
-    WaitForAttach(spec *Spec, devicePath string, timeout time.Duration) (string, error)
+    WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error)
 
     // GetDeviceMountPath returns a path where the device should
     // be mounted after it is attached. This is a global mount
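Because WaitForAttach is part of the Attacher interface, adding the *v1.Pod parameter ripples into every implementation, even those that ignore it. A sketch of a conforming implementation, with fakeAttacher as a hypothetical type:

    func (a *fakeAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
        // This plugin has no use for the pod; plugins that need pod-scoped
        // context (e.g. credentials) can now receive it directly.
        return devicePath, nil
    }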
14 changes: vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go (generated, vendored)
@@ -89,3 +89,17 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
         return nil
     })
 }
+
+// IsSameFSGroup is called only for requests to mount an already mounted
+// volume. It checks if fsGroup of new mount request is the same or not.
+// It returns false if it not the same. It also returns current Gid of a path
+// provided for dir variable.
+func IsSameFSGroup(dir string, fsGroup int64) (bool, int, error) {
+    info, err := os.Stat(dir)
+    if err != nil {
+        glog.Errorf("Error getting stats for %s (%v)", dir, err)
+        return false, 0, err
+    }
+    s := info.Sys().(*syscall.Stat_t)
+    return int(s.Gid) == int(fsGroup), int(s.Gid), nil
+}
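IsSameFSGroup reads the directory's current group owner through the syscall.Stat_t behind os.FileInfo.Sys(), which is why it needs a Linux-specific file. A usage sketch (path invented):

    same, gid, err := volume.IsSameFSGroup("/var/lib/kubelet/pods/<uid>/volumes/foo", 1000)
    if err == nil && !same {
        // The remount request carries a different fsGroup than the directory's
        // current group (gid); the caller decides whether to re-chown or reject.
    }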
4 changes: vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go (generated, vendored)
@@ -21,3 +21,7 @@ package volume
 func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
     return nil
 }
+
+func IsSameFSGroup(dir string, fsGroup int64) (bool, int, error) {
+    return true, int(fsGroup), nil
+}
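The Linux and unsupported variants stay mutually exclusive through build constraints; as recalled from the upstream files (the tag lines are not shown in this diff), they look like:

    // volume_linux.go
    // +build linux

    // volume_unsupported.go
    // +build !linux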