vendor: bump to Kube 1.9/master
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
parent 7076c73172
commit 7a675ccd92
202 changed files with 8543 additions and 7270 deletions
vendor/k8s.io/kubernetes/pkg/volume/plugins.go (generated, vendored): 4 changes

@@ -69,8 +69,6 @@ type VolumeOptions struct {
 	CloudTags *map[string]string
 	// Volume provisioning parameters from StorageClass
 	Parameters map[string]string
-	// This flag helps identify whether kubelet is running in a container
-	Containerized bool
 }
 
 type DynamicPluginProber interface {

@@ -713,7 +711,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:    "pv-recycler",
-					Image:   "gcr.io/google_containers/busybox",
+					Image:   "busybox:1.27",
 					Command: []string{"/bin/sh"},
 					Args:    []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"},
 					VolumeMounts: []v1.VolumeMount{
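Note: the image pin above matters because downstream consumers take this template as their default recycler pod. A minimal sketch, assuming the vendored package is importable as shown, of how a caller might fetch the template and override the image; the override value is illustrative, not part of this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume"
)

func main() {
	// Default recycler pod template as built by the vendored code above.
	pod := volume.NewPersistentVolumeRecyclerPodTemplate()

	// Illustrative override: operators commonly point the scrub container
	// at a mirrored or pinned image. This line is an assumption, not the diff.
	pod.Spec.Containers[0].Image = "busybox:1.27"

	fmt.Printf("recycler image: %s\n", pod.Spec.Containers[0].Image)
}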
vendor/k8s.io/kubernetes/pkg/volume/util.go (generated, vendored): 64 changes

@@ -36,7 +36,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
-	volutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
 type RecycleEventRecorder func(eventtype, message string)

@@ -48,8 +47,8 @@ type RecycleEventRecorder func(eventtype, message string)
 // attempted before returning.
 //
 // In case there is a pod with the same namespace+name already running, this
-// function assumes it's an older instance of the recycler pod and watches
-// this old pod instead of starting a new one.
+// function deletes it as it is not able to judge if it is an old recycler
+// or user has forged a fake recycler to block Kubernetes from recycling.
 //
 // pod - the pod designed by a volume plugin to recycle the volume. pod.Name
 // will be overwritten with unique name based on PV.Name.
@@ -81,20 +80,44 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
 	_, err = recyclerClient.CreatePod(pod)
 	if err != nil {
 		if errors.IsAlreadyExists(err) {
-			glog.V(5).Infof("old recycler pod %q found for volume", pod.Name)
+			deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
+			if deleteErr != nil {
+				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
+			}
+			// Recycler will try again and the old pod will hopefully be deleted
+			// at that time.
+			return fmt.Errorf("old recycler pod found, will retry later")
 		} else {
 			return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
 		}
 	}
-	defer func(pod *v1.Pod) {
-		glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
-		if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
-			glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
-		}
-	}(pod)
-
-	// Now only the old pod or the new pod run. Watch it until it finishes
-	// and send all events on the pod to the PV
+	err = waitForPod(pod, recyclerClient, podCh)
+
+	// In all cases delete the recycler pod and log its result.
+	glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
+	deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
+	if deleteErr != nil {
+		glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
+	}
+
+	// Returning recycler error is preferred, the pod will be deleted again on
+	// the next retry.
+	if err != nil {
+		return fmt.Errorf("failed to recycle volume: %s", err)
+	}
+
+	// Recycle succeeded but we failed to delete the recycler pod. Report it,
+	// the controller will re-try recycling the PV again shortly.
+	if deleteErr != nil {
+		return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
+	}
+
+	return nil
+}
+
+// waitForPod watches the pod until it finishes and sends all events on the
+// pod to the PV.
+func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
 	for {
 		event, ok := <-podCh
 		if !ok {
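Note: the behavioral change in this hunk is that a pre-existing recycler pod is no longer adopted and watched; it is deleted, and the function returns an error so the PV controller retries with a fresh pod. A hedged sketch of the caller-side loop this implies, assuming the exported wrapper in this file keeps the signature visible in the vendored source; the retry bound and delay are assumptions:

package recycleexample

import (
	"time"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/volume"
)

// recycleWithRetry sketches the retry contract implied above: the
// "old recycler pod found, will retry later" error is transient, and the
// stale pod has already been deleted, so a later attempt starts clean.
func recycleWithRetry(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder volume.RecycleEventRecorder) error {
	var err error
	for attempt := 0; attempt < 5; attempt++ { // retry bound is an assumption
		if err = volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, kubeClient, recorder); err == nil {
			return nil // volume scrubbed, recycler pod deleted
		}
		time.Sleep(10 * time.Second) // delay is an assumption
	}
	return err
}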
@@ -164,15 +187,15 @@ type realRecyclerClient struct {
 }
 
 func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
-	return c.client.Core().Pods(pod.Namespace).Create(pod)
+	return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
 }
 
 func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
-	return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
+	return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
 }
 
 func (c *realRecyclerClient) DeletePod(name, namespace string) error {
-	return c.client.Core().Pods(namespace).Delete(name, nil)
+	return c.client.CoreV1().Pods(namespace).Delete(name, nil)
 }
 
 func (c *realRecyclerClient) Event(eventtype, message string) {
@@ -189,13 +212,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
 		Watch: true,
 	}
 
-	podWatch, err := c.client.Core().Pods(namespace).Watch(options)
+	podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
 	if err != nil {
 		return nil, err
 	}
 
 	eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
-	eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{
+	eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
 		FieldSelector: eventSelector.String(),
 		Watch:         true,
 	})
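Note: every Core() to CoreV1() rewrite in this file is mechanical. client-go deprecated the unversioned Core() accessor in favor of the explicit CoreV1() group/version accessor; both return the same typed client, so only the call site spelling changes. A minimal sketch of the post-bump call shape, with placeholder names:

package clientexample

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// getPod shows the post-bump call shape. In this client-go version Core()
// still compiles, but CoreV1() is the non-deprecated spelling.
func getPod(c clientset.Interface, namespace, name string) (*v1.Pod, error) {
	return c.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}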
@@ -400,13 +423,6 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
 func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
 
-	if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil {
-		return fmt.Errorf("Error checking if path exists: %v", pathErr)
-	} else if !pathExists {
-		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
-		return nil
-	}
-
 	// Wrap EmptyDir, let it do the teardown.
 	wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
 	if err != nil {
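Note: this hunk drops the path-existence guard from UnmountViaEmptyDir, which is also why the volutil import was removed at the top of the file. A hedged sketch of how a caller could keep the same idempotent behavior itself, assuming the PathExists helper remains in pkg/volume/util as the removed code suggests:

package unmountexample

import (
	"fmt"

	"github.com/golang/glog"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// shouldSkipUnmount mirrors the guard removed above: an already-missing
// mount path is treated as "nothing to do" rather than an error.
func shouldSkipUnmount(dir string) (bool, error) {
	exists, err := volutil.PathExists(dir)
	if err != nil {
		return false, fmt.Errorf("error checking if path exists: %v", err)
	}
	if !exists {
		glog.Warningf("unmount skipped because path does not exist: %v", dir)
		return true, nil
	}
	return false, nil
}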
vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go (generated, vendored): 5 changes

@@ -24,8 +24,9 @@ import (
 
 var storageOperationMetric = prometheus.NewHistogramVec(
 	prometheus.HistogramOpts{
-		Name: "storage_operation_duration_seconds",
-		Help: "Storage operation duration",
+		Name:    "storage_operation_duration_seconds",
+		Help:    "Storage operation duration",
+		Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50},
 	},
 	[]string{"volume_plugin", "operation_name"},
 )
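Note: the functional change here is the explicit Buckets list, which replaces the Prometheus defaults (whose largest bucket is 10s) with buckets reaching 50s, a better fit for slow attach and mount operations. A sketch of how such a HistogramVec is registered and observed; the label values below are examples only:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var storageOperationMetric = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "storage_operation_duration_seconds",
		Help:    "Storage operation duration",
		Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50},
	},
	[]string{"volume_plugin", "operation_name"},
)

func main() {
	prometheus.MustRegister(storageOperationMetric)

	start := time.Now()
	// ... perform the storage operation ...
	storageOperationMetric.
		WithLabelValues("kubernetes.io/example-plugin", "volume_mount"). // example labels
		Observe(time.Since(start).Seconds())
}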
vendor/k8s.io/kubernetes/pkg/volume/util/util.go (generated, vendored): 8 changes

@@ -32,7 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/util/mount"

@@ -147,7 +147,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface
 	if kubeClient == nil {
 		return secret, fmt.Errorf("Cannot get kube client")
 	}
-	secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
+	secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{})
 	if err != nil {
 		return secret, err
 	}

@@ -163,7 +163,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface
 	if kubeClient == nil {
 		return secret, fmt.Errorf("Cannot get kube client")
 	}
-	secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
+	secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{})
 	if err != nil {
 		return secret, err
 	}

@@ -233,7 +233,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
 	}
 	pod := &v1.Pod{}
 
-	codec := api.Codecs.LegacyCodec(api.Registry.GroupOrDie(v1.GroupName).GroupVersion)
+	codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion)
 	if err := runtime.DecodeInto(codec, podDef, pod); err != nil {
 		return nil, fmt.Errorf("failed decoding file: %v", err)
 	}
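Note: the pkg/api to pkg/api/legacyscheme move reflects the upstream split of the internal scheme and registry out of pkg/api; call sites change only in how they resolve the codec, and runtime.DecodeInto is untouched. A hedged usage sketch of the LoadPodFromFile helper touched in the last hunk; the manifest path is an example, not from this diff:

package main

import (
	"fmt"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	// Decodes a pod manifest via the legacyscheme codec shown above.
	pod, err := volumeutil.LoadPodFromFile("/etc/kubernetes/recycler-pod.yaml") // example path
	if err != nil {
		fmt.Println("failed to load pod:", err)
		return
	}
	fmt.Println("loaded pod:", pod.Name)
}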