*: update kube vendor to v1.7.4

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-08-04 13:13:19 +02:00
parent c67859731f
commit d56bf090ce
1032 changed files with 273965 additions and 40081 deletions

vendor/k8s.io/apimachinery/pkg/util/cache/cache.go generated vendored Normal file

@ -0,0 +1,83 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
)
const (
shardsCount int = 32
)
type Cache []*cacheShard
func NewCache(maxSize int) Cache {
if maxSize < shardsCount {
maxSize = shardsCount
}
cache := make(Cache, shardsCount)
for i := 0; i < shardsCount; i++ {
cache[i] = &cacheShard{
items: make(map[uint64]interface{}),
maxSize: maxSize / shardsCount,
}
}
return cache
}
func (c Cache) getShard(index uint64) *cacheShard {
return c[index%uint64(shardsCount)]
}
// Returns true if object already existed, false otherwise.
func (c *Cache) Add(index uint64, obj interface{}) bool {
return c.getShard(index).add(index, obj)
}
func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
return c.getShard(index).get(index)
}
type cacheShard struct {
items map[uint64]interface{}
sync.RWMutex
maxSize int
}
// Returns true if object already existed, false otherwise.
func (s *cacheShard) add(index uint64, obj interface{}) bool {
s.Lock()
defer s.Unlock()
_, isOverwrite := s.items[index]
if !isOverwrite && len(s.items) >= s.maxSize {
var randomKey uint64
for randomKey = range s.items {
break
}
delete(s.items, randomKey)
}
s.items[index] = obj
return isOverwrite
}
func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
s.RLock()
defer s.RUnlock()
obj, found = s.items[index]
return
}
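
A minimal usage sketch for this sharded cache; the fnv hashing of a string key below is only illustrative and not part of this package:

package main

import (
	"fmt"
	"hash/fnv"

	"k8s.io/apimachinery/pkg/util/cache"
)

// keyFor derives a uint64 index from a string; any stable hash works here.
func keyFor(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	c := cache.NewCache(64) // 64 total entries, spread over 32 shards of 2
	existed := c.Add(keyFor("pod-a"), "status-a")
	fmt.Println(existed) // false: first insert for this key
	if v, ok := c.Get(keyFor("pod-a")); ok {
		fmt.Println(v) // status-a
	}
}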


@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"github.com/hashicorp/golang-lru"
)
// Clock defines an interface for obtaining the current time
type Clock interface {
Now() time.Time
}
// realClock implements the Clock interface by calling time.Now()
type realClock struct{}
func (realClock) Now() time.Time { return time.Now() }
// LRUExpireCache is a cache that ensures the most recently accessed keys are returned with
// a ttl beyond which keys are forcibly expired.
type LRUExpireCache struct {
// clock is used to obtain the current time
clock Clock
cache *lru.Cache
lock sync.Mutex
}
// NewLRUExpireCache creates an expiring cache with the given size
func NewLRUExpireCache(maxSize int) *LRUExpireCache {
return NewLRUExpireCacheWithClock(maxSize, realClock{})
}
// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
cache, err := lru.New(maxSize)
if err != nil {
// if called with an invalid size
panic(err)
}
return &LRUExpireCache{clock: clock, cache: cache}
}
type cacheEntry struct {
value interface{}
expireTime time.Time
}
// Add adds the value to the cache at key with the specified maximum duration.
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
}
// Get returns the value at the specified key from the cache if it exists and is not
// expired, or returns false.
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
c.lock.Lock()
defer c.lock.Unlock()
e, ok := c.cache.Get(key)
if !ok {
return nil, false
}
if c.clock.Now().After(e.(*cacheEntry).expireTime) {
c.cache.Remove(key)
return nil, false
}
return e.(*cacheEntry).value, true
}
// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Remove(key)
}
// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
// get may return not found. It returns all keys from oldest to newest.
func (c *LRUExpireCache) Keys() []interface{} {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Keys()
}
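
A short sketch of how the expiring LRU cache might be used; the key and TTL values are arbitrary:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewLRUExpireCache(128)
	c.Add("token:alice", "abc123", 50*time.Millisecond)

	if v, ok := c.Get("token:alice"); ok {
		fmt.Println(v) // abc123 while the entry is fresh
	}

	time.Sleep(100 * time.Millisecond)
	_, ok := c.Get("token:alice")
	fmt.Println(ok) // false: the TTL has passed, and Get also evicts the entry
}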

vendor/k8s.io/apimachinery/pkg/util/clock/clock.go generated vendored Normal file

@ -0,0 +1,327 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clock
import (
"sync"
"time"
)
// Clock allows for injecting fake or real clocks into code that
// needs to do arbitrary things based on time.
type Clock interface {
Now() time.Time
Since(time.Time) time.Duration
After(d time.Duration) <-chan time.Time
NewTimer(d time.Duration) Timer
Sleep(d time.Duration)
Tick(d time.Duration) <-chan time.Time
}
var (
_ = Clock(RealClock{})
_ = Clock(&FakeClock{})
_ = Clock(&IntervalClock{})
)
// RealClock really calls time.Now()
type RealClock struct{}
// Now returns the current time.
func (RealClock) Now() time.Time {
return time.Now()
}
// Since returns time since the specified timestamp.
func (RealClock) Since(ts time.Time) time.Duration {
return time.Since(ts)
}
// Same as time.After(d).
func (RealClock) After(d time.Duration) <-chan time.Time {
return time.After(d)
}
func (RealClock) NewTimer(d time.Duration) Timer {
return &realTimer{
timer: time.NewTimer(d),
}
}
func (RealClock) Tick(d time.Duration) <-chan time.Time {
return time.Tick(d)
}
func (RealClock) Sleep(d time.Duration) {
time.Sleep(d)
}
// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
lock sync.RWMutex
time time.Time
// waiters are waiting for the fake time to pass their specified time
waiters []fakeClockWaiter
}
type fakeClockWaiter struct {
targetTime time.Time
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
fired bool
}
func NewFakeClock(t time.Time) *FakeClock {
return &FakeClock{
time: t,
}
}
// Now returns f's time.
func (f *FakeClock) Now() time.Time {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time
}
// Since returns time since the time in f.
func (f *FakeClock) Since(ts time.Time) time.Duration {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time.Sub(ts)
}
// Fake version of time.After(d).
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
})
return ch
}
// Fake version of time.NewTimer(d).
func (f *FakeClock) NewTimer(d time.Duration) Timer {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
timer := &fakeTimer{
fakeClock: f,
waiter: fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
},
}
f.waiters = append(f.waiters, timer.waiter)
return timer
}
func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
f.lock.Lock()
defer f.lock.Unlock()
tickTime := f.time.Add(d)
ch := make(chan time.Time, 1) // hold one tick
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: tickTime,
stepInterval: d,
skipIfBlocked: true,
destChan: ch,
})
return ch
}
// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
func (f *FakeClock) Step(d time.Duration) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(f.time.Add(d))
}
// Sets the time.
func (f *FakeClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(t)
}
// Actually changes the time and checks any waiters. f must be write-locked.
func (f *FakeClock) setTimeLocked(t time.Time) {
f.time = t
newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
for i := range f.waiters {
w := &f.waiters[i]
if !w.targetTime.After(t) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
w.fired = true
default:
}
} else {
w.destChan <- t
w.fired = true
}
if w.stepInterval > 0 {
for !w.targetTime.After(t) {
w.targetTime = w.targetTime.Add(w.stepInterval)
}
newWaiters = append(newWaiters, *w)
}
} else {
newWaiters = append(newWaiters, f.waiters[i])
}
}
f.waiters = newWaiters
}
// Returns true if After has been called on f but not yet satisfied (so you can
// write race-free tests).
func (f *FakeClock) HasWaiters() bool {
f.lock.RLock()
defer f.lock.RUnlock()
return len(f.waiters) > 0
}
func (f *FakeClock) Sleep(d time.Duration) {
f.Step(d)
}
// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
type IntervalClock struct {
Time time.Time
Duration time.Duration
}
// Now returns i's time.
func (i *IntervalClock) Now() time.Time {
i.Time = i.Time.Add(i.Duration)
return i.Time
}
// Since returns time since the time in i.
func (i *IntervalClock) Since(ts time.Time) time.Duration {
return i.Time.Sub(ts)
}
// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) After(d time.Duration) <-chan time.Time {
panic("IntervalClock doesn't implement After")
}
// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) NewTimer(d time.Duration) Timer {
panic("IntervalClock doesn't implement NewTimer")
}
// Unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
panic("IntervalClock doesn't implement Tick")
}
func (*IntervalClock) Sleep(d time.Duration) {
panic("IntervalClock doesn't implement Sleep")
}
// Timer allows for injecting fake or real timers into code that
// needs to do arbitrary things based on time.
type Timer interface {
C() <-chan time.Time
Stop() bool
Reset(d time.Duration) bool
}
var (
_ = Timer(&realTimer{})
_ = Timer(&fakeTimer{})
)
// realTimer is backed by an actual time.Timer.
type realTimer struct {
timer *time.Timer
}
// C returns the underlying timer's channel.
func (r *realTimer) C() <-chan time.Time {
return r.timer.C
}
// Stop calls Stop() on the underlying timer.
func (r *realTimer) Stop() bool {
return r.timer.Stop()
}
// Reset calls Reset() on the underlying timer.
func (r *realTimer) Reset(d time.Duration) bool {
return r.timer.Reset(d)
}
// fakeTimer implements Timer based on a FakeClock.
type fakeTimer struct {
fakeClock *FakeClock
waiter fakeClockWaiter
}
// C returns the channel that notifies when this timer has fired.
func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
for i := range f.fakeClock.waiters {
w := &f.fakeClock.waiters[i]
if w != &f.waiter {
newWaiters = append(newWaiters, *w)
}
}
f.fakeClock.waiters = newWaiters
return !f.waiter.fired
}
// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
// fired, or false otherwise.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
active := !f.waiter.fired
f.waiter.fired = false
f.waiter.targetTime = f.fakeClock.time.Add(d)
return active
}
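
A small sketch showing how FakeClock lets a test advance time deterministically:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Date(2017, 8, 4, 0, 0, 0, 0, time.UTC))

	ch := fc.After(5 * time.Second) // nothing fires until the clock is stepped
	fmt.Println(fc.HasWaiters())    // true

	fc.Step(5 * time.Second) // advance the fake time; the waiter is notified
	fired := <-ch
	fmt.Println(fired.Equal(fc.Now())) // true
}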

vendor/k8s.io/apimachinery/pkg/util/diff/diff.go generated vendored Normal file

@ -0,0 +1,280 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"text/tabwriter"
"github.com/davecgh/go-spew/spew"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// StringDiff diffs a and b and returns a human readable diff.
func StringDiff(a, b string) string {
ba := []byte(a)
bb := []byte(b)
out := []byte{}
i := 0
for ; i < len(ba) && i < len(bb); i++ {
if ba[i] != bb[i] {
break
}
out = append(out, ba[i])
}
out = append(out, []byte("\n\nA: ")...)
out = append(out, ba[i:]...)
out = append(out, []byte("\n\nB: ")...)
out = append(out, bb[i:]...)
out = append(out, []byte("\n\n")...)
return string(out)
}
// ObjectDiff writes the two objects out as JSON and prints out the identical part of
// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'.
// For debugging tests.
func ObjectDiff(a, b interface{}) string {
ab, err := json.Marshal(a)
if err != nil {
panic(fmt.Sprintf("a: %v", err))
}
bb, err := json.Marshal(b)
if err != nil {
panic(fmt.Sprintf("b: %v", err))
}
return StringDiff(string(ab), string(bb))
}
// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects,
// which shows absolutely everything by recursing into every single pointer
// (go's %#v formatters OTOH stop at a certain point). This is needed when you
// can't figure out why reflect.DeepEqual is returning false and nothing is
// showing you differences. This will.
func ObjectGoPrintDiff(a, b interface{}) string {
s := spew.ConfigState{DisableMethods: true}
return StringDiff(
s.Sprintf("%#v", a),
s.Sprintf("%#v", b),
)
}
func ObjectReflectDiff(a, b interface{}) string {
vA, vB := reflect.ValueOf(a), reflect.ValueOf(b)
if vA.Type() != vB.Type() {
return fmt.Sprintf("type A %T and type B %T do not match", a, b)
}
diffs := objectReflectDiff(field.NewPath("object"), vA, vB)
if len(diffs) == 0 {
return "<no diffs>"
}
out := []string{""}
for _, d := range diffs {
out = append(out,
fmt.Sprintf("%s:", d.path),
limit(fmt.Sprintf(" a: %#v", d.a), 80),
limit(fmt.Sprintf(" b: %#v", d.b), 80),
)
}
return strings.Join(out, "\n")
}
func limit(s string, max int) string {
if len(s) > max {
return s[:max]
}
return s
}
func public(s string) bool {
if len(s) == 0 {
return false
}
return s[:1] == strings.ToUpper(s[:1])
}
type diff struct {
path *field.Path
a, b interface{}
}
type orderedDiffs []diff
func (d orderedDiffs) Len() int { return len(d) }
func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d orderedDiffs) Less(i, j int) bool {
a, b := d[i].path.String(), d[j].path.String()
if a < b {
return true
}
return false
}
func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
switch a.Type().Kind() {
case reflect.Struct:
var changes []diff
for i := 0; i < a.Type().NumField(); i++ {
if !public(a.Type().Field(i).Name) {
if reflect.DeepEqual(a.Interface(), b.Interface()) {
continue
}
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 {
changes = append(changes, sub...)
} else {
if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) {
changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()})
}
}
}
return changes
case reflect.Ptr, reflect.Interface:
if a.IsNil() || b.IsNil() {
switch {
case a.IsNil() && b.IsNil():
return nil
case a.IsNil():
return []diff{{path: path, a: nil, b: b.Interface()}}
default:
return []diff{{path: path, a: a.Interface(), b: nil}}
}
}
return objectReflectDiff(path, a.Elem(), b.Elem())
case reflect.Chan:
if !reflect.DeepEqual(a.Interface(), b.Interface()) {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
case reflect.Slice:
lA, lB := a.Len(), b.Len()
l := lA
if lB < lA {
l = lB
}
if lA == lB && lA == 0 {
if a.IsNil() != b.IsNil() {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
}
for i := 0; i < l; i++ {
if !reflect.DeepEqual(a.Index(i), b.Index(i)) {
return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))
}
}
var diffs []diff
for i := l; i < lA; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil})
}
for i := l; i < lB; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)})
}
if len(diffs) == 0 {
diffs = append(diffs, diff{path: path, a: a, b: b})
}
return diffs
case reflect.Map:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
aKeys := make(map[interface{}]interface{})
for _, key := range a.MapKeys() {
aKeys[key.Interface()] = a.MapIndex(key).Interface()
}
var missing []diff
for _, key := range b.MapKeys() {
if _, ok := aKeys[key.Interface()]; ok {
delete(aKeys, key.Interface())
if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) {
continue
}
missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...)
continue
}
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()})
}
for key, value := range aKeys {
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil})
}
if len(missing) == 0 {
missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()})
}
sort.Sort(orderedDiffs(missing))
return missing
default:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
if !a.CanInterface() {
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
}
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
// enabling easy visual scanning for mismatches.
func ObjectGoPrintSideBySide(a, b interface{}) string {
s := spew.ConfigState{
Indent: " ",
// Extra deep spew.
DisableMethods: true,
}
sA := s.Sdump(a)
sB := s.Sdump(b)
linesA := strings.Split(sA, "\n")
linesB := strings.Split(sB, "\n")
width := 0
for _, s := range linesA {
l := len(s)
if l > width {
width = l
}
}
for _, s := range linesB {
l := len(s)
if l > width {
width = l
}
}
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
max := len(linesA)
if len(linesB) > max {
max = len(linesB)
}
for i := 0; i < max; i++ {
var a, b string
if i < len(linesA) {
a = linesA[i]
}
if i < len(linesB) {
b = linesB[i]
}
fmt.Fprintf(w, "%s\t%s\n", a, b)
}
w.Flush()
return buf.String()
}
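
A brief sketch of the diff helpers on a throwaway struct; the struct and values are only for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

type podSpec struct {
	Name     string
	Replicas int
}

func main() {
	a := podSpec{Name: "web", Replicas: 2}
	b := podSpec{Name: "web", Replicas: 3}

	// Field-by-field comparison; prints something like:
	// object.Replicas:
	//  a: 2
	//  b: 3
	fmt.Println(diff.ObjectReflectDiff(a, b))

	// JSON-based comparison: common prefix, then the remainders of a and b.
	fmt.Println(diff.ObjectDiff(a, b))
}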


@ -21,6 +21,9 @@ import (
"fmt"
)
// MessageCountMap contains the number of occurrences for each error message.
type MessageCountMap map[string]int
// Aggregate represents an object that contains multiple errors, but does not
// necessarily have singular semantic meaning.
type Aggregate interface {
@ -147,6 +150,22 @@ func Flatten(agg Aggregate) Aggregate {
return NewAggregate(result)
}
// CreateAggregateFromMessageCountMap converts a MessageCountMap into an Aggregate
func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
if m == nil {
return nil
}
result := make([]error, 0, len(m))
for errStr, count := range m {
var countStr string
if count > 1 {
countStr = fmt.Sprintf(" (repeated %v times)", count)
}
result = append(result, fmt.Errorf("%v%v", errStr, countStr))
}
return NewAggregate(result)
}
// Reduce will return err or, if err is an Aggregate and only has one item,
// the first item in the aggregate.
func Reduce(err error) error {
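
A quick sketch of the new MessageCountMap type and CreateAggregateFromMessageCountMap helper added above; the messages are invented, and the ordering of the aggregated errors follows map iteration order:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	counts := utilerrors.MessageCountMap{
		"failed to reach node": 3,
		"request timed out":    1,
	}
	agg := utilerrors.CreateAggregateFromMessageCountMap(counts)
	// One error per distinct message, with "(repeated N times)" appended when N > 1.
	fmt.Println(agg.Error())
}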


@ -18,9 +18,11 @@ package spdy
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
@ -33,6 +35,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/httpstream"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/third_party/forked/golang/netutil"
)
@ -59,25 +62,49 @@ type SpdyRoundTripper struct {
// proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
// Used primarily for mocking the proxy discovery in tests.
proxier func(req *http.Request) (*url.URL, error)
// followRedirects indicates if the round tripper should examine responses for redirects and
// follow them.
followRedirects bool
}
var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig)
func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig, followRedirects)
}
// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig. This function is mostly meant for unit tests.
func NewSpdyRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper {
return &SpdyRoundTripper{tlsConfig: tlsConfig}
func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper {
return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects}
}
// implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during proxying with a spdy roundtripper
// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
// proxying with a spdy roundtripper.
func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
return s.tlsConfig
}
// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
conn, err := s.dial(req)
if err != nil {
return nil, err
}
if err := req.Write(conn); err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
// dial dials the host specified by req, using TLS if appropriate, optionally
// using a proxy server if one is configured via environment variables.
func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
@ -131,15 +158,16 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
return nil, err
}
if s.tlsConfig == nil {
s.tlsConfig = &tls.Config{}
tlsConfig := s.tlsConfig
switch {
case tlsConfig == nil:
tlsConfig = &tls.Config{ServerName: host}
case len(tlsConfig.ServerName) == 0:
tlsConfig = tlsConfig.Clone()
tlsConfig.ServerName = host
}
if len(s.tlsConfig.ServerName) == 0 {
s.tlsConfig.ServerName = host
}
tlsConn := tls.Client(rwc, s.tlsConfig)
tlsConn := tls.Client(rwc, tlsConfig)
// need to manually call Handshake() so we can call VerifyHostname() below
if err := tlsConn.Handshake(); err != nil {
@ -147,11 +175,11 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
}
// Return if we were configured to skip validation
if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
if tlsConfig.InsecureSkipVerify {
return tlsConn, nil
}
if err := tlsConn.VerifyHostname(host); err != nil {
if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
return nil, err
}
@ -191,6 +219,9 @@ func (s *SpdyRoundTripper) dialWithoutProxy(url *url.URL) (net.Conn, error) {
if err != nil {
return nil, err
}
if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
host = s.tlsConfig.ServerName
}
err = conn.VerifyHostname(host)
if err != nil {
return nil, err
@ -213,24 +244,39 @@ func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
// connection.
func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
// TODO what's the best way to clone the request?
r := *req
req = &r
req.Header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
req.Header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
header := utilnet.CloneHeader(req.Header)
header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
conn, err := s.dial(req)
var (
conn net.Conn
rawResponse []byte
err error
)
if s.followRedirects {
conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s)
} else {
clone := utilnet.CloneRequest(req)
clone.Header = header
conn, err = s.Dial(clone)
}
if err != nil {
return nil, err
}
err = req.Write(conn)
if err != nil {
return nil, err
}
responseReader := bufio.NewReader(
io.MultiReader(
bytes.NewBuffer(rawResponse),
conn,
),
)
resp, err := http.ReadResponse(bufio.NewReader(conn), req)
resp, err := http.ReadResponse(responseReader, nil)
if err != nil {
if conn != nil {
conn.Close()
}
return nil, err
}
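
With this change NewRoundTripper and NewSpdyRoundTripper take an extra followRedirects flag. A minimal sketch of constructing one; the TLS settings are placeholders:

package main

import (
	"crypto/tls"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	// followRedirects=true makes RoundTrip follow HTTP 302s via ConnectWithRedirects
	// before attempting the SPDY upgrade.
	rt := spdy.NewRoundTripper(&tls.Config{ServerName: "example.invalid"}, true)
	_ = rt // typically handed to code that negotiates an httpstream connection
}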


@ -17,9 +17,13 @@ limitations under the License.
package spdy
import (
"bufio"
"fmt"
"io"
"net"
"net/http"
"strings"
"sync/atomic"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
@ -32,6 +36,30 @@ const HeaderSpdy31 = "SPDY/3.1"
type responseUpgrader struct {
}
// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
// calls will be handled directly by the underlying net.Conn with the exception
// of Read and Close calls, which will consider data in the bufio.Reader. This
// ensures that data already inside the used bufio.Reader instance is also
// read.
type connWrapper struct {
net.Conn
closed int32
bufReader *bufio.Reader
}
func (w *connWrapper) Read(b []byte) (n int, err error) {
if atomic.LoadInt32(&w.closed) == 1 {
return 0, io.EOF
}
return w.bufReader.Read(b)
}
func (w *connWrapper) Close() error {
err := w.Conn.Close()
atomic.StoreInt32(&w.closed, 1)
return err
}
// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
// capable of upgrading HTTP responses using SPDY/3.1 via the
// spdystream package.
@ -62,13 +90,14 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque
w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
w.WriteHeader(http.StatusSwitchingProtocols)
conn, _, err := hijacker.Hijack()
conn, bufrw, err := hijacker.Hijack()
if err != nil {
runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
return nil
}
spdyConn, err := NewServerConnection(conn, newStreamHandler)
connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
if err != nil {
runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
return nil


@ -42,7 +42,9 @@ var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
func (m *IntOrString) Reset() { *m = IntOrString{} }
func (*IntOrString) ProtoMessage() {}
@ -51,59 +53,59 @@ func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerate
func init() {
proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
}
func (m *IntOrString) Marshal() (data []byte, err error) {
func (m *IntOrString) Marshal() (dAtA []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return data[:n], nil
return dAtA[:n], nil
}
func (m *IntOrString) MarshalTo(data []byte) (int, error) {
func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
dAtA[i] = 0x8
i++
i = encodeVarintGenerated(data, i, uint64(m.Type))
data[i] = 0x10
i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
dAtA[i] = 0x10
i++
i = encodeVarintGenerated(data, i, uint64(m.IntVal))
data[i] = 0x1a
i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(data, i, uint64(len(m.StrVal)))
i += copy(data[i:], m.StrVal)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
i += copy(dAtA[i:], m.StrVal)
return i, nil
}
func encodeFixed64Generated(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *IntOrString) Size() (n int) {
@ -129,8 +131,8 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *IntOrString) Unmarshal(data []byte) error {
l := len(data)
func (m *IntOrString) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@ -142,7 +144,7 @@ func (m *IntOrString) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -170,7 +172,7 @@ func (m *IntOrString) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.Type |= (Type(b) & 0x7F) << shift
if b < 0x80 {
@ -189,7 +191,7 @@ func (m *IntOrString) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
m.IntVal |= (int32(b) & 0x7F) << shift
if b < 0x80 {
@ -208,7 +210,7 @@ func (m *IntOrString) Unmarshal(data []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -223,11 +225,11 @@ func (m *IntOrString) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StrVal = string(data[iNdEx:postIndex])
m.StrVal = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
@ -246,8 +248,8 @@ func (m *IntOrString) Unmarshal(data []byte) error {
}
return nil
}
func skipGenerated(data []byte) (n int, err error) {
l := len(data)
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
@ -258,7 +260,7 @@ func skipGenerated(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -276,7 +278,7 @@ func skipGenerated(data []byte) (n int, err error) {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
if dAtA[iNdEx-1] < 0x80 {
break
}
}
@ -293,7 +295,7 @@ func skipGenerated(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -316,7 +318,7 @@ func skipGenerated(data []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -327,7 +329,7 @@ func skipGenerated(data []byte) (n int, err error) {
if innerWireType == 4 {
break
}
next, err := skipGenerated(data[start:])
next, err := skipGenerated(dAtA[start:])
if err != nil {
return 0, err
}
@ -351,24 +353,29 @@ var (
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorGenerated = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0xf4, 0x30,
0x1c, 0xc6, 0x93, 0xf7, 0xee, 0x3d, 0xb4, 0x82, 0x43, 0x71, 0x38, 0x1c, 0xd2, 0xa2, 0x20, 0x5d,
0x4c, 0x56, 0x71, 0xec, 0x76, 0x20, 0x08, 0x3d, 0x71, 0x70, 0x6b, 0xef, 0x62, 0x2e, 0xf4, 0x2e,
0x09, 0xe9, 0xbf, 0x42, 0xb7, 0xfb, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x3a, 0xde, 0xe8, 0x20, 0x87,
0xad, 0xdf, 0xc2, 0x49, 0x9a, 0x16, 0x74, 0x4a, 0x9e, 0xe7, 0xf9, 0xfd, 0x02, 0xf1, 0x6e, 0xf2,
0xab, 0x82, 0x4a, 0xcd, 0xf2, 0x32, 0xe3, 0x56, 0x71, 0xe0, 0x05, 0x7b, 0xe2, 0x6a, 0xa9, 0x2d,
0x1b, 0x86, 0xd4, 0xc8, 0x4d, 0xba, 0x58, 0x49, 0xc5, 0x6d, 0xc5, 0x4c, 0x2e, 0x58, 0x09, 0x72,
0xcd, 0xa4, 0x82, 0x02, 0x2c, 0x13, 0x5c, 0x71, 0x9b, 0x02, 0x5f, 0x52, 0x63, 0x35, 0x68, 0xff,
0xbc, 0x97, 0xe8, 0x5f, 0x89, 0x9a, 0x5c, 0xd0, 0x4e, 0xa2, 0xbd, 0x74, 0x7a, 0x29, 0x24, 0xac,
0xca, 0x8c, 0x2e, 0xf4, 0x86, 0x09, 0x2d, 0x34, 0x73, 0x6e, 0x56, 0x3e, 0xba, 0xe4, 0x82, 0xbb,
0xf5, 0x6f, 0x9e, 0xbd, 0x60, 0xef, 0x68, 0xa6, 0xe0, 0xd6, 0xce, 0xc1, 0x4a, 0x25, 0xfc, 0xc8,
0x1b, 0x43, 0x65, 0xf8, 0x14, 0x87, 0x38, 0x1a, 0xc5, 0x27, 0xf5, 0x3e, 0x40, 0xed, 0x3e, 0x18,
0xdf, 0x55, 0x86, 0x7f, 0x0f, 0x67, 0xe2, 0x08, 0xff, 0xc2, 0x9b, 0x48, 0x05, 0xf7, 0xe9, 0x7a,
0xfa, 0x2f, 0xc4, 0xd1, 0xff, 0xf8, 0x78, 0x60, 0x27, 0x33, 0xd7, 0x26, 0xc3, 0xda, 0x71, 0x05,
0xd8, 0x8e, 0x1b, 0x85, 0x38, 0x3a, 0xfc, 0xe5, 0xe6, 0xae, 0x4d, 0x86, 0xf5, 0xfa, 0xe0, 0xf5,
0x2d, 0x40, 0xdb, 0x8f, 0x10, 0xc5, 0x51, 0xdd, 0x10, 0xb4, 0x6b, 0x08, 0x7a, 0x6f, 0x08, 0xda,
0xb6, 0x04, 0xd7, 0x2d, 0xc1, 0xbb, 0x96, 0xe0, 0xcf, 0x96, 0xe0, 0xe7, 0x2f, 0x82, 0x1e, 0x26,
0xfd, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x55, 0xdf, 0x2a, 0x60, 0x01, 0x00, 0x00,
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated)
}
var fileDescriptorGenerated = []byte{
// 292 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
0x64, 0x01, 0x00, 0x00,
}


@ -19,11 +19,46 @@ package mergepatch
import (
"errors"
"fmt"
"reflect"
)
var ErrBadJSONDoc = errors.New("Invalid JSON document")
var ErrNoListOfLists = errors.New("Lists of lists are not supported")
var ErrBadPatchFormatForPrimitiveList = errors.New("Invalid patch format of primitive list")
var (
ErrBadJSONDoc = errors.New("invalid JSON document")
ErrNoListOfLists = errors.New("lists of lists are not supported")
ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list")
ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys")
ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list")
ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list")
)
func ErrNoMergeKey(m map[string]interface{}, k string) error {
return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k)
}
func ErrBadArgType(expected, actual interface{}) error {
return fmt.Errorf("expected a %s, but received a %s",
reflect.TypeOf(expected),
reflect.TypeOf(actual))
}
func ErrBadArgKind(expected, actual interface{}) error {
var expectedKindString, actualKindString string
if expected == nil {
expectedKindString = "nil"
} else {
expectedKindString = reflect.TypeOf(expected).Kind().String()
}
if actual == nil {
actualKindString = "nil"
} else {
actualKindString = reflect.TypeOf(actual).Kind().String()
}
return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString)
}
func ErrBadPatchType(t interface{}, m map[string]interface{}) error {
return fmt.Errorf("unknown patch type: %s in map: %v", t, m)
}
// IsPreconditionFailed returns true if the provided error indicates
// a precondition failed.


@ -17,6 +17,8 @@ limitations under the License.
package net
import (
"bufio"
"bytes"
"crypto/tls"
"fmt"
"io"
@ -95,7 +97,7 @@ type RoundTripperWrapper interface {
type DialFunc func(net, addr string) (net.Conn, error)
func Dialer(transport http.RoundTripper) (DialFunc, error) {
func DialerFor(transport http.RoundTripper) (DialFunc, error) {
if transport == nil {
return nil, nil
}
@ -104,40 +106,12 @@ func Dialer(transport http.RoundTripper) (DialFunc, error) {
case *http.Transport:
return transport.Dial, nil
case RoundTripperWrapper:
return Dialer(transport.WrappedRoundTripper())
return DialerFor(transport.WrappedRoundTripper())
default:
return nil, fmt.Errorf("unknown transport type: %v", transport)
}
}
// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied.
// This makes it safe to call CloneTLSConfig on a config in active use by a server.
// TODO: replace with tls.Config#Clone when we move to go1.8
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}
type TLSClientConfigHolder interface {
TLSClientConfig() *tls.Config
}
@ -176,13 +150,13 @@ func GetHTTPClient(req *http.Request) string {
return "unknown"
}
// Extracts and returns the clients IP from the given request.
// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order.
// Returns nil if none of them are set or is set to an invalid value.
func GetClientIP(req *http.Request) net.IP {
// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr,
// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid.
func SourceIPs(req *http.Request) []net.IP {
hdr := req.Header
// First check the X-Forwarded-For header for requests via proxy.
hdrForwardedFor := hdr.Get("X-Forwarded-For")
forwardedForIPs := []net.IP{}
if hdrForwardedFor != "" {
// X-Forwarded-For can be a csv of IPs in case of multiple proxies.
// Use the first valid one.
@ -190,17 +164,20 @@ func GetClientIP(req *http.Request) net.IP {
for _, part := range parts {
ip := net.ParseIP(strings.TrimSpace(part))
if ip != nil {
return ip
forwardedForIPs = append(forwardedForIPs, ip)
}
}
}
if len(forwardedForIPs) > 0 {
return forwardedForIPs
}
// Try the X-Real-Ip header.
hdrRealIp := hdr.Get("X-Real-Ip")
if hdrRealIp != "" {
ip := net.ParseIP(hdrRealIp)
if ip != nil {
return ip
return []net.IP{ip}
}
}
@ -208,11 +185,43 @@ func GetClientIP(req *http.Request) net.IP {
// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err == nil {
return net.ParseIP(host)
if remoteIP := net.ParseIP(host); remoteIP != nil {
return []net.IP{remoteIP}
}
}
// Fallback if Remote Address was just IP.
return net.ParseIP(req.RemoteAddr)
if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil {
return []net.IP{remoteIP}
}
return nil
}
// Extracts and returns the clients IP from the given request.
// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order.
// Returns nil if none of them are set or is set to an invalid value.
func GetClientIP(req *http.Request) net.IP {
ips := SourceIPs(req)
if len(ips) == 0 {
return nil
}
return ips[0]
}
// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's
// IP address to the X-Forwarded-For chain.
func AppendForwardedForHeader(req *http.Request) {
// Copied from net/http/httputil/reverseproxy.go:
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := req.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
req.Header.Set("X-Forwarded-For", clientIP)
}
}
var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment)
@ -267,3 +276,126 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
return delegate(req)
}
}
// Dialer dials a host and writes a request to it.
type Dialer interface {
// Dial connects to the host specified by req's URL, writes the request to the connection, and
// returns the opened net.Conn.
Dial(req *http.Request) (net.Conn, error)
}
// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
// originalLocation). It returns the opened net.Conn and the raw response bytes.
func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) {
const (
maxRedirects = 10
maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
)
var (
location = originalLocation
method = originalMethod
intermediateConn net.Conn
rawResponse = bytes.NewBuffer(make([]byte, 0, 256))
body = originalBody
)
defer func() {
if intermediateConn != nil {
intermediateConn.Close()
}
}()
redirectLoop:
for redirects := 0; ; redirects++ {
if redirects > maxRedirects {
return nil, nil, fmt.Errorf("too many redirects (%d)", redirects)
}
req, err := http.NewRequest(method, location.String(), body)
if err != nil {
return nil, nil, err
}
req.Header = header
intermediateConn, err = dialer.Dial(req)
if err != nil {
return nil, nil, err
}
// Peek at the backend response.
rawResponse.Reset()
respReader := bufio.NewReader(io.TeeReader(
io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes.
rawResponse)) // Save the raw response.
resp, err := http.ReadResponse(respReader, nil)
if err != nil {
// Unable to read the backend response; let the client handle it.
glog.Warningf("Error reading backend response: %v", err)
break redirectLoop
}
switch resp.StatusCode {
case http.StatusFound:
// Redirect, continue.
default:
// Don't redirect.
break redirectLoop
}
// Redirected requests switch to "GET" according to the HTTP spec:
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
method = "GET"
// don't send a body when following redirects
body = nil
resp.Body.Close() // not used
// Reset the connection.
intermediateConn.Close()
intermediateConn = nil
// Prepare to follow the redirect.
redirectStr := resp.Header.Get("Location")
if redirectStr == "" {
return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode)
}
// We have to parse relative to the current location, NOT originalLocation. For example,
// if we request http://foo.com/a and get back "http://bar.com/b", the result should be
// http://bar.com/b. If we then make that request and get back a redirect to "/c", the result
// should be http://bar.com/c, not http://foo.com/c.
location, err = location.Parse(redirectStr)
if err != nil {
return nil, nil, fmt.Errorf("malformed Location header: %v", err)
}
}
connToReturn := intermediateConn
intermediateConn = nil // Don't close the connection when we return it.
return connToReturn, rawResponse.Bytes(), nil
}
// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers.
func CloneRequest(req *http.Request) *http.Request {
r := new(http.Request)
// shallow clone
*r = *req
// deep copy headers
r.Header = CloneHeader(req.Header)
return r
}
// CloneHeader creates a deep copy of an http.Header.
func CloneHeader(in http.Header) http.Header {
out := make(http.Header, len(in))
for key, values := range in {
newValues := make([]string, len(values))
copy(newValues, values)
out[key] = newValues
}
return out
}
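
A short sketch of the reworked client-IP helpers and the new header/request cloning utilities; the addresses are examples:

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/healthz", nil)
	req.Header.Set("X-Forwarded-For", "10.0.0.1, 203.0.113.7")
	req.RemoteAddr = "192.0.2.9:54321"

	fmt.Println(utilnet.SourceIPs(req))   // [10.0.0.1 203.0.113.7]
	fmt.Println(utilnet.GetClientIP(req)) // 10.0.0.1 (first valid source IP)

	clone := utilnet.CloneRequest(req)
	clone.Header.Set("X-Trace", "1")             // deep-copied header: req.Header is untouched
	fmt.Println(req.Header.Get("X-Trace") == "") // true
}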


@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
DefaultStreamCreationTimeout = 30 * time.Second
// The SPDY subprotocol "channel.k8s.io" is used for remote command
// attachment/execution. This represents the initial unversioned subprotocol,
// which has the known bugs http://issues.k8s.io/13394 and
// http://issues.k8s.io/13395.
StreamProtocolV1Name = "channel.k8s.io"
// The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
// attachment/execution. It is the second version of the subprotocol and
// resolves the issues present in the first version.
StreamProtocolV2Name = "v2.channel.k8s.io"
// The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
// attachment/execution. It is the third version of the subprotocol and
// adds support for resizing container terminals.
StreamProtocolV3Name = "v3.channel.k8s.io"
// The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
// attachment/execution. It is the 4th version of the subprotocol and
// adds support for exit codes.
StreamProtocolV4Name = "v4.channel.k8s.io"
NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
ExitCodeCauseType = metav1.CauseType("ExitCode")
)
var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}

File diff suppressed because it is too large


@ -197,16 +197,16 @@ const (
maxGroupID = math.MaxInt32
)
// IsValidGroupId tests that the argument is a valid Unix GID.
func IsValidGroupId(gid int64) []string {
// IsValidGroupID tests that the argument is a valid Unix GID.
func IsValidGroupID(gid int64) []string {
if minGroupID <= gid && gid <= maxGroupID {
return nil
}
return []string{InclusiveRangeError(minGroupID, maxGroupID)}
}
// IsValidUserId tests that the argument is a valid Unix UID.
func IsValidUserId(uid int64) []string {
// IsValidUserID tests that the argument is a valid Unix UID.
func IsValidUserID(uid int64) []string {
if minUserID <= uid && uid <= maxUserID {
return nil
}


@ -65,16 +65,18 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jitterd.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If slidingis true, the period is computed after f runs. If it is false then
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
for {
var t *time.Timer
var sawTimeout bool
for {
select {
case <-stopCh:
return
@ -86,9 +88,8 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
jitteredPeriod = Jitter(period, jitterFactor)
}
var t *time.Timer
if !sliding {
t = time.NewTimer(jitteredPeriod)
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
func() {
@ -97,7 +98,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
}()
if sliding {
t = time.NewTimer(jitteredPeriod)
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
// NOTE: b/c there is no priority selection in golang
@ -109,6 +110,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
case <-stopCh:
return
case <-t.C:
sawTimeout = true
}
}
}
@ -136,14 +138,14 @@ type ConditionFunc func() (done bool, err error)
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multipled by factor each iteration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multipling
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
@ -184,7 +186,9 @@ func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
return WaitFor(wait, condition, NeverStop)
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
@ -330,3 +334,16 @@ func poller(interval, timeout time.Duration) WaitFunc {
return ch
})
}
// resetOrReuseTimer avoids allocating a new timer if one is already in use.
// Not safe for multiple threads.
func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer {
if t == nil {
return time.NewTimer(d)
}
if !t.Stop() && !sawTimeout {
<-t.C
}
t.Reset(d)
return t
}
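
A closing sketch of the backoff helpers from this package; checkReady is a stand-in for whatever condition the caller is waiting on:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	checkReady := func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend we become ready on the third try
	}

	backoff := wait.Backoff{
		Duration: 10 * time.Millisecond, // base duration
		Factor:   2.0,                   // wait is multiplied by this each iteration
		Jitter:   0.1,
		Steps:    5, // give up (with an error) after 5 attempts
	}
	if err := wait.ExponentialBackoff(backoff, checkReady); err != nil {
		fmt.Println("never became ready:", err)
	}
	fmt.Println("attempts:", attempts) // 3
}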