vendor: remove dep and use vndr
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
This commit is contained in:
parent 16f44674a4
commit 148e72d81e

16131 changed files with 73815 additions and 4235138 deletions

vendor/k8s.io/client-go/util/cert/cert.go (generated, vendored): 6 lines changed

@@ -128,7 +128,7 @@ func MakeEllipticPrivateKeyPEM() ([]byte, error) {
 	}
 
 	privateKeyPemBlock := &pem.Block{
-		Type:  "EC PRIVATE KEY",
+		Type:  ECPrivateKeyBlockType,
 		Bytes: derBytes,
 	}
 	return pem.EncodeToMemory(privateKeyPemBlock), nil
@@ -173,13 +173,13 @@ func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
 
 	// Generate cert
 	certBuffer := bytes.Buffer{}
-	if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+	if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
 		return nil, nil, err
 	}
 
 	// Generate key
 	keyBuffer := bytes.Buffer{}
-	if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+	if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
 		return nil, nil, err
 	}
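
For orientation only (not part of the commit): a minimal sketch of how the constant introduced above lines up with the PEM output of MakeEllipticPrivateKeyPEM. The certutil import alias is an assumption.

package main

import (
	"encoding/pem"
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// MakeEllipticPrivateKeyPEM returns a PEM-encoded EC private key, per the hunk above.
	keyPEM, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(keyPEM)
	// After this commit the block type is the exported constant rather than a string literal.
	fmt.Println(block.Type == certutil.ECPrivateKeyBlockType) // true
}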

vendor/k8s.io/client-go/util/cert/csr.go (generated, vendored): 52 lines changed

@@ -28,36 +28,48 @@ import (
 // MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
 // All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
 func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
-	// Customize the signature for RSA keys, depending on the key size
-	var sigType x509.SignatureAlgorithm
-	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
-		keySize := privateKey.N.BitLen()
-		switch {
-		case keySize >= 4096:
-			sigType = x509.SHA512WithRSA
-		case keySize >= 3072:
-			sigType = x509.SHA384WithRSA
-		default:
-			sigType = x509.SHA256WithRSA
-		}
-	}
-
 	template := &x509.CertificateRequest{
-		Subject:            *subject,
-		SignatureAlgorithm: sigType,
-		DNSNames:           dnsSANs,
-		IPAddresses:        ipSANs,
+		Subject:     *subject,
+		DNSNames:    dnsSANs,
+		IPAddresses: ipSANs,
 	}
 
-	csr, err = x509.CreateCertificateRequest(cryptorand.Reader, template, privateKey)
+	return MakeCSRFromTemplate(privateKey, template)
+}
+
+// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
+// key and certificate request as a template. All key types that are
+// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
+// and *ecdsa.PrivateKey.)
+func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
+	t := *template
+	t.SignatureAlgorithm = sigType(privateKey)
+
+	csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
 	if err != nil {
 		return nil, err
 	}
 
 	csrPemBlock := &pem.Block{
 		Type:  "CERTIFICATE REQUEST",
-		Bytes: csr,
+		Bytes: csrDER,
 	}
 
 	return pem.EncodeToMemory(csrPemBlock), nil
 }
+
+func sigType(privateKey interface{}) x509.SignatureAlgorithm {
+	// Customize the signature for RSA keys, depending on the key size
+	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
+		keySize := privateKey.N.BitLen()
+		switch {
+		case keySize >= 4096:
+			return x509.SHA512WithRSA
+		case keySize >= 3072:
+			return x509.SHA384WithRSA
+		default:
+			return x509.SHA256WithRSA
+		}
+	}
+	return x509.UnknownSignatureAlgorithm
+}
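
As a usage sketch (not part of the commit): the new MakeCSRFromTemplate accepts a caller-built x509.CertificateRequest instead of the fixed subject/SAN arguments of MakeCSR, while the signature algorithm is still chosen by the unexported sigType helper. Generating the key with ecdsa here is an illustrative assumption.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Any CertificateRequest field may be set; SignatureAlgorithm is overwritten
	// by MakeCSRFromTemplate based on the key, as shown in the diff above.
	tmpl := &x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: "kube-worker"},
		DNSNames: []string{"localhost"},
	}
	csrPEM, err := certutil.MakeCSRFromTemplate(key, tmpl)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", csrPEM)
}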

vendor/k8s.io/client-go/util/cert/csr_test.go (generated, vendored): 46 lines removed

@@ -1,46 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cert
-
-import (
-	"crypto/x509/pkix"
-	"io/ioutil"
-	"net"
-	"testing"
-)
-
-func TestMakeCSR(t *testing.T) {
-	keyFile := "testdata/dontUseThisKey.pem"
-	subject := &pkix.Name{
-		CommonName: "kube-worker",
-	}
-	dnsSANs := []string{"localhost"}
-	ipSANs := []net.IP{net.ParseIP("127.0.0.1")}
-
-	keyData, err := ioutil.ReadFile(keyFile)
-	if err != nil {
-		t.Fatal(err)
-	}
-	key, err := ParsePrivateKeyPEM(keyData)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = MakeCSR(key, subject, dnsSANs, ipSANs)
-	if err != nil {
-		t.Error(err)
-	}
-}

vendor/k8s.io/client-go/util/cert/io.go (generated, vendored): 21 lines changed

@@ -86,6 +86,27 @@ func WriteKey(keyPath string, data []byte) error {
 	return nil
 }
 
+// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
+// can't find one, it will generate a new key and store it there.
+func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
+	loadedData, err := ioutil.ReadFile(keyPath)
+	if err == nil {
+		return loadedData, false, err
+	}
+	if !os.IsNotExist(err) {
+		return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
+	}
+
+	generatedData, err := MakeEllipticPrivateKeyPEM()
+	if err != nil {
+		return nil, false, fmt.Errorf("error generating key: %v", err)
+	}
+	if err := WriteKey(keyPath, generatedData); err != nil {
+		return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
+	}
+	return generatedData, true, nil
+}
+
 // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
 // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
 func NewPool(filename string) (*x509.CertPool, error) {
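
A short sketch (not part of the diff) of the new LoadOrGenerateKeyFile helper; the file path is a placeholder.

package main

import (
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// The first call generates an EC key at the path and reports wasGenerated=true;
	// later calls return the stored bytes with wasGenerated=false.
	keyPEM, wasGenerated, err := certutil.LoadOrGenerateKeyFile("/tmp/demo-key.pem")
	if err != nil {
		panic(err)
	}
	fmt.Println(wasGenerated, len(keyPEM))
}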

vendor/k8s.io/client-go/util/cert/pem.go (generated, vendored): 55 lines changed

@@ -24,6 +24,21 @@ import (
 	"fmt"
 )
 
+const (
+	// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+	ECPrivateKeyBlockType = "EC PRIVATE KEY"
+	// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+	RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+	// CertificateBlockType is a possible value for pem.Block.Type.
+	CertificateBlockType = "CERTIFICATE"
+	// CertificateRequestBlockType is a possible value for pem.Block.Type.
+	CertificateRequestBlockType = "CERTIFICATE REQUEST"
+	// PrivateKeyBlockType is a possible value for pem.Block.Type.
+	PrivateKeyBlockType = "PRIVATE KEY"
+	// PublicKeyBlockType is a possible value for pem.Block.Type.
+	PublicKeyBlockType = "PUBLIC KEY"
+)
+
 // EncodePublicKeyPEM returns PEM-endcode public data
 func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
 	der, err := x509.MarshalPKIXPublicKey(key)
@@ -31,7 +46,7 @@ func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
 		return []byte{}, err
 	}
 	block := pem.Block{
-		Type:  "PUBLIC KEY",
+		Type:  PublicKeyBlockType,
 		Bytes: der,
 	}
 	return pem.EncodeToMemory(&block), nil
@@ -40,7 +55,7 @@ func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
 // EncodePrivateKeyPEM returns PEM-encoded private key data
 func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
 	block := pem.Block{
-		Type:  "RSA PRIVATE KEY",
+		Type:  RSAPrivateKeyBlockType,
 		Bytes: x509.MarshalPKCS1PrivateKey(key),
 	}
 	return pem.EncodeToMemory(&block)
@@ -49,30 +64,46 @@ func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
 // EncodeCertPEM returns PEM-endcoded certificate data
 func EncodeCertPEM(cert *x509.Certificate) []byte {
 	block := pem.Block{
-		Type:  "CERTIFICATE",
+		Type:  CertificateBlockType,
 		Bytes: cert.Raw,
 	}
 	return pem.EncodeToMemory(&block)
 }
 
 // ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
-// Recognizes PEM blocks for "EC PRIVATE KEY" and "RSA PRIVATE KEY"
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
 func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
-	var privateKeyPemBlock *pem.Block
 	for {
+		var privateKeyPemBlock *pem.Block
 		privateKeyPemBlock, keyData = pem.Decode(keyData)
 		if privateKeyPemBlock == nil {
-			// we read all the PEM blocks and didn't recognize one
-			return nil, fmt.Errorf("no private key PEM block found")
+			break
 		}
 
 		switch privateKeyPemBlock.Type {
-		case "EC PRIVATE KEY":
-			return x509.ParseECPrivateKey(privateKeyPemBlock.Bytes)
-		case "RSA PRIVATE KEY":
-			return x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes)
+		case ECPrivateKeyBlockType:
+			// ECDSA Private Key in ASN.1 format
+			if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case RSAPrivateKeyBlockType:
+			// RSA Private Key in PKCS#1 format
+			if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case PrivateKeyBlockType:
+			// RSA or ECDSA Private Key in unencrypted PKCS#8 format
+			if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
 		}
 
+		// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+		// originally, only the first PEM block was parsed and expected to be a key block
 	}
+
+	// we read all the PEM blocks and didn't recognize one
+	return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
 }
 
 // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
@@ -87,7 +118,7 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
 			break
 		}
 		// Only use PEM "CERTIFICATE" blocks without extra headers
-		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+		if block.Type != CertificateBlockType || len(block.Headers) != 0 {
 			continue
 		}
 
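
Illustrative only (not part of the commit): the reworked ParsePrivateKeyPEM now also accepts unencrypted PKCS#8 "PRIVATE KEY" blocks and skips unrelated blocks such as "EC PARAMETERS". This round-trip sketch assumes a Go toolchain that provides x509.MarshalPKCS8PrivateKey.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		panic(err)
	}
	// Encode as the unencrypted PKCS#8 block type added by this commit.
	pkcs8PEM := pem.EncodeToMemory(&pem.Block{Type: certutil.PrivateKeyBlockType, Bytes: der})

	parsed, err := certutil.ParsePrivateKeyPEM(pkcs8PEM)
	if err != nil {
		panic(err)
	}
	_, isECDSA := parsed.(*ecdsa.PrivateKey)
	fmt.Println(isECDSA) // true
}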

vendor/k8s.io/client-go/util/cert/testdata/dontUseThisKey.pem (generated, vendored): 6 lines removed

@@ -1,6 +0,0 @@
------BEGIN EC PRIVATE KEY-----
-MIGkAgEBBDAPEbSXwyDfWf0+61Oofd7aHkmdX69mrzD2Xb1CHF5syfsoRIhnG0dJ
-ozBulPZCDDWgBwYFK4EEACKhZANiAATjlMJAtKhEPqU/i7MsrgKcK/RmXHC6He7W
-0p69+9qFXg2raJ9zvvbKxkiu2ELOYRDAz0utcFTBOIgoUJEzBVmsjZQ7dvFa1BKP
-Ym7MFAKG3O2espBqXn+audgdHGh5B0I=
------END EC PRIVATE KEY-----

vendor/k8s.io/client-go/util/cert/triple/triple.go (generated, vendored): 116 lines removed

@@ -1,116 +0,0 @@
Entire file deleted: the vendored "triple" package, which generated key-certificate pairs for the CA/server/client triple (type KeyPair plus NewCA, NewServerKeyPair, and NewClientKeyPair, built on certutil.NewPrivateKey, NewSelfSignedCACert, and NewSignedCert), together with its Apache 2.0 license header.

vendor/k8s.io/client-go/util/clock/clock.go (generated, vendored): 113 lines changed

@@ -27,6 +27,7 @@ type Clock interface {
 	Now() time.Time
 	Since(time.Time) time.Duration
 	After(d time.Duration) <-chan time.Time
+	NewTimer(d time.Duration) Timer
 	Sleep(d time.Duration)
 	Tick(d time.Duration) <-chan time.Time
 }
@@ -55,6 +56,12 @@ func (RealClock) After(d time.Duration) <-chan time.Time {
 	return time.After(d)
 }
 
+func (RealClock) NewTimer(d time.Duration) Timer {
+	return &realTimer{
+		timer: time.NewTimer(d),
+	}
+}
+
 func (RealClock) Tick(d time.Duration) <-chan time.Time {
 	return time.Tick(d)
 }
@@ -76,7 +83,8 @@ type fakeClockWaiter struct {
 	targetTime    time.Time
 	stepInterval  time.Duration
 	skipIfBlocked bool
-	destChan      chan<- time.Time
+	destChan      chan time.Time
+	fired         bool
 }
 
 func NewFakeClock(t time.Time) *FakeClock {
@@ -112,6 +120,23 @@ func (f *FakeClock) After(d time.Duration) <-chan time.Time {
 	return ch
 }
 
+// Fake version of time.NewTimer(d).
+func (f *FakeClock) NewTimer(d time.Duration) Timer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	stopTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // Don't block!
+	timer := &fakeTimer{
+		fakeClock: f,
+		waiter: fakeClockWaiter{
+			targetTime: stopTime,
+			destChan:   ch,
+		},
+	}
+	f.waiters = append(f.waiters, timer.waiter)
+	return timer
+}
+
 func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -127,7 +152,7 @@ func (f *FakeClock) Tick(d time.Duration) <-chan time.Time {
 	return ch
 }
 
-// Move clock by Duration, notify anyone that's called After or Tick
+// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
 func (f *FakeClock) Step(d time.Duration) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -152,10 +177,12 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
 		if w.skipIfBlocked {
 			select {
 			case w.destChan <- t:
+				w.fired = true
 			default:
 			}
 		} else {
 			w.destChan <- t
+			w.fired = true
 		}
 
 		if w.stepInterval > 0 {
@@ -207,6 +234,12 @@ func (*IntervalClock) After(d time.Duration) <-chan time.Time {
 	panic("IntervalClock doesn't implement After")
 }
 
+// Unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTimer(d time.Duration) Timer {
+	panic("IntervalClock doesn't implement NewTimer")
+}
+
 // Unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
@@ -216,3 +249,79 @@ func (*IntervalClock) Tick(d time.Duration) <-chan time.Time {
 func (*IntervalClock) Sleep(d time.Duration) {
 	panic("IntervalClock doesn't implement Sleep")
 }
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+var (
+	_ = Timer(&realTimer{})
+	_ = Timer(&fakeTimer{})
+)
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+	timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+	return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+	return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+	return r.timer.Reset(d)
+}
+
+// fakeTimer implements Timer based on a FakeClock.
+type fakeTimer struct {
+	fakeClock *FakeClock
+	waiter    fakeClockWaiter
+}
+
+// C returns the channel that notifies when this timer has fired.
+func (f *fakeTimer) C() <-chan time.Time {
+	return f.waiter.destChan
+}
+
+// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
+func (f *fakeTimer) Stop() bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+
+	newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters))
+	for i := range f.fakeClock.waiters {
+		w := &f.fakeClock.waiters[i]
+		if w != &f.waiter {
+			newWaiters = append(newWaiters, *w)
+		}
+	}
+
+	f.fakeClock.waiters = newWaiters
+
+	return !f.waiter.fired
+}
+
+// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
+// fired, or false otherwise.
+func (f *fakeTimer) Reset(d time.Duration) bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+
+	active := !f.waiter.fired
+
+	f.waiter.fired = false
+	f.waiter.targetTime = f.fakeClock.time.Add(d)
+
+	return active
+}
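
A minimal sketch (not part of the commit) of the new Timer support: FakeClock.NewTimer registers a waiter on the fake clock, so timer-driven code can be tested deterministically by stepping the clock.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	timer := fc.NewTimer(5 * time.Second)

	select {
	case <-timer.C():
		fmt.Println("unexpected: fired before the clock was stepped")
	default:
		fmt.Println("timer pending, as expected")
	}

	// Advancing the fake clock past the target time delivers on the timer's channel.
	fc.Step(10 * time.Second)
	firedAt := <-timer.C()
	fmt.Println("timer fired at fake time:", firedAt)
}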

vendor/k8s.io/client-go/util/clock/clock_test.go (generated, vendored): 184 lines removed

@@ -1,184 +0,0 @@
Entire file deleted: the vendored clock tests (TestFakeClock, TestFakeClockSleep, TestFakeAfter, TestFakeTick), which exercised FakeClock.Step, SetTime, After, and Tick behaviour, together with their Apache 2.0 license header.

vendor/k8s.io/client-go/util/flowcontrol/backoff_test.go (generated, vendored): 195 lines removed

@@ -1,195 +0,0 @@
Entire file deleted: the vendored backoff tests (TestSlowBackoff, TestBackoffReset, TestBackoffHightWaterMark, TestBackoffGC, TestIsInBackOffSinceUpdate), driven by a clock.NewFakeClock, together with their Apache 2.0 license header.

vendor/k8s.io/client-go/util/flowcontrol/throttle_test.go (generated, vendored): 177 lines removed

@@ -1,177 +0,0 @@
Entire file deleted: the vendored token-bucket rate limiter tests (TestMultithreadedThrottling, TestBasicThrottle, TestIncrementThrottle, TestThrottle, TestRateLimiterSaturation, TestAlwaysFake, TestNeverFake), together with their Apache 2.0 license header.

vendor/k8s.io/client-go/util/integer/integer_test.go (generated, vendored): 244 lines removed

@@ -1,244 +0,0 @@
Entire file deleted: the vendored table-driven tests for IntMax, IntMin, Int32Max, Int32Min, Int64Max, Int64Min, and RoundToInt32, together with their Apache 2.0 license header.

vendor/k8s.io/client-go/util/jsonpath/doc.go (generated, vendored): 20 lines removed

@@ -1,20 +0,0 @@
Entire file deleted: the package comment for the vendored jsonpath package ("package jsonpath is a template engine using jsonpath syntax, which can be seen at http://goessner.net/articles/JsonPath/. In addition, it has {range} {end} function to iterate list and slice."), together with its Apache 2.0 license header and the import comment k8s.io/client-go/util/jsonpath.
498
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
generated
vendored
498
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
generated
vendored
|
@ -1,498 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/third_party/forked/golang/template"
|
||||
)
|
||||
|
||||
type JSONPath struct {
|
||||
name string
|
||||
parser *Parser
|
||||
stack [][]reflect.Value //push and pop values in different scopes
|
||||
cur []reflect.Value //current scope values
|
||||
beginRange int
|
||||
inRange int
|
||||
endRange int
|
||||
|
||||
allowMissingKeys bool
|
||||
}
|
||||
|
||||
func New(name string) *JSONPath {
|
||||
return &JSONPath{
|
||||
name: name,
|
||||
beginRange: 0,
|
||||
inRange: 0,
|
||||
endRange: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
|
||||
// cannot be located, or simply an empty result. The receiver is returned for chaining.
|
||||
func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
|
||||
j.allowMissingKeys = allow
|
||||
return j
|
||||
}
|
||||
|
||||
// Parse parse the given template, return error
|
||||
func (j *JSONPath) Parse(text string) (err error) {
|
||||
j.parser, err = Parse(j.name, text)
|
||||
return
|
||||
}
|
||||
|
||||
// Execute bounds data into template and write the result
|
||||
func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
|
||||
fullResults, err := j.FindResults(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for ix := range fullResults {
|
||||
if err := j.PrintResults(wr, fullResults[ix]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
|
||||
if j.parser == nil {
|
||||
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
|
||||
}
|
||||
|
||||
j.cur = []reflect.Value{reflect.ValueOf(data)}
|
||||
nodes := j.parser.Root.Nodes
|
||||
fullResult := [][]reflect.Value{}
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
node := nodes[i]
|
||||
results, err := j.walk(j.cur, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//encounter an end node, break the current block
|
||||
if j.endRange > 0 && j.endRange <= j.inRange {
|
||||
j.endRange -= 1
|
||||
break
|
||||
}
|
||||
//encounter a range node, start a range loop
|
||||
if j.beginRange > 0 {
|
||||
j.beginRange -= 1
|
||||
j.inRange += 1
|
||||
for k, value := range results {
|
||||
j.parser.Root.Nodes = nodes[i+1:]
|
||||
if k == len(results)-1 {
|
||||
j.inRange -= 1
|
||||
}
|
||||
nextResults, err := j.FindResults(value.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullResult = append(fullResult, nextResults...)
|
||||
}
|
||||
break
|
||||
}
|
||||
fullResult = append(fullResult, results)
|
||||
}
|
||||
return fullResult, nil
|
||||
}
|
||||
|
||||
// PrintResults write the results into writer
|
||||
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
|
||||
for i, r := range results {
|
||||
text, err := j.evalToText(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i != len(results)-1 {
|
||||
text = append(text, ' ')
|
||||
}
|
||||
if _, err = wr.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// walk visits tree rooted at the given node in DFS order
|
||||
func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
|
||||
switch node := node.(type) {
|
||||
case *ListNode:
|
||||
return j.evalList(value, node)
|
||||
case *TextNode:
|
||||
return []reflect.Value{reflect.ValueOf(node.Text)}, nil
|
||||
case *FieldNode:
|
||||
return j.evalField(value, node)
|
||||
case *ArrayNode:
|
||||
return j.evalArray(value, node)
|
||||
case *FilterNode:
|
||||
return j.evalFilter(value, node)
|
||||
case *IntNode:
|
||||
return j.evalInt(value, node)
|
||||
case *FloatNode:
|
||||
return j.evalFloat(value, node)
|
||||
case *WildcardNode:
|
||||
return j.evalWildcard(value, node)
|
||||
case *RecursiveNode:
|
||||
return j.evalRecursive(value, node)
|
||||
case *UnionNode:
|
||||
return j.evalUnion(value, node)
|
||||
case *IdentifierNode:
|
||||
return j.evalIdentifier(value, node)
|
||||
default:
|
||||
return value, fmt.Errorf("unexpected Node %v", node)
|
||||
}
|
||||
}
|
||||
|
||||
// evalInt evaluates IntNode
|
||||
func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
|
||||
result := make([]reflect.Value, len(input))
|
||||
for i := range input {
|
||||
result[i] = reflect.ValueOf(node.Value)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalFloat evaluates FloatNode
|
||||
func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
|
||||
result := make([]reflect.Value, len(input))
|
||||
for i := range input {
|
||||
result[i] = reflect.ValueOf(node.Value)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalList evaluates ListNode
|
||||
func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
|
||||
var err error
|
||||
curValue := value
|
||||
for _, node := range node.Nodes {
|
||||
curValue, err = j.walk(curValue, node)
|
||||
if err != nil {
|
||||
return curValue, err
|
||||
}
|
||||
}
|
||||
return curValue, nil
|
||||
}
|
||||
|
||||
// evalIdentifier evaluates IdentifierNode
|
||||
func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
switch node.Name {
|
||||
case "range":
|
||||
j.stack = append(j.stack, j.cur)
|
||||
j.beginRange += 1
|
||||
results = input
|
||||
case "end":
|
||||
if j.endRange < j.inRange { //inside a loop, break the current block
|
||||
j.endRange += 1
|
||||
break
|
||||
}
|
||||
// the loop is about to end, pop value and continue the following execution
|
||||
if len(j.stack) > 0 {
|
||||
j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
|
||||
} else {
|
||||
return results, fmt.Errorf("not in range, nothing to end")
|
||||
}
|
||||
default:
|
||||
return input, fmt.Errorf("unrecognized identifier %v", node.Name)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalArray evaluates ArrayNode
|
||||
func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
|
||||
return input, fmt.Errorf("%v is not array or slice", value.Type())
|
||||
}
|
||||
params := node.Params
|
||||
if !params[0].Known {
|
||||
params[0].Value = 0
|
||||
}
|
||||
if params[0].Value < 0 {
|
||||
params[0].Value += value.Len()
|
||||
}
|
||||
if !params[1].Known {
|
||||
params[1].Value = value.Len()
|
||||
}
|
||||
|
||||
if params[1].Value < 0 {
|
||||
params[1].Value += value.Len()
|
||||
}
|
||||
|
||||
sliceLength := value.Len()
|
||||
if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
|
||||
if params[0].Value >= sliceLength {
|
||||
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
|
||||
}
|
||||
if params[1].Value > sliceLength {
|
||||
return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
|
||||
}
|
||||
}
|
||||
|
||||
if !params[2].Known {
|
||||
value = value.Slice(params[0].Value, params[1].Value)
|
||||
} else {
|
||||
value = value.Slice3(params[0].Value, params[1].Value, params[2].Value)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
result = append(result, value.Index(i))
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalUnion evaluates UnionNode
|
||||
func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, listNode := range node.Nodes {
|
||||
temp, err := j.evalList(input, listNode)
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
result = append(result, temp...)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
|
||||
t := value.Type()
|
||||
var inlineValue *reflect.Value
|
||||
for ix := 0; ix < t.NumField(); ix++ {
|
||||
f := t.Field(ix)
|
||||
jsonTag := f.Tag.Get("json")
|
||||
parts := strings.Split(jsonTag, ",")
|
||||
if len(parts) == 0 {
|
||||
continue
|
||||
}
|
||||
if parts[0] == node.Value {
|
||||
return value.Field(ix), nil
|
||||
}
|
||||
if len(parts[0]) == 0 {
|
||||
val := value.Field(ix)
|
||||
inlineValue = &val
|
||||
}
|
||||
}
|
||||
if inlineValue != nil {
|
||||
if inlineValue.Kind() == reflect.Struct {
|
||||
// handle 'inline'
|
||||
match, err := j.findFieldInValue(inlineValue, node)
|
||||
if err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
if match.IsValid() {
|
||||
return match, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return value.FieldByName(node.Value), nil
|
||||
}
|
||||
|
||||
// evalField evaluates field of struct or key of map.
|
||||
func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
// If there's no input, there's no output
|
||||
if len(input) == 0 {
|
||||
return results, nil
|
||||
}
|
||||
for _, value := range input {
|
||||
var result reflect.Value
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
if value.Kind() == reflect.Struct {
|
||||
var err error
|
||||
if result, err = j.findFieldInValue(&value, node); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if value.Kind() == reflect.Map {
|
||||
mapKeyType := value.Type().Key()
|
||||
nodeValue := reflect.ValueOf(node.Value)
|
||||
// node value type must be convertible to map key type
|
||||
if !nodeValue.Type().ConvertibleTo(mapKeyType) {
|
||||
return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
|
||||
}
|
||||
result = value.MapIndex(nodeValue.Convert(mapKeyType))
|
||||
}
|
||||
if result.IsValid() {
|
||||
results = append(results, result)
|
||||
}
|
||||
}
|
||||
if len(results) == 0 {
|
||||
if j.allowMissingKeys {
|
||||
return results, nil
|
||||
}
|
||||
return results, fmt.Errorf("%s is not found", node.Value)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalWildcard extracts all contents of the given value.
|
||||
func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
kind := value.Kind()
|
||||
if kind == reflect.Struct {
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
results = append(results, value.Field(i))
|
||||
}
|
||||
} else if kind == reflect.Map {
|
||||
for _, key := range value.MapKeys() {
|
||||
results = append(results, value.MapIndex(key))
|
||||
}
|
||||
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalRecursive visits the given value recursively and pushes everything it finds to the result.
|
||||
func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
|
||||
result := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
results := []reflect.Value{}
|
||||
value, isNil := template.Indirect(value)
|
||||
if isNil {
|
||||
continue
|
||||
}
|
||||
|
||||
kind := value.Kind()
|
||||
if kind == reflect.Struct {
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
results = append(results, value.Field(i))
|
||||
}
|
||||
} else if kind == reflect.Map {
|
||||
for _, key := range value.MapKeys() {
|
||||
results = append(results, value.MapIndex(key))
|
||||
}
|
||||
} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
if len(results) != 0 {
|
||||
result = append(result, value)
|
||||
output, err := j.evalRecursive(results, node)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
result = append(result, output...)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// evalFilter filters the array according to the FilterNode.
|
||||
func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
|
||||
results := []reflect.Value{}
|
||||
for _, value := range input {
|
||||
value, _ = template.Indirect(value)
|
||||
|
||||
if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
|
||||
return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
temp := []reflect.Value{value.Index(i)}
|
||||
lefts, err := j.evalList(temp, node.Left)
|
||||
|
||||
//case exists
|
||||
if node.Operator == "exists" {
|
||||
if len(lefts) > 0 {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
|
||||
var left, right interface{}
|
||||
if len(lefts) != 1 {
|
||||
return input, fmt.Errorf("can only compare one element at a time")
|
||||
}
|
||||
left = lefts[0].Interface()
|
||||
|
||||
rights, err := j.evalList(temp, node.Right)
|
||||
if err != nil {
|
||||
return input, err
|
||||
}
|
||||
if len(rights) != 1 {
|
||||
return input, fmt.Errorf("can only compare one element at a time")
|
||||
}
|
||||
right = rights[0].Interface()
|
||||
|
||||
pass := false
|
||||
switch node.Operator {
|
||||
case "<":
|
||||
pass, err = template.Less(left, right)
|
||||
case ">":
|
||||
pass, err = template.Greater(left, right)
|
||||
case "==":
|
||||
pass, err = template.Equal(left, right)
|
||||
case "!=":
|
||||
pass, err = template.NotEqual(left, right)
|
||||
case "<=":
|
||||
pass, err = template.LessEqual(left, right)
|
||||
case ">=":
|
||||
pass, err = template.GreaterEqual(left, right)
|
||||
default:
|
||||
return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
|
||||
}
|
||||
if err != nil {
|
||||
return results, err
|
||||
}
|
||||
if pass {
|
||||
results = append(results, value.Index(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// evalToText translates a reflect value to its corresponding text.
|
||||
func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
|
||||
iface, ok := template.PrintableValue(v)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can't print type %s", v.Type())
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
fmt.Fprint(&buffer, iface)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
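For orientation, here is a minimal usage sketch of the jsonpath API deleted above, written only against the exported functions that the test file below exercises (New, AllowMissingKeys, Parse, Execute). The import path matches the vendored location shown in this diff; the sample data and template are illustrative, not taken from the original files.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Illustrative input; any JSON-decoded value or Go struct works.
	input := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "a"},
			map[string]interface{}{"name": "b"},
		},
	}

	j := jsonpath.New("example")
	j.AllowMissingKeys(true) // missing keys produce empty output instead of an error

	if err := j.Parse(`{range .items[*]}{.name} {end}`); err != nil {
		panic(err)
	}

	buf := new(bytes.Buffer)
	if err := j.Execute(buf, input); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // "a b "
}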
282
vendor/k8s.io/client-go/util/jsonpath/jsonpath_test.go
generated
vendored
|
@@ -1,282 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type jsonpathTest struct {
|
||||
name string
|
||||
template string
|
||||
input interface{}
|
||||
expect string
|
||||
}
|
||||
|
||||
func testJSONPath(tests []jsonpathTest, allowMissingKeys bool, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
j.AllowMissingKeys(allowMissingKeys)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, execute error %v", test.name, err)
|
||||
}
|
||||
out := buf.String()
|
||||
if out != test.expect {
|
||||
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// testJSONPathSortOutput tests cases related to maps, whose results may print in random order.
|
||||
func testJSONPathSortOutput(tests []jsonpathTest, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, execute error %v", test.name, err)
|
||||
}
|
||||
out := buf.String()
|
||||
// since the map is visited in random order, we need to sort the results.
|
||||
sortedOut := strings.Fields(out)
|
||||
sort.Strings(sortedOut)
|
||||
sortedExpect := strings.Fields(test.expect)
|
||||
sort.Strings(sortedExpect)
|
||||
if !reflect.DeepEqual(sortedOut, sortedExpect) {
|
||||
t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testFailJSONPath(tests []jsonpathTest, t *testing.T) {
|
||||
for _, test := range tests {
|
||||
j := New(test.name)
|
||||
err := j.Parse(test.template)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = j.Execute(buf, test.input)
|
||||
var out string
|
||||
if err == nil {
|
||||
out = "nil"
|
||||
} else {
|
||||
out = err.Error()
|
||||
}
|
||||
if out != test.expect {
|
||||
t.Errorf("in %s, expect to get error %q, got %q", test.name, test.expect, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type book struct {
|
||||
Category string
|
||||
Author string
|
||||
Title string
|
||||
Price float32
|
||||
}
|
||||
|
||||
func (b book) String() string {
|
||||
return fmt.Sprintf("{Category: %s, Author: %s, Title: %s, Price: %v}", b.Category, b.Author, b.Title, b.Price)
|
||||
}
|
||||
|
||||
type bicycle struct {
|
||||
Color string
|
||||
Price float32
|
||||
}
|
||||
|
||||
type empName string
|
||||
type job string
|
||||
type store struct {
|
||||
Book []book
|
||||
Bicycle bicycle
|
||||
Name string
|
||||
Labels map[string]int
|
||||
Employees map[empName]job
|
||||
}
|
||||
|
||||
func TestStructInput(t *testing.T) {
|
||||
|
||||
storeData := store{
|
||||
Name: "jsonpath",
|
||||
Book: []book{
|
||||
{"reference", "Nigel Rees", "Sayings of the Centurey", 8.95},
|
||||
{"fiction", "Evelyn Waugh", "Sword of Honour", 12.99},
|
||||
{"fiction", "Herman Melville", "Moby Dick", 8.99},
|
||||
},
|
||||
Bicycle: bicycle{"red", 19.95},
|
||||
Labels: map[string]int{
|
||||
"engieer": 10,
|
||||
"web/html": 15,
|
||||
"k8s-app": 20,
|
||||
},
|
||||
Employees: map[empName]job{
|
||||
"jason": "manager",
|
||||
"dan": "clerk",
|
||||
},
|
||||
}
|
||||
|
||||
storeTests := []jsonpathTest{
|
||||
{"plain", "hello jsonpath", nil, "hello jsonpath"},
|
||||
{"recursive", "{..}", []int{1, 2, 3}, "[1 2 3]"},
|
||||
{"filter", "{[?(@<5)]}", []int{2, 6, 3, 7}, "2 3"},
|
||||
{"quote", `{"{"}`, nil, "{"},
|
||||
{"union", "{[1,3,4]}", []int{0, 1, 2, 3, 4}, "1 3 4"},
|
||||
{"array", "{[0:2]}", []string{"Monday", "Tudesday"}, "Monday Tudesday"},
|
||||
{"variable", "hello {.Name}", storeData, "hello jsonpath"},
|
||||
{"dict/", "{$.Labels.web/html}", storeData, "15"},
|
||||
{"dict/", "{$.Employees.jason}", storeData, "manager"},
|
||||
{"dict/", "{$.Employees.dan}", storeData, "clerk"},
|
||||
{"dict-", "{.Labels.k8s-app}", storeData, "20"},
|
||||
{"nest", "{.Bicycle.Color}", storeData, "red"},
|
||||
{"allarray", "{.Book[*].Author}", storeData, "Nigel Rees Evelyn Waugh Herman Melville"},
|
||||
{"allfileds", "{.Bicycle.*}", storeData, "red 19.95"},
|
||||
{"recurfileds", "{..Price}", storeData, "8.95 12.99 8.99 19.95"},
|
||||
{"lastarray", "{.Book[-1:]}", storeData,
|
||||
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
|
||||
{"recurarray", "{..Book[2]}", storeData,
|
||||
"{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
|
||||
}
|
||||
testJSONPath(storeTests, false, t)
|
||||
|
||||
missingKeyTests := []jsonpathTest{
|
||||
{"nonexistent field", "{.hello}", storeData, ""},
|
||||
}
|
||||
testJSONPath(missingKeyTests, true, t)
|
||||
|
||||
failStoreTests := []jsonpathTest{
|
||||
{"invalid identifier", "{hello}", storeData, "unrecognized identifier hello"},
|
||||
{"nonexistent field", "{.hello}", storeData, "hello is not found"},
|
||||
{"invalid array", "{.Labels[0]}", storeData, "map[string]int is not array or slice"},
|
||||
{"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>"},
|
||||
{"redundent end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end"},
|
||||
}
|
||||
testFailJSONPath(failStoreTests, t)
|
||||
}
|
||||
|
||||
func TestJSONInput(t *testing.T) {
|
||||
var pointsJSON = []byte(`[
|
||||
{"id": "i1", "x":4, "y":-5},
|
||||
{"id": "i2", "x":-2, "y":-5, "z":1},
|
||||
{"id": "i3", "x": 8, "y": 3 },
|
||||
{"id": "i4", "x": -6, "y": -1 },
|
||||
{"id": "i5", "x": 0, "y": 2, "z": 1 },
|
||||
{"id": "i6", "x": 1, "y": 4 }
|
||||
]`)
|
||||
var pointsData interface{}
|
||||
err := json.Unmarshal(pointsJSON, &pointsData)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
pointsTests := []jsonpathTest{
|
||||
{"exists filter", "{[?(@.z)].id}", pointsData, "i2 i5"},
|
||||
{"bracket key", "{[0]['id']}", pointsData, "i1"},
|
||||
}
|
||||
testJSONPath(pointsTests, false, t)
|
||||
}
|
||||
|
||||
// TestKubernetes tests some use cases from Kubernetes.
|
||||
func TestKubernetes(t *testing.T) {
|
||||
var input = []byte(`{
|
||||
"kind": "List",
|
||||
"items":[
|
||||
{
|
||||
"kind":"None",
|
||||
"metadata":{
|
||||
"name":"127.0.0.1",
|
||||
"labels":{
|
||||
"kubernetes.io/hostname":"127.0.0.1"
|
||||
}
|
||||
},
|
||||
"status":{
|
||||
"capacity":{"cpu":"4"},
|
||||
"addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}]
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind":"None",
|
||||
"metadata":{
|
||||
"name":"127.0.0.2",
|
||||
"labels":{
|
||||
"kubernetes.io/hostname":"127.0.0.2"
|
||||
}
|
||||
},
|
||||
"status":{
|
||||
"capacity":{"cpu":"8"},
|
||||
"addresses":[
|
||||
{"type": "LegacyHostIP", "address":"127.0.0.2"},
|
||||
{"type": "another", "address":"127.0.0.3"}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"users":[
|
||||
{
|
||||
"name": "myself",
|
||||
"user": {}
|
||||
},
|
||||
{
|
||||
"name": "e2e",
|
||||
"user": {"username": "admin", "password": "secret"}
|
||||
}
|
||||
]
|
||||
}`)
|
||||
var nodesData interface{}
|
||||
err := json.Unmarshal(input, &nodesData)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
nodesTests := []jsonpathTest{
|
||||
{"range item", `{range .items[*]}{.metadata.name}, {end}{.kind}`, nodesData, "127.0.0.1, 127.0.0.2, List"},
|
||||
{"range item with quote", `{range .items[*]}{.metadata.name}{"\t"}{end}`, nodesData, "127.0.0.1\t127.0.0.2\t"},
|
||||
{"range addresss", `{.items[*].status.addresses[*].address}`, nodesData,
|
||||
"127.0.0.1 127.0.0.2 127.0.0.3"},
|
||||
{"double range", `{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}`, nodesData,
|
||||
"127.0.0.1, 127.0.0.2, 127.0.0.3, "},
|
||||
{"item name", `{.items[*].metadata.name}`, nodesData, "127.0.0.1 127.0.0.2"},
|
||||
{"union nodes capacity", `{.items[*]['metadata.name', 'status.capacity']}`, nodesData,
|
||||
"127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]"},
|
||||
{"range nodes capacity", `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}`, nodesData,
|
||||
"[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]] "},
|
||||
{"user password", `{.users[?(@.name=="e2e")].user.password}`, &nodesData, "secret"},
|
||||
{"hostname", `{.items[0].metadata.labels.kubernetes\.io/hostname}`, &nodesData, "127.0.0.1"},
|
||||
{"hostname filter", `{.items[?(@.metadata.labels.kubernetes\.io/hostname=="127.0.0.1")].kind}`, &nodesData, "None"},
|
||||
}
|
||||
testJSONPath(nodesTests, false, t)
|
||||
|
||||
randomPrintOrderTests := []jsonpathTest{
|
||||
{"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`},
|
||||
}
|
||||
testJSONPathSortOutput(randomPrintOrderTests, t)
|
||||
}
|
239
vendor/k8s.io/client-go/util/jsonpath/node.go
generated
vendored
|
@@ -1,239 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import "fmt"
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
func (t NodeType) String() string {
|
||||
return NodeTypeName[t]
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota
|
||||
NodeArray
|
||||
NodeList
|
||||
NodeField
|
||||
NodeIdentifier
|
||||
NodeFilter
|
||||
NodeInt
|
||||
NodeFloat
|
||||
NodeWildcard
|
||||
NodeRecursive
|
||||
NodeUnion
|
||||
)
|
||||
|
||||
var NodeTypeName = map[NodeType]string{
|
||||
NodeText: "NodeText",
|
||||
NodeArray: "NodeArray",
|
||||
NodeList: "NodeList",
|
||||
NodeField: "NodeField",
|
||||
NodeIdentifier: "NodeIdentifier",
|
||||
NodeFilter: "NodeFilter",
|
||||
NodeInt: "NodeInt",
|
||||
NodeFloat: "NodeFloat",
|
||||
NodeWildcard: "NodeWildcard",
|
||||
NodeRecursive: "NodeRecursive",
|
||||
NodeUnion: "NodeUnion",
|
||||
}
|
||||
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
}
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func newList() *ListNode {
|
||||
return &ListNode{NodeType: NodeList}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
return fmt.Sprintf("%s", l.Type())
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Text string // The text; may span newlines.
|
||||
}
|
||||
|
||||
func newText(text string) *TextNode {
|
||||
return &TextNode{NodeType: NodeText, Text: text}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", t.Type(), t.Text)
|
||||
}
|
||||
|
||||
// FieldNode holds a field of a struct.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Value string
|
||||
}
|
||||
|
||||
func newField(value string) *FieldNode {
|
||||
return &FieldNode{NodeType: NodeField, Value: value}
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", f.Type(), f.Value)
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Name string
|
||||
}
|
||||
|
||||
func newIdentifier(value string) *IdentifierNode {
|
||||
return &IdentifierNode{
|
||||
NodeType: NodeIdentifier,
|
||||
Name: value,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *IdentifierNode) String() string {
|
||||
return fmt.Sprintf("%s: %s", f.Type(), f.Name)
|
||||
}
|
||||
|
||||
// ParamsEntry holds param information for ArrayNode
|
||||
type ParamsEntry struct {
|
||||
Value int
|
||||
Known bool // whether the value is known at parse time
|
||||
}
|
||||
|
||||
// ArrayNode holds start, end, step information for array index selection
|
||||
type ArrayNode struct {
|
||||
NodeType
|
||||
Params [3]ParamsEntry //start, end, step
|
||||
}
|
||||
|
||||
func newArray(params [3]ParamsEntry) *ArrayNode {
|
||||
return &ArrayNode{
|
||||
NodeType: NodeArray,
|
||||
Params: params,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ArrayNode) String() string {
|
||||
return fmt.Sprintf("%s: %v", a.Type(), a.Params)
|
||||
}
|
||||
|
||||
// FilterNode holds operand and operator information for filter
|
||||
type FilterNode struct {
|
||||
NodeType
|
||||
Left *ListNode
|
||||
Right *ListNode
|
||||
Operator string
|
||||
}
|
||||
|
||||
func newFilter(left, right *ListNode, operator string) *FilterNode {
|
||||
return &FilterNode{
|
||||
NodeType: NodeFilter,
|
||||
Left: left,
|
||||
Right: right,
|
||||
Operator: operator,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FilterNode) String() string {
|
||||
return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
|
||||
}
|
||||
|
||||
// IntNode holds integer value
|
||||
type IntNode struct {
|
||||
NodeType
|
||||
Value int
|
||||
}
|
||||
|
||||
func newInt(num int) *IntNode {
|
||||
return &IntNode{NodeType: NodeInt, Value: num}
|
||||
}
|
||||
|
||||
func (i *IntNode) String() string {
|
||||
return fmt.Sprintf("%s: %d", i.Type(), i.Value)
|
||||
}
|
||||
|
||||
// FloatNode holds float value
|
||||
type FloatNode struct {
|
||||
NodeType
|
||||
Value float64
|
||||
}
|
||||
|
||||
func newFloat(num float64) *FloatNode {
|
||||
return &FloatNode{NodeType: NodeFloat, Value: num}
|
||||
}
|
||||
|
||||
func (i *FloatNode) String() string {
|
||||
return fmt.Sprintf("%s: %f", i.Type(), i.Value)
|
||||
}
|
||||
|
||||
// WildcardNode means a wildcard
|
||||
type WildcardNode struct {
|
||||
NodeType
|
||||
}
|
||||
|
||||
func newWildcard() *WildcardNode {
|
||||
return &WildcardNode{NodeType: NodeWildcard}
|
||||
}
|
||||
|
||||
func (i *WildcardNode) String() string {
|
||||
return fmt.Sprintf("%s", i.Type())
|
||||
}
|
||||
|
||||
// RecursiveNode means a recursive descent operator
|
||||
type RecursiveNode struct {
|
||||
NodeType
|
||||
}
|
||||
|
||||
func newRecursive() *RecursiveNode {
|
||||
return &RecursiveNode{NodeType: NodeRecursive}
|
||||
}
|
||||
|
||||
func (r *RecursiveNode) String() string {
|
||||
return fmt.Sprintf("%s", r.Type())
|
||||
}
|
||||
|
||||
// UnionNode is union of ListNode
|
||||
type UnionNode struct {
|
||||
NodeType
|
||||
Nodes []*ListNode
|
||||
}
|
||||
|
||||
func newUnion(nodes []*ListNode) *UnionNode {
|
||||
return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
|
||||
}
|
||||
|
||||
func (u *UnionNode) String() string {
|
||||
return fmt.Sprintf("%s", u.Type())
|
||||
}
|
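To make the ArrayNode parameters concrete: the three ParamsEntry values are start, end and step, and Known records whether each was written explicitly. A small in-package sketch follows; it uses the unexported constructor above, so it only compiles inside package jsonpath, and the values mirror the `{[1:3]}` case in the parser tests further below.

// inside package jsonpath
func exampleArrayNode() *ArrayNode {
	// [1:3] -> start=1 (known), end=3 (known), step unspecified
	return newArray([3]ParamsEntry{
		{Value: 1, Known: true},
		{Value: 3, Known: true},
		{Value: 0, Known: false},
	})
}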
433
vendor/k8s.io/client-go/util/jsonpath/parser.go
generated
vendored
|
@@ -1,433 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const eof = -1
|
||||
|
||||
const (
|
||||
leftDelim = "{"
|
||||
rightDelim = "}"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
Name string
|
||||
Root *ListNode
|
||||
input string
|
||||
cur *ListNode
|
||||
pos int
|
||||
start int
|
||||
width int
|
||||
}
|
||||
|
||||
// Parse parses the given text and returns a node Parser.
|
||||
// If an error is encountered, parsing stops and an empty
|
||||
// Parser is returned with the error
|
||||
func Parse(name, text string) (*Parser, error) {
|
||||
p := NewParser(name)
|
||||
err := p.Parse(text)
|
||||
if err != nil {
|
||||
p = nil
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
func NewParser(name string) *Parser {
|
||||
return &Parser{
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// parseAction parses the expression inside the delimiters.
|
||||
func parseAction(name, text string) (*Parser, error) {
|
||||
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
|
||||
// when error happens, p will be nil, so we need to return here
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
p.Root = p.Root.Nodes[0].(*ListNode)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *Parser) Parse(text string) error {
|
||||
p.input = text
|
||||
p.Root = newList()
|
||||
p.pos = 0
|
||||
return p.parseText(p.Root)
|
||||
}
|
||||
|
||||
// consumeText returns the text consumed since the last call to consumeText.
|
||||
func (p *Parser) consumeText() string {
|
||||
value := p.input[p.start:p.pos]
|
||||
p.start = p.pos
|
||||
return value
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (p *Parser) next() rune {
|
||||
if int(p.pos) >= len(p.input) {
|
||||
p.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
|
||||
p.width = w
|
||||
p.pos += p.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (p *Parser) peek() rune {
|
||||
r := p.next()
|
||||
p.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (p *Parser) backup() {
|
||||
p.pos -= p.width
|
||||
}
|
||||
|
||||
func (p *Parser) parseText(cur *ListNode) error {
|
||||
for {
|
||||
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
|
||||
if p.pos > p.start {
|
||||
cur.append(newText(p.consumeText()))
|
||||
}
|
||||
return p.parseLeftDelim(cur)
|
||||
}
|
||||
if p.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if p.pos > p.start {
|
||||
cur.append(newText(p.consumeText()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseLeftDelim scans the left delimiter, which is known to be present.
|
||||
func (p *Parser) parseLeftDelim(cur *ListNode) error {
|
||||
p.pos += len(leftDelim)
|
||||
p.consumeText()
|
||||
newNode := newList()
|
||||
cur.append(newNode)
|
||||
cur = newNode
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
func (p *Parser) parseInsideAction(cur *ListNode) error {
|
||||
prefixMap := map[string]func(*ListNode) error{
|
||||
rightDelim: p.parseRightDelim,
|
||||
"[?(": p.parseFilter,
|
||||
"..": p.parseRecursive,
|
||||
}
|
||||
for prefix, parseFunc := range prefixMap {
|
||||
if strings.HasPrefix(p.input[p.pos:], prefix) {
|
||||
return parseFunc(cur)
|
||||
}
|
||||
}
|
||||
|
||||
switch r := p.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return fmt.Errorf("unclosed action")
|
||||
case r == ' ':
|
||||
p.consumeText()
|
||||
case r == '@' || r == '$': //the current object, just pass it
|
||||
p.consumeText()
|
||||
case r == '[':
|
||||
return p.parseArray(cur)
|
||||
case r == '"':
|
||||
return p.parseQuote(cur)
|
||||
case r == '.':
|
||||
return p.parseField(cur)
|
||||
case r == '+' || r == '-' || unicode.IsDigit(r):
|
||||
p.backup()
|
||||
return p.parseNumber(cur)
|
||||
case isAlphaNumeric(r):
|
||||
p.backup()
|
||||
return p.parseIdentifier(cur)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseRightDelim scans the right delimiter, which is known to be present.
|
||||
func (p *Parser) parseRightDelim(cur *ListNode) error {
|
||||
p.pos += len(rightDelim)
|
||||
p.consumeText()
|
||||
cur = p.Root
|
||||
return p.parseText(cur)
|
||||
}
|
||||
|
||||
// parseIdentifier scans built-in keywords, like "range" and "end".
|
||||
func (p *Parser) parseIdentifier(cur *ListNode) error {
|
||||
var r rune
|
||||
for {
|
||||
r = p.next()
|
||||
if isTerminator(r) {
|
||||
p.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
cur.append(newIdentifier(value))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseRecursive scans the recursive descent operator "..".
|
||||
func (p *Parser) parseRecursive(cur *ListNode) error {
|
||||
p.pos += len("..")
|
||||
p.consumeText()
|
||||
cur.append(newRecursive())
|
||||
if r := p.peek(); isAlphaNumeric(r) {
|
||||
return p.parseField(cur)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseNumber scans number
|
||||
func (p *Parser) parseNumber(cur *ListNode) error {
|
||||
r := p.peek()
|
||||
if r == '+' || r == '-' {
|
||||
r = p.next()
|
||||
}
|
||||
for {
|
||||
r = p.next()
|
||||
if r != '.' && !unicode.IsDigit(r) {
|
||||
p.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
i, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
cur.append(newInt(i))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
d, err := strconv.ParseFloat(value, 64)
|
||||
if err == nil {
|
||||
cur.append(newFloat(d))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
return fmt.Errorf("cannot parse number %s", value)
|
||||
}
|
||||
|
||||
// parseArray scans array index selection
|
||||
func (p *Parser) parseArray(cur *ListNode) error {
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated array")
|
||||
case ']':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
text := p.consumeText()
|
||||
text = string(text[1 : len(text)-1])
|
||||
if text == "*" {
|
||||
text = ":"
|
||||
}
|
||||
|
||||
//union operator
|
||||
strs := strings.Split(text, ",")
|
||||
if len(strs) > 1 {
|
||||
union := []*ListNode{}
|
||||
for _, str := range strs {
|
||||
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
union = append(union, parser.Root)
|
||||
}
|
||||
cur.append(newUnion(union))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// dict key
|
||||
reg := regexp.MustCompile(`^'([^']*)'$`)
|
||||
value := reg.FindStringSubmatch(text)
|
||||
if value != nil {
|
||||
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range parser.Root.Nodes {
|
||||
cur.append(node)
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
//slice operator
|
||||
reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`)
|
||||
value = reg.FindStringSubmatch(text)
|
||||
if value == nil {
|
||||
return fmt.Errorf("invalid array index %s", text)
|
||||
}
|
||||
value = value[1:]
|
||||
params := [3]ParamsEntry{}
|
||||
for i := 0; i < 3; i++ {
|
||||
if value[i] != "" {
|
||||
if i > 0 {
|
||||
value[i] = value[i][1:]
|
||||
}
|
||||
if i > 0 && value[i] == "" {
|
||||
params[i].Known = false
|
||||
} else {
|
||||
var err error
|
||||
params[i].Known = true
|
||||
params[i].Value, err = strconv.Atoi(value[i])
|
||||
if err != nil {
|
||||
return fmt.Errorf("array index %s is not a number", value[i])
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if i == 1 {
|
||||
params[i].Known = true
|
||||
params[i].Value = params[0].Value + 1
|
||||
} else {
|
||||
params[i].Known = false
|
||||
params[i].Value = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
cur.append(newArray(params))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseFilter scans filter inside array selection
|
||||
func (p *Parser) parseFilter(cur *ListNode) error {
|
||||
p.pos += len("[?(")
|
||||
p.consumeText()
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated filter")
|
||||
case ')':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
if p.next() != ']' {
|
||||
return fmt.Errorf("unclosed array expect ]")
|
||||
}
|
||||
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
|
||||
text := p.consumeText()
|
||||
text = string(text[:len(text)-2])
|
||||
value := reg.FindStringSubmatch(text)
|
||||
if value == nil {
|
||||
parser, err := parseAction("text", text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cur.append(newFilter(parser.Root, newList(), "exists"))
|
||||
} else {
|
||||
leftParser, err := parseAction("left", value[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rightParser, err := parseAction("right", value[3])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseQuote unquotes the string inside double quotes.
|
||||
func (p *Parser) parseQuote(cur *ListNode) error {
|
||||
Loop:
|
||||
for {
|
||||
switch p.next() {
|
||||
case eof, '\n':
|
||||
return fmt.Errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
value := p.consumeText()
|
||||
s, err := strconv.Unquote(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unquote string %s error %v", value, err)
|
||||
}
|
||||
cur.append(newText(s))
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// parseField scans a field until a terminator
|
||||
func (p *Parser) parseField(cur *ListNode) error {
|
||||
p.consumeText()
|
||||
for p.advance() {
|
||||
}
|
||||
value := p.consumeText()
|
||||
if value == "*" {
|
||||
cur.append(newWildcard())
|
||||
} else {
|
||||
cur.append(newField(strings.Replace(value, "\\", "", -1)))
|
||||
}
|
||||
return p.parseInsideAction(cur)
|
||||
}
|
||||
|
||||
// advance scans until next non-escaped terminator
|
||||
func (p *Parser) advance() bool {
|
||||
r := p.next()
|
||||
if r == '\\' {
|
||||
p.next()
|
||||
} else if isTerminator(r) {
|
||||
p.backup()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isTerminator reports whether the input is at a valid termination character to appear after an identifier.
|
||||
func isTerminator(r rune) bool {
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
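The parser above can also be driven on its own. A short sketch, assuming only the exported pieces shown in parser.go and node.go (Parse, Parser.Root, ListNode.Nodes, and the Node Stringer); the template string is illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	parser, err := jsonpath.Parse("example", `{.items[0].name}`)
	if err != nil {
		panic(err)
	}
	// Root is a ListNode; each {...} action becomes a nested ListNode whose
	// children describe the path (fields, array selections, filters, ...).
	for _, n := range parser.Root.Nodes {
		fmt.Println(n) // String() reports the NodeType and its value
		if action, ok := n.(*jsonpath.ListNode); ok {
			for _, inner := range action.Nodes {
				fmt.Println("  ", inner)
			}
		}
	}
}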
136
vendor/k8s.io/client-go/util/jsonpath/parser_test.go
generated
vendored
|
@@ -1,136 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsonpath
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type parserTest struct {
|
||||
name string
|
||||
text string
|
||||
nodes []Node
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
var parserTests = []parserTest{
|
||||
{"plain", `hello jsonpath`, []Node{newText("hello jsonpath")}, false},
|
||||
{"variable", `hello {.jsonpath}`,
|
||||
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
|
||||
{"arrayfiled", `hello {['jsonpath']}`,
|
||||
[]Node{newText("hello "), newList(), newField("jsonpath")}, false},
|
||||
{"quote", `{"{"}`, []Node{newList(), newText("{")}, false},
|
||||
{"array", `{[1:3]}`, []Node{newList(),
|
||||
newArray([3]ParamsEntry{{1, true}, {3, true}, {0, false}})}, false},
|
||||
{"allarray", `{.book[*].author}`,
|
||||
[]Node{newList(), newField("book"),
|
||||
newArray([3]ParamsEntry{{0, false}, {0, false}, {0, false}}), newField("author")}, false},
|
||||
{"wildcard", `{.bicycle.*}`,
|
||||
[]Node{newList(), newField("bicycle"), newWildcard()}, false},
|
||||
{"filter", `{[?(@.price<3)]}`,
|
||||
[]Node{newList(), newFilter(newList(), newList(), "<"),
|
||||
newList(), newField("price"), newList(), newInt(3)}, false},
|
||||
{"recursive", `{..}`, []Node{newList(), newRecursive()}, false},
|
||||
{"recurField", `{..price}`,
|
||||
[]Node{newList(), newRecursive(), newField("price")}, false},
|
||||
{"arraydict", `{['book.price']}`, []Node{newList(),
|
||||
newField("book"), newField("price"),
|
||||
}, false},
|
||||
{"union", `{['bicycle.price', 3, 'book.price']}`, []Node{newList(), newUnion([]*ListNode{}),
|
||||
newList(), newField("bicycle"), newField("price"),
|
||||
newList(), newArray([3]ParamsEntry{{3, true}, {4, true}, {0, false}}),
|
||||
newList(), newField("book"), newField("price"),
|
||||
}, false},
|
||||
{"range", `{range .items}{.name},{end}`, []Node{
|
||||
newList(), newIdentifier("range"), newField("items"),
|
||||
newList(), newField("name"), newText(","),
|
||||
newList(), newIdentifier("end"),
|
||||
}, false},
|
||||
{"malformat input", `{\\\}`, []Node{}, true},
|
||||
}
|
||||
|
||||
func collectNode(nodes []Node, cur Node) []Node {
|
||||
nodes = append(nodes, cur)
|
||||
switch cur.Type() {
|
||||
case NodeList:
|
||||
for _, node := range cur.(*ListNode).Nodes {
|
||||
nodes = collectNode(nodes, node)
|
||||
}
|
||||
case NodeFilter:
|
||||
nodes = collectNode(nodes, cur.(*FilterNode).Left)
|
||||
nodes = collectNode(nodes, cur.(*FilterNode).Right)
|
||||
case NodeUnion:
|
||||
for _, node := range cur.(*UnionNode).Nodes {
|
||||
nodes = collectNode(nodes, node)
|
||||
}
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func TestParser(t *testing.T) {
|
||||
for _, test := range parserTests {
|
||||
parser, err := Parse(test.name, test.text)
|
||||
if test.shouldError {
|
||||
if err == nil {
|
||||
t.Errorf("unexpected non-error when parsing %s", test.name)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("parse %s error %v", test.name, err)
|
||||
}
|
||||
result := collectNode([]Node{}, parser.Root)[1:]
|
||||
if len(result) != len(test.nodes) {
|
||||
t.Errorf("in %s, expect to get %d nodes, got %d nodes", test.name, len(test.nodes), len(result))
|
||||
t.Error(result)
|
||||
}
|
||||
for i, expect := range test.nodes {
|
||||
if result[i].String() != expect.String() {
|
||||
t.Errorf("in %s, %dth node, expect %v, got %v", test.name, i, expect, result[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type failParserTest struct {
|
||||
name string
|
||||
text string
|
||||
err string
|
||||
}
|
||||
|
||||
func TestFailParser(t *testing.T) {
|
||||
failParserTests := []failParserTest{
|
||||
{"unclosed action", "{.hello", "unclosed action"},
|
||||
{"unrecognized character", "{*}", "unrecognized character in action: U+002A '*'"},
|
||||
{"invalid number", "{+12.3.0}", "cannot parse number +12.3.0"},
|
||||
{"unterminated array", "{[1}", "unterminated array"},
|
||||
{"invalid index", "{[::-1]}", "invalid array index ::-1"},
|
||||
{"unterminated filter", "{[?(.price]}", "unterminated filter"},
|
||||
}
|
||||
for _, test := range failParserTests {
|
||||
_, err := Parse(test.name, test.text)
|
||||
var out string
|
||||
if err == nil {
|
||||
out = "nil"
|
||||
} else {
|
||||
out = err.Error()
|
||||
}
|
||||
if out != test.err {
|
||||
t.Errorf("in %s, expect to get error %v, got %v", test.name, test.err, out)
|
||||
}
|
||||
}
|
||||
}
|
139
vendor/k8s.io/client-go/util/testing/fake_handler.go
generated
vendored
|
@@ -1,139 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TestInterface is a simple interface providing Errorf, to make injection for
|
||||
// testing easier (insert 'yo dawg' meme here).
|
||||
type TestInterface interface {
|
||||
Errorf(format string, args ...interface{})
|
||||
Logf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
// LogInterface is a simple interface to allow injection of Logf to report serving errors.
|
||||
type LogInterface interface {
|
||||
Logf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
// FakeHandler is to assist in testing HTTP requests. Notice that FakeHandler is
|
||||
// not thread safe and you must not direct traffic to it except for the request
|
||||
// you want to test. You can do this by hiding it in an http.ServeMux.
|
||||
type FakeHandler struct {
|
||||
RequestReceived *http.Request
|
||||
RequestBody string
|
||||
StatusCode int
|
||||
ResponseBody string
|
||||
// For logging - you can use a *testing.T
|
||||
// This will keep log messages associated with the test.
|
||||
T LogInterface
|
||||
|
||||
// Enforce "only one use" constraint.
|
||||
lock sync.Mutex
|
||||
requestCount int
|
||||
hasBeenChecked bool
|
||||
|
||||
SkipRequestFn func(verb string, url url.URL) bool
|
||||
}
|
||||
|
||||
func (f *FakeHandler) SetResponseBody(responseBody string) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.ResponseBody = responseBody
|
||||
}
|
||||
|
||||
func (f *FakeHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.SkipRequestFn != nil && f.SkipRequestFn(request.Method, *request.URL) {
|
||||
response.Header().Set("Content-Type", "application/json")
|
||||
response.WriteHeader(f.StatusCode)
|
||||
response.Write([]byte(f.ResponseBody))
|
||||
return
|
||||
}
|
||||
|
||||
f.requestCount++
|
||||
if f.hasBeenChecked {
|
||||
panic("got request after having been validated")
|
||||
}
|
||||
|
||||
f.RequestReceived = request
|
||||
response.Header().Set("Content-Type", "application/json")
|
||||
response.WriteHeader(f.StatusCode)
|
||||
response.Write([]byte(f.ResponseBody))
|
||||
|
||||
bodyReceived, err := ioutil.ReadAll(request.Body)
|
||||
if err != nil && f.T != nil {
|
||||
f.T.Logf("Received read error: %v", err)
|
||||
}
|
||||
f.RequestBody = string(bodyReceived)
|
||||
if f.T != nil {
|
||||
f.T.Logf("request body: %s", f.RequestBody)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeHandler) ValidateRequestCount(t TestInterface, count int) bool {
|
||||
ok := true
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
if f.requestCount != count {
|
||||
ok = false
|
||||
t.Errorf("Expected %d call, but got %d. Only the last call is recorded and checked.", count, f.requestCount)
|
||||
}
|
||||
f.hasBeenChecked = true
|
||||
return ok
|
||||
}
|
||||
|
||||
// ValidateRequest verifies that FakeHandler received a request with expected path, method, and body.
|
||||
func (f *FakeHandler) ValidateRequest(t TestInterface, expectedPath, expectedMethod string, body *string) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
if f.requestCount != 1 {
|
||||
t.Logf("Expected 1 call, but got %v. Only the last call is recorded and checked.", f.requestCount)
|
||||
}
|
||||
f.hasBeenChecked = true
|
||||
|
||||
expectURL, err := url.Parse(expectedPath)
|
||||
if err != nil {
|
||||
t.Errorf("Couldn't parse %v as a URL.", expectedPath)
|
||||
}
|
||||
if f.RequestReceived == nil {
|
||||
t.Errorf("Unexpected nil request received for %s", expectedPath)
|
||||
return
|
||||
}
|
||||
if f.RequestReceived.URL.Path != expectURL.Path {
|
||||
t.Errorf("Unexpected request path for request %#v, received: %q, expected: %q", f.RequestReceived, f.RequestReceived.URL.Path, expectURL.Path)
|
||||
}
|
||||
if e, a := expectURL.Query(), f.RequestReceived.URL.Query(); !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("Unexpected query for request %#v, received: %q, expected: %q", f.RequestReceived, a, e)
|
||||
}
|
||||
if f.RequestReceived.Method != expectedMethod {
|
||||
t.Errorf("Unexpected method: %q, expected: %q", f.RequestReceived.Method, expectedMethod)
|
||||
}
|
||||
if body != nil {
|
||||
if *body != f.RequestBody {
|
||||
t.Errorf("Received body:\n%s\n Doesn't match expected body:\n%s", f.RequestBody, *body)
|
||||
}
|
||||
}
|
||||
}
|
180
vendor/k8s.io/client-go/util/testing/fake_handler_test.go
generated
vendored
|
@@ -1,180 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFakeHandlerPath(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
body := "somebody"
|
||||
|
||||
req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
handler.ValidateRequest(t, path, method, &body)
|
||||
}
|
||||
|
||||
func TestFakeHandlerPathNoBody(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
|
||||
req, err := http.NewRequest(method, server.URL+path, nil)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
handler.ValidateRequest(t, path, method, nil)
|
||||
}
|
||||
|
||||
type fakeError struct {
|
||||
errors []string
|
||||
}
|
||||
|
||||
func (f *fakeError) Errorf(format string, args ...interface{}) {
|
||||
f.errors = append(f.errors, format)
|
||||
}
|
||||
|
||||
func (f *fakeError) Logf(format string, args ...interface{}) {}
|
||||
|
||||
func TestFakeHandlerWrongPath(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
fakeT := fakeError{}
|
||||
|
||||
req, err := http.NewRequest(method, server.URL+"/foo/baz", nil)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
handler.ValidateRequest(&fakeT, path, method, nil)
|
||||
if len(fakeT.errors) != 1 {
|
||||
t.Errorf("Unexpected error set: %#v", fakeT.errors)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFakeHandlerWrongMethod(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
fakeT := fakeError{}
|
||||
|
||||
req, err := http.NewRequest("PUT", server.URL+path, nil)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
handler.ValidateRequest(&fakeT, path, method, nil)
|
||||
if len(fakeT.errors) != 1 {
|
||||
t.Errorf("Unexpected error set: %#v", fakeT.errors)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFakeHandlerWrongBody(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
body := "somebody"
|
||||
fakeT := fakeError{}
|
||||
|
||||
req, err := http.NewRequest(method, server.URL+path, bytes.NewBufferString(body))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
otherbody := "otherbody"
|
||||
handler.ValidateRequest(&fakeT, path, method, &otherbody)
|
||||
if len(fakeT.errors) != 1 {
|
||||
t.Errorf("Unexpected error set: %#v", fakeT.errors)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFakeHandlerNilBody(t *testing.T) {
|
||||
handler := FakeHandler{StatusCode: http.StatusOK}
|
||||
server := httptest.NewServer(&handler)
|
||||
defer server.Close()
|
||||
method := "GET"
|
||||
path := "/foo/bar"
|
||||
body := "somebody"
|
||||
fakeT := fakeError{}
|
||||
|
||||
req, err := http.NewRequest(method, server.URL+path, nil)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
client := http.Client{}
|
||||
_, err = client.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
handler.ValidateRequest(&fakeT, path, method, &body)
|
||||
if len(fakeT.errors) != 1 {
|
||||
t.Errorf("Unexpected error set: %#v", fakeT.errors)
|
||||
}
|
||||
}
|
44
vendor/k8s.io/client-go/util/testing/tmpdir.go
generated
vendored
|
@@ -1,44 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// MkTmpdir creates a temporary directory based upon the prefix passed in.
|
||||
// If successful, it returns the temporary directory path. The directory can be
|
||||
// deleted with a call to "os.RemoveAll(...)".
|
||||
// In case of error, it'll return an empty string and the error.
|
||||
func MkTmpdir(prefix string) (string, error) {
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), prefix)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return tmpDir, nil
|
||||
}
|
||||
|
||||
// MkTmpdirOrDie does the same work as "MkTmpdir", except in case of
|
||||
// errors, it'll trigger a panic.
|
||||
func MkTmpdirOrDie(prefix string) string {
|
||||
tmpDir, err := MkTmpdir(prefix)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tmpDir
|
||||
}
|
211
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
generated
vendored
|
@@ -1,211 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type RateLimiter interface {
|
||||
// When takes an item and decides how long that item should wait
|
||||
When(item interface{}) time.Duration
|
||||
// Forget indicates that an item is finished being retried. It doesn't matter whether it's for a permanent failure
|
||||
// or for success; either way, we'll stop tracking it.
|
||||
Forget(item interface{})
|
||||
// NumRequeues returns how many failures the item has had.
|
||||
NumRequeues(item interface{}) int
|
||||
}
|
||||
|
||||
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
|
||||
// both overall and per-item rate limiting. The overall limiter is a token bucket and the per-item limiter is exponential.
|
||||
func DefaultControllerRateLimiter() RateLimiter {
|
||||
return NewMaxOfRateLimiter(
|
||||
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
|
||||
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item).
|
||||
&BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
|
||||
)
|
||||
}
|
||||
|
||||
// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
|
||||
type BucketRateLimiter struct {
|
||||
*ratelimit.Bucket
|
||||
}
|
||||
|
||||
var _ RateLimiter = &BucketRateLimiter{}
|
||||
|
||||
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
|
||||
return r.Bucket.Take(1)
|
||||
}
|
||||
|
||||
func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *BucketRateLimiter) Forget(item interface{}) {
|
||||
}
|
||||
|
||||
// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
|
||||
// dealing with max failures and expiration is up to the caller.
|
||||
type ItemExponentialFailureRateLimiter struct {
|
||||
failuresLock sync.Mutex
|
||||
failures map[interface{}]int
|
||||
|
||||
baseDelay time.Duration
|
||||
maxDelay time.Duration
|
||||
}
|
||||
|
||||
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
|
||||
|
||||
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
|
||||
return &ItemExponentialFailureRateLimiter{
|
||||
failures: map[interface{}]int{},
|
||||
baseDelay: baseDelay,
|
||||
maxDelay: maxDelay,
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultItemBasedRateLimiter() RateLimiter {
|
||||
return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
|
||||
}
|
||||
|
||||
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
exp := r.failures[item]
|
||||
r.failures[item] = r.failures[item] + 1
|
||||
|
||||
// The backoff is capped such that 'calculated' value never overflows.
|
||||
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
|
||||
if backoff > math.MaxInt64 {
|
||||
return r.maxDelay
|
||||
}
|
||||
|
||||
calculated := time.Duration(backoff)
|
||||
if calculated > r.maxDelay {
|
||||
return r.maxDelay
|
||||
}
|
||||
|
||||
return calculated
|
||||
}
|
||||
|
||||
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
return r.failures[item]
|
||||
}
|
||||
|
||||
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
delete(r.failures, item)
|
||||
}
|
||||
|
||||
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
|
||||
type ItemFastSlowRateLimiter struct {
|
||||
failuresLock sync.Mutex
|
||||
failures map[interface{}]int
|
||||
|
||||
maxFastAttempts int
|
||||
fastDelay time.Duration
|
||||
slowDelay time.Duration
|
||||
}
|
||||
|
||||
var _ RateLimiter = &ItemFastSlowRateLimiter{}
|
||||
|
||||
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
|
||||
return &ItemFastSlowRateLimiter{
|
||||
failures: map[interface{}]int{},
|
||||
fastDelay: fastDelay,
|
||||
slowDelay: slowDelay,
|
||||
maxFastAttempts: maxFastAttempts,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
r.failures[item] = r.failures[item] + 1
|
||||
|
||||
if r.failures[item] <= r.maxFastAttempts {
|
||||
return r.fastDelay
|
||||
}
|
||||
|
||||
return r.slowDelay
|
||||
}
|
||||
|
||||
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
return r.failures[item]
|
||||
}
|
||||
|
||||
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
|
||||
r.failuresLock.Lock()
|
||||
defer r.failuresLock.Unlock()
|
||||
|
||||
delete(r.failures, item)
|
||||
}
|
||||
|
||||
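For illustration only, with hypothetical delays: the fast/slow behaviour described above, where the first maxFastAttempts failures get the fast delay and everything after that gets the slow delay.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Three fast attempts, then the slow delay for every further failure.
	limiter := workqueue.NewItemFastSlowRateLimiter(5*time.Millisecond, 10*time.Second, 3)

	for i := 0; i < 5; i++ {
		fmt.Println(limiter.When("key")) // 5ms, 5ms, 5ms, then 10s, 10s
	}
}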
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
|
||||
// When used with a token bucket limiter, the burst can appear to be exceeded in cases where particular items
|
||||
// were separately delayed a longer time.
|
||||
type MaxOfRateLimiter struct {
|
||||
limiters []RateLimiter
|
||||
}
|
||||
|
||||
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
|
||||
ret := time.Duration(0)
|
||||
for _, limiter := range r.limiters {
|
||||
curr := limiter.When(item)
|
||||
if curr > ret {
|
||||
ret = curr
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
|
||||
return &MaxOfRateLimiter{limiters: limiters}
|
||||
}
|
||||
|
||||
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
|
||||
ret := 0
|
||||
for _, limiter := range r.limiters {
|
||||
curr := limiter.NumRequeues(item)
|
||||
if curr > ret {
|
||||
ret = curr
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (r *MaxOfRateLimiter) Forget(item interface{}) {
|
||||
for _, limiter := range r.limiters {
|
||||
limiter.Forget(item)
|
||||
}
|
||||
}
|
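A small sketch (not part of this diff) showing the worst-case behaviour: the combined limiter always returns the longest delay any child limiter requests.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	limiter := workqueue.NewMaxOfRateLimiter(
		workqueue.NewItemFastSlowRateLimiter(5*time.Millisecond, 3*time.Second, 3),
		workqueue.NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second),
	)

	for i := 0; i < 5; i++ {
		// 5ms three times (fast/slow dominates the exponential limiter), then 3s.
		fmt.Println(limiter.When("key"))
	}
}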
184
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters_test.go
generated
vendored
|
@ -1,184 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestItemExponentialFailureRateLimiter(t *testing.T) {
|
||||
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
|
||||
|
||||
if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 4*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 8*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 16*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
if e, a := 1*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2, limiter.NumRequeues("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
limiter.Forget("one")
|
||||
if e, a := 0, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 1*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestItemExponentialFailureRateLimiterOverFlow(t *testing.T) {
|
||||
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1000*time.Second)
|
||||
for i := 0; i < 5; i++ {
|
||||
limiter.When("one")
|
||||
}
|
||||
if e, a := 32*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
limiter.When("overflow1")
|
||||
}
|
||||
if e, a := 1000*time.Second, limiter.When("overflow1"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
limiter = NewItemExponentialFailureRateLimiter(1*time.Minute, 1000*time.Hour)
|
||||
for i := 0; i < 2; i++ {
|
||||
limiter.When("two")
|
||||
}
|
||||
if e, a := 4*time.Minute, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
limiter.When("overflow2")
|
||||
}
|
||||
if e, a := 1000*time.Hour, limiter.When("overflow2"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestItemFastSlowRateLimiter(t *testing.T) {
|
||||
limiter := NewItemFastSlowRateLimiter(5*time.Millisecond, 10*time.Second, 3)
|
||||
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 10*time.Second, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 10*time.Second, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2, limiter.NumRequeues("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
limiter.Forget("one")
|
||||
if e, a := 0, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMaxOfRateLimiter(t *testing.T) {
|
||||
limiter := NewMaxOfRateLimiter(
|
||||
NewItemFastSlowRateLimiter(5*time.Millisecond, 3*time.Second, 3),
|
||||
NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second),
|
||||
)
|
||||
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 3*time.Second, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 3*time.Second, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2, limiter.NumRequeues("two"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
limiter.Forget("one")
|
||||
if e, a := 0, limiter.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
}
|
246
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
generated
vendored
|
@ -1,246 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/util/clock"
|
||||
)
|
||||
|
||||
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
|
||||
// requeue items after failures without ending up in a hot-loop.
|
||||
type DelayingInterface interface {
|
||||
Interface
|
||||
// AddAfter adds an item to the workqueue after the indicated duration has passed
|
||||
AddAfter(item interface{}, duration time.Duration)
|
||||
}
|
||||
|
||||
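Not part of the vendored code: a minimal sketch of the hot-loop-avoidance pattern the comment above describes, using the real clock.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// AddAfter lets a controller retry an item later without spinning in a hot loop.
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	q.AddAfter("retry-me", 100*time.Millisecond)

	start := time.Now()
	item, _ := q.Get() // blocks until the delay has elapsed
	fmt.Printf("got %v after %v\n", item, time.Since(start))
	q.Done(item)
}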
// NewDelayingQueue constructs a new workqueue with delayed queuing ability
|
||||
func NewDelayingQueue() DelayingInterface {
|
||||
return newDelayingQueue(clock.RealClock{}, "")
|
||||
}
|
||||
|
||||
func NewNamedDelayingQueue(name string) DelayingInterface {
|
||||
return newDelayingQueue(clock.RealClock{}, name)
|
||||
}
|
||||
|
||||
func newDelayingQueue(clock clock.Clock, name string) DelayingInterface {
|
||||
ret := &delayingType{
|
||||
Interface: NewNamed(name),
|
||||
clock: clock,
|
||||
heartbeat: clock.Tick(maxWait),
|
||||
stopCh: make(chan struct{}),
|
||||
waitingTimeByEntry: map[t]time.Time{},
|
||||
waitingForAddCh: make(chan waitFor, 1000),
|
||||
metrics: newRetryMetrics(name),
|
||||
}
|
||||
|
||||
go ret.waitingLoop()
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// delayingType wraps an Interface and provides delayed re-enqueuing.
|
||||
type delayingType struct {
|
||||
Interface
|
||||
|
||||
// clock tracks time for delayed firing
|
||||
clock clock.Clock
|
||||
|
||||
// stopCh lets us signal a shutdown to the waiting loop
|
||||
stopCh chan struct{}
|
||||
|
||||
// heartbeat ensures we wait no more than maxWait before firing
|
||||
//
|
||||
// TODO: replace with Ticker (and add to clock) so this can be cleaned up.
|
||||
// clock.Tick will leak.
|
||||
heartbeat <-chan time.Time
|
||||
|
||||
// waitingForAdd is an ordered slice of items to be added to the contained work queue
|
||||
waitingForAdd []waitFor
|
||||
// waitingTimeByEntry holds the wait time by entry, so we can look up pre-existing indexes
|
||||
waitingTimeByEntry map[t]time.Time
|
||||
// waitingForAddCh is a buffered channel that feeds waitingForAdd
|
||||
waitingForAddCh chan waitFor
|
||||
|
||||
// metrics counts the number of retries
|
||||
metrics retryMetrics
|
||||
}
|
||||
|
||||
// waitFor holds the data to add and the time it should be added
|
||||
type waitFor struct {
|
||||
data t
|
||||
readyAt time.Time
|
||||
}
|
||||
|
||||
// ShutDown gives a way to shut off this queue
|
||||
func (q *delayingType) ShutDown() {
|
||||
q.Interface.ShutDown()
|
||||
close(q.stopCh)
|
||||
}
|
||||
|
||||
// AddAfter adds the given item to the work queue after the given delay
|
||||
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
|
||||
// don't add if we're already shutting down
|
||||
if q.ShuttingDown() {
|
||||
return
|
||||
}
|
||||
|
||||
q.metrics.retry()
|
||||
|
||||
// immediately add things with no delay
|
||||
if duration <= 0 {
|
||||
q.Add(item)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-q.stopCh:
|
||||
// unblock if ShutDown() is called
|
||||
case q.waitingForAddCh <- waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
|
||||
}
|
||||
}
|
||||
|
||||
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
|
||||
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
|
||||
// expired item sitting for more than 10 seconds.
|
||||
const maxWait = 10 * time.Second
|
||||
|
||||
// waitingLoop runs until the workqueue is shut down and keeps a check on the list of items to be added.
|
||||
func (q *delayingType) waitingLoop() {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
// Make a placeholder channel to use when there are no items in our list
|
||||
never := make(<-chan time.Time)
|
||||
|
||||
for {
|
||||
if q.Interface.ShuttingDown() {
|
||||
// discard waiting entries
|
||||
q.waitingForAdd = nil
|
||||
q.waitingTimeByEntry = nil
|
||||
return
|
||||
}
|
||||
|
||||
now := q.clock.Now()
|
||||
|
||||
// Add ready entries
|
||||
readyEntries := 0
|
||||
for _, entry := range q.waitingForAdd {
|
||||
if entry.readyAt.After(now) {
|
||||
break
|
||||
}
|
||||
q.Add(entry.data)
|
||||
delete(q.waitingTimeByEntry, entry.data)
|
||||
readyEntries++
|
||||
}
|
||||
q.waitingForAdd = q.waitingForAdd[readyEntries:]
|
||||
|
||||
// Set up a wait for the first item's readyAt (if one exists)
|
||||
nextReadyAt := never
|
||||
if len(q.waitingForAdd) > 0 {
|
||||
nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now))
|
||||
}
|
||||
|
||||
select {
|
||||
case <-q.stopCh:
|
||||
return
|
||||
|
||||
case <-q.heartbeat:
|
||||
// continue the loop, which will add ready items
|
||||
|
||||
case <-nextReadyAt:
|
||||
// continue the loop, which will add ready items
|
||||
|
||||
case waitEntry := <-q.waitingForAddCh:
|
||||
if waitEntry.readyAt.After(q.clock.Now()) {
|
||||
q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry)
|
||||
} else {
|
||||
q.Add(waitEntry.data)
|
||||
}
|
||||
|
||||
drained := false
|
||||
for !drained {
|
||||
select {
|
||||
case waitEntry := <-q.waitingForAddCh:
|
||||
if waitEntry.readyAt.After(q.clock.Now()) {
|
||||
q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry)
|
||||
} else {
|
||||
q.Add(waitEntry.data)
|
||||
}
|
||||
default:
|
||||
drained = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// inserts the given entry into the sorted entries list
|
||||
// same semantics as append()... the given slice may be modified,
|
||||
// and the returned value should be used
|
||||
//
|
||||
// TODO: This should probably be converted to use container/heap to improve
|
||||
// running time for a large number of items.
|
||||
func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor {
|
||||
// if the entry is already in our retry list and the existing time is before the new one, just skip it
|
||||
existingTime, exists := knownEntries[entry.data]
|
||||
if exists && existingTime.Before(entry.readyAt) {
|
||||
return entries
|
||||
}
|
||||
|
||||
// if the entry exists and is scheduled for later, go ahead and remove the entry
|
||||
if exists {
|
||||
if existingIndex := findEntryIndex(entries, existingTime, entry.data); existingIndex >= 0 && existingIndex < len(entries) {
|
||||
entries = append(entries[:existingIndex], entries[existingIndex+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
insertionIndex := sort.Search(len(entries), func(i int) bool {
|
||||
return entry.readyAt.Before(entries[i].readyAt)
|
||||
})
|
||||
|
||||
// grow by 1
|
||||
entries = append(entries, waitFor{})
|
||||
// shift items from the insertion point to the end
|
||||
copy(entries[insertionIndex+1:], entries[insertionIndex:])
|
||||
// insert the record
|
||||
entries[insertionIndex] = entry
|
||||
|
||||
knownEntries[entry.data] = entry.readyAt
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// findEntryIndex returns the index for an existing entry
|
||||
func findEntryIndex(entries []waitFor, existingTime time.Time, data t) int {
|
||||
index := sort.Search(len(entries), func(i int) bool {
|
||||
return entries[i].readyAt.After(existingTime) || existingTime == entries[i].readyAt
|
||||
})
|
||||
|
||||
// we know this is the earliest possible index, but there could be multiple with the same time
|
||||
// iterate from here to find the dupe
|
||||
for ; index < len(entries); index++ {
|
||||
if entries[index].data == data {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return index
|
||||
}
|
236
vendor/k8s.io/client-go/util/workqueue/delaying_queue_test.go
generated
vendored
|
@ -1,236 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/clock"
|
||||
)
|
||||
|
||||
func TestSimpleQueue(t *testing.T) {
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
q := newDelayingQueue(fakeClock, "")
|
||||
|
||||
first := "foo"
|
||||
|
||||
q.AddAfter(first, 50*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
fakeClock.Step(60 * time.Millisecond)
|
||||
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Errorf("should have added")
|
||||
}
|
||||
item, _ := q.Get()
|
||||
q.Done(item)
|
||||
|
||||
// step past the next heartbeat
|
||||
fakeClock.Step(10 * time.Second)
|
||||
|
||||
err := wait.Poll(1*time.Millisecond, 30*time.Millisecond, func() (done bool, err error) {
|
||||
if q.Len() > 0 {
|
||||
return false, fmt.Errorf("added to queue")
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != wait.ErrWaitTimeout {
|
||||
t.Errorf("expected timeout, got: %v", err)
|
||||
}
|
||||
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeduping(t *testing.T) {
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
q := newDelayingQueue(fakeClock, "")
|
||||
|
||||
first := "foo"
|
||||
|
||||
q.AddAfter(first, 50*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
q.AddAfter(first, 70*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
// step past the first block, we should receive now
|
||||
fakeClock.Step(60 * time.Millisecond)
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Errorf("should have added")
|
||||
}
|
||||
item, _ := q.Get()
|
||||
q.Done(item)
|
||||
|
||||
// step past the second add
|
||||
fakeClock.Step(20 * time.Millisecond)
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
// test again, but this time the earlier should override
|
||||
q.AddAfter(first, 50*time.Millisecond)
|
||||
q.AddAfter(first, 30*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
fakeClock.Step(40 * time.Millisecond)
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Errorf("should have added")
|
||||
}
|
||||
item, _ = q.Get()
|
||||
q.Done(item)
|
||||
|
||||
// step past the second add
|
||||
fakeClock.Step(20 * time.Millisecond)
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddTwoFireEarly(t *testing.T) {
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
q := newDelayingQueue(fakeClock, "")
|
||||
|
||||
first := "foo"
|
||||
second := "bar"
|
||||
third := "baz"
|
||||
|
||||
q.AddAfter(first, 1*time.Second)
|
||||
q.AddAfter(second, 50*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
fakeClock.Step(60 * time.Millisecond)
|
||||
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
item, _ := q.Get()
|
||||
if !reflect.DeepEqual(item, second) {
|
||||
t.Errorf("expected %v, got %v", second, item)
|
||||
}
|
||||
|
||||
q.AddAfter(third, 2*time.Second)
|
||||
|
||||
fakeClock.Step(1 * time.Second)
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
item, _ = q.Get()
|
||||
if !reflect.DeepEqual(item, first) {
|
||||
t.Errorf("expected %v, got %v", first, item)
|
||||
}
|
||||
|
||||
fakeClock.Step(2 * time.Second)
|
||||
if err := waitForAdded(q, 1); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
item, _ = q.Get()
|
||||
if !reflect.DeepEqual(item, third) {
|
||||
t.Errorf("expected %v, got %v", third, item)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCopyShifting(t *testing.T) {
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
q := newDelayingQueue(fakeClock, "")
|
||||
|
||||
first := "foo"
|
||||
second := "bar"
|
||||
third := "baz"
|
||||
|
||||
q.AddAfter(first, 1*time.Second)
|
||||
q.AddAfter(second, 500*time.Millisecond)
|
||||
q.AddAfter(third, 250*time.Millisecond)
|
||||
if err := waitForWaitingQueueToFill(q); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
|
||||
if q.Len() != 0 {
|
||||
t.Errorf("should not have added")
|
||||
}
|
||||
|
||||
fakeClock.Step(2 * time.Second)
|
||||
|
||||
if err := waitForAdded(q, 3); err != nil {
|
||||
t.Fatalf("unexpected err: %v", err)
|
||||
}
|
||||
actualFirst, _ := q.Get()
|
||||
if !reflect.DeepEqual(actualFirst, third) {
|
||||
t.Errorf("expected %v, got %v", third, actualFirst)
|
||||
}
|
||||
actualSecond, _ := q.Get()
|
||||
if !reflect.DeepEqual(actualSecond, second) {
|
||||
t.Errorf("expected %v, got %v", second, actualSecond)
|
||||
}
|
||||
actualThird, _ := q.Get()
|
||||
if !reflect.DeepEqual(actualThird, first) {
|
||||
t.Errorf("expected %v, got %v", first, actualThird)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForAdded(q DelayingInterface, depth int) error {
|
||||
return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
|
||||
if q.Len() == depth {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func waitForWaitingQueueToFill(q DelayingInterface) error {
|
||||
return wait.Poll(1*time.Millisecond, 10*time.Second, func() (done bool, err error) {
|
||||
if len(q.(*delayingType).waitingForAddCh) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
}
|
26
vendor/k8s.io/client-go/util/workqueue/doc.go
generated
vendored
|
@ -1,26 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package workqueue provides a simple queue that supports the following
|
||||
// features:
|
||||
// * Fair: items are processed in the order in which they are added.
|
||||
// * Stingy: a single item will not be processed multiple times concurrently,
|
||||
// and if an item is added multiple times before it can be processed, it
|
||||
// will only be processed once.
|
||||
// * Multiple consumers and producers. In particular, it is allowed for an
|
||||
// item to be reenqueued while it is being processed.
|
||||
// * Shutdown notifications.
|
||||
package workqueue
|
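To make the "fair" and "stingy" properties concrete, a short sketch (illustrative only, not from the vendored code) of a producer/consumer pair; duplicate Adds of an item that has not started processing collapse into a single queue entry.

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()

	// Three Adds of the same unprocessed item result in one queue entry.
	for i := 0; i < 3; i++ {
		q.Add("task")
	}

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item) // printed once despite three Adds
			q.Done(item)
		}
	}()

	// Workers drain the remaining items, then get the shutdown notification.
	q.ShutDown()
	wg.Wait()
}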
195
vendor/k8s.io/client-go/util/workqueue/metrics.go
generated
vendored
|
@ -1,195 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This file provides abstractions for setting the provider (e.g., prometheus)
|
||||
// of metrics.
|
||||
|
||||
type queueMetrics interface {
|
||||
add(item t)
|
||||
get(item t)
|
||||
done(item t)
|
||||
}
|
||||
|
||||
// GaugeMetric represents a single numerical value that can arbitrarily go up
|
||||
// and down.
|
||||
type GaugeMetric interface {
|
||||
Inc()
|
||||
Dec()
|
||||
}
|
||||
|
||||
// CounterMetric represents a single numerical value that only ever
|
||||
// goes up.
|
||||
type CounterMetric interface {
|
||||
Inc()
|
||||
}
|
||||
|
||||
// SummaryMetric captures individual observations.
|
||||
type SummaryMetric interface {
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
type noopMetric struct{}
|
||||
|
||||
func (noopMetric) Inc() {}
|
||||
func (noopMetric) Dec() {}
|
||||
func (noopMetric) Observe(float64) {}
|
||||
|
||||
type defaultQueueMetrics struct {
|
||||
// current depth of a workqueue
|
||||
depth GaugeMetric
|
||||
// total number of adds handled by a workqueue
|
||||
adds CounterMetric
|
||||
// how long an item stays in a workqueue
|
||||
latency SummaryMetric
|
||||
// how long processing an item from a workqueue takes
|
||||
workDuration SummaryMetric
|
||||
addTimes map[t]time.Time
|
||||
processingStartTimes map[t]time.Time
|
||||
}
|
||||
|
||||
func (m *defaultQueueMetrics) add(item t) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.adds.Inc()
|
||||
m.depth.Inc()
|
||||
if _, exists := m.addTimes[item]; !exists {
|
||||
m.addTimes[item] = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *defaultQueueMetrics) get(item t) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.depth.Dec()
|
||||
m.processingStartTimes[item] = time.Now()
|
||||
if startTime, exists := m.addTimes[item]; exists {
|
||||
m.latency.Observe(sinceInMicroseconds(startTime))
|
||||
delete(m.addTimes, item)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *defaultQueueMetrics) done(item t) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if startTime, exists := m.processingStartTimes[item]; exists {
|
||||
m.workDuration.Observe(sinceInMicroseconds(startTime))
|
||||
delete(m.processingStartTimes, item)
|
||||
}
|
||||
}
|
||||
|
||||
// Gets the time since the specified start in microseconds.
|
||||
func sinceInMicroseconds(start time.Time) float64 {
|
||||
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
|
||||
}
|
||||
|
||||
type retryMetrics interface {
|
||||
retry()
|
||||
}
|
||||
|
||||
type defaultRetryMetrics struct {
|
||||
retries CounterMetric
|
||||
}
|
||||
|
||||
func (m *defaultRetryMetrics) retry() {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.retries.Inc()
|
||||
}
|
||||
|
||||
// MetricsProvider generates various metrics used by the queue.
|
||||
type MetricsProvider interface {
|
||||
NewDepthMetric(name string) GaugeMetric
|
||||
NewAddsMetric(name string) CounterMetric
|
||||
NewLatencyMetric(name string) SummaryMetric
|
||||
NewWorkDurationMetric(name string) SummaryMetric
|
||||
NewRetriesMetric(name string) CounterMetric
|
||||
}
|
||||
|
||||
type noopMetricsProvider struct{}
|
||||
|
||||
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
|
||||
return noopMetric{}
|
||||
}
|
||||
|
||||
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
|
||||
return noopMetric{}
|
||||
}
|
||||
|
||||
func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric {
|
||||
return noopMetric{}
|
||||
}
|
||||
|
||||
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric {
|
||||
return noopMetric{}
|
||||
}
|
||||
|
||||
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
|
||||
return noopMetric{}
|
||||
}
|
||||
|
||||
var metricsFactory = struct {
|
||||
metricsProvider MetricsProvider
|
||||
setProviders sync.Once
|
||||
}{
|
||||
metricsProvider: noopMetricsProvider{},
|
||||
}
|
||||
|
||||
func newQueueMetrics(name string) queueMetrics {
|
||||
var ret *defaultQueueMetrics
|
||||
if len(name) == 0 {
|
||||
return ret
|
||||
}
|
||||
return &defaultQueueMetrics{
|
||||
depth: metricsFactory.metricsProvider.NewDepthMetric(name),
|
||||
adds: metricsFactory.metricsProvider.NewAddsMetric(name),
|
||||
latency: metricsFactory.metricsProvider.NewLatencyMetric(name),
|
||||
workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name),
|
||||
addTimes: map[t]time.Time{},
|
||||
processingStartTimes: map[t]time.Time{},
|
||||
}
|
||||
}
|
||||
|
||||
func newRetryMetrics(name string) retryMetrics {
|
||||
var ret *defaultRetryMetrics
|
||||
if len(name) == 0 {
|
||||
return ret
|
||||
}
|
||||
return &defaultRetryMetrics{
|
||||
retries: metricsFactory.metricsProvider.NewRetriesMetric(name),
|
||||
}
|
||||
}
|
||||
|
||||
// SetProvider sets the metrics provider of the metricsFactory.
|
||||
func SetProvider(metricsProvider MetricsProvider) {
|
||||
metricsFactory.setProviders.Do(func() {
|
||||
metricsFactory.metricsProvider = metricsProvider
|
||||
})
|
||||
}
|
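Illustrative only: a hypothetical MetricsProvider that just logs, wired in through SetProvider before any named queue is constructed. The logMetric and logProvider names are made up for this sketch.

package main

import (
	"log"

	"k8s.io/client-go/util/workqueue"
)

// logMetric satisfies GaugeMetric, CounterMetric and SummaryMetric by logging.
type logMetric struct{ name string }

func (m logMetric) Inc()              { log.Printf("%s: inc", m.name) }
func (m logMetric) Dec()              { log.Printf("%s: dec", m.name) }
func (m logMetric) Observe(v float64) { log.Printf("%s: observe %v", m.name, v) }

// logProvider builds log-backed metrics for each named queue.
type logProvider struct{}

func (logProvider) NewDepthMetric(name string) workqueue.GaugeMetric  { return logMetric{name + "_depth"} }
func (logProvider) NewAddsMetric(name string) workqueue.CounterMetric { return logMetric{name + "_adds"} }
func (logProvider) NewLatencyMetric(name string) workqueue.SummaryMetric {
	return logMetric{name + "_latency"}
}
func (logProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric {
	return logMetric{name + "_work_duration"}
}
func (logProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
	return logMetric{name + "_retries"}
}

func main() {
	// SetProvider only takes effect once and must run before named queues are built.
	workqueue.SetProvider(logProvider{})

	q := workqueue.NewNamed("example")
	q.Add("item")
	item, _ := q.Get()
	q.Done(item)
	q.ShutDown()
}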
52
vendor/k8s.io/client-go/util/workqueue/parallelizer.go
generated
vendored
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
type DoWorkPieceFunc func(piece int)
|
||||
|
||||
// Parallelize is a very simple framework that allows for parallelizing
|
||||
// N independent pieces of work.
|
||||
func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
|
||||
toProcess := make(chan int, pieces)
|
||||
for i := 0; i < pieces; i++ {
|
||||
toProcess <- i
|
||||
}
|
||||
close(toProcess)
|
||||
|
||||
if pieces < workers {
|
||||
workers = pieces
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(workers)
|
||||
for i := 0; i < workers; i++ {
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer wg.Done()
|
||||
for piece := range toProcess {
|
||||
doWorkPiece(piece)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
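A minimal sketch (not from the vendored code) of using Parallelize to fan work out over a fixed number of workers; each piece index addresses one element of a slice.

package main

import (
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	items := []int{1, 2, 3, 4, 5, 6, 7, 8}
	var sum int64

	// Run at most 4 workers; each piece index maps to one element of items.
	workqueue.Parallelize(4, len(items), func(piece int) {
		atomic.AddInt64(&sum, int64(items[piece]))
	})

	fmt.Println("sum:", sum) // 36
}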
172
vendor/k8s.io/client-go/util/workqueue/queue.go
generated
vendored
|
@ -1,172 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
Add(item interface{})
|
||||
Len() int
|
||||
Get() (item interface{}, shutdown bool)
|
||||
Done(item interface{})
|
||||
ShutDown()
|
||||
ShuttingDown() bool
|
||||
}
|
||||
|
||||
// New constructs a new workqueue (see the package comment).
|
||||
func New() *Type {
|
||||
return NewNamed("")
|
||||
}
|
||||
|
||||
func NewNamed(name string) *Type {
|
||||
return &Type{
|
||||
dirty: set{},
|
||||
processing: set{},
|
||||
cond: sync.NewCond(&sync.Mutex{}),
|
||||
metrics: newQueueMetrics(name),
|
||||
}
|
||||
}
|
||||
|
||||
// Type is a work queue (see the package comment).
|
||||
type Type struct {
|
||||
// queue defines the order in which we will work on items. Every
|
||||
// element of queue should be in the dirty set and not in the
|
||||
// processing set.
|
||||
queue []t
|
||||
|
||||
// dirty defines all of the items that need to be processed.
|
||||
dirty set
|
||||
|
||||
// Things that are currently being processed are in the processing set.
|
||||
// These things may be simultaneously in the dirty set. When we finish
|
||||
// processing something and remove it from this set, we'll check if
|
||||
// it's in the dirty set, and if so, add it to the queue.
|
||||
processing set
|
||||
|
||||
cond *sync.Cond
|
||||
|
||||
shuttingDown bool
|
||||
|
||||
metrics queueMetrics
|
||||
}
|
||||
|
||||
type empty struct{}
|
||||
type t interface{}
|
||||
type set map[t]empty
|
||||
|
||||
func (s set) has(item t) bool {
|
||||
_, exists := s[item]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (s set) insert(item t) {
|
||||
s[item] = empty{}
|
||||
}
|
||||
|
||||
func (s set) delete(item t) {
|
||||
delete(s, item)
|
||||
}
|
||||
|
||||
// Add marks item as needing processing.
|
||||
func (q *Type) Add(item interface{}) {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
if q.shuttingDown {
|
||||
return
|
||||
}
|
||||
if q.dirty.has(item) {
|
||||
return
|
||||
}
|
||||
|
||||
q.metrics.add(item)
|
||||
|
||||
q.dirty.insert(item)
|
||||
if q.processing.has(item) {
|
||||
return
|
||||
}
|
||||
|
||||
q.queue = append(q.queue, item)
|
||||
q.cond.Signal()
|
||||
}
|
||||
|
||||
// Len returns the current queue length, for informational purposes only. You
|
||||
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
|
||||
// value; that can't be synchronized properly.
|
||||
func (q *Type) Len() int {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
return len(q.queue)
|
||||
}
|
||||
|
||||
// Get blocks until it can return an item to be processed. If shutdown = true,
|
||||
// the caller should end their goroutine. You must call Done with item when you
|
||||
// have finished processing it.
|
||||
func (q *Type) Get() (item interface{}, shutdown bool) {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
for len(q.queue) == 0 && !q.shuttingDown {
|
||||
q.cond.Wait()
|
||||
}
|
||||
if len(q.queue) == 0 {
|
||||
// We must be shutting down.
|
||||
return nil, true
|
||||
}
|
||||
|
||||
item, q.queue = q.queue[0], q.queue[1:]
|
||||
|
||||
q.metrics.get(item)
|
||||
|
||||
q.processing.insert(item)
|
||||
q.dirty.delete(item)
|
||||
|
||||
return item, false
|
||||
}
|
||||
|
||||
// Done marks item as done processing, and if it has been marked as dirty again
|
||||
// while it was being processed, it will be re-added to the queue for
|
||||
// re-processing.
|
||||
func (q *Type) Done(item interface{}) {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
|
||||
q.metrics.done(item)
|
||||
|
||||
q.processing.delete(item)
|
||||
if q.dirty.has(item) {
|
||||
q.queue = append(q.queue, item)
|
||||
q.cond.Signal()
|
||||
}
|
||||
}
|
||||
|
||||
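To make the dirty/processing interaction above concrete, a short sketch (illustrative only): re-adding an item while it is being processed only marks it dirty, and Done is what puts it back on the queue.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()

	q.Add("foo")
	item, _ := q.Get() // "foo" moves from the queue into the processing set

	// Re-adding while processing only marks the item dirty; it is not queued yet.
	q.Add(item)
	fmt.Println("len while processing:", q.Len()) // 0

	// Done sees the dirty mark and puts the item back on the queue.
	q.Done(item)
	fmt.Println("len after Done:", q.Len()) // 1
}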
// ShutDown will cause q to ignore all new items added to it. As soon as the
|
||||
// worker goroutines have drained the existing items in the queue, they will be
|
||||
// instructed to exit.
|
||||
func (q *Type) ShutDown() {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
q.shuttingDown = true
|
||||
q.cond.Broadcast()
|
||||
}
|
||||
|
||||
func (q *Type) ShuttingDown() bool {
|
||||
q.cond.L.Lock()
|
||||
defer q.cond.L.Unlock()
|
||||
|
||||
return q.shuttingDown
|
||||
}
|
161
vendor/k8s.io/client-go/util/workqueue/queue_test.go
generated
vendored
|
@ -1,161 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
// If something is seriously wrong this test will never complete.
|
||||
q := workqueue.New()
|
||||
|
||||
// Start producers
|
||||
const producers = 50
|
||||
producerWG := sync.WaitGroup{}
|
||||
producerWG.Add(producers)
|
||||
for i := 0; i < producers; i++ {
|
||||
go func(i int) {
|
||||
defer producerWG.Done()
|
||||
for j := 0; j < 50; j++ {
|
||||
q.Add(i)
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Start consumers
|
||||
const consumers = 10
|
||||
consumerWG := sync.WaitGroup{}
|
||||
consumerWG.Add(consumers)
|
||||
for i := 0; i < consumers; i++ {
|
||||
go func(i int) {
|
||||
defer consumerWG.Done()
|
||||
for {
|
||||
item, quit := q.Get()
|
||||
if item == "added after shutdown!" {
|
||||
t.Errorf("Got an item added after shutdown.")
|
||||
}
|
||||
if quit {
|
||||
return
|
||||
}
|
||||
t.Logf("Worker %v: begin processing %v", i, item)
|
||||
time.Sleep(3 * time.Millisecond)
|
||||
t.Logf("Worker %v: done processing %v", i, item)
|
||||
q.Done(item)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
producerWG.Wait()
|
||||
q.ShutDown()
|
||||
q.Add("added after shutdown!")
|
||||
consumerWG.Wait()
|
||||
}
|
||||
|
||||
func TestAddWhileProcessing(t *testing.T) {
|
||||
q := workqueue.New()
|
||||
|
||||
// Start producers
|
||||
const producers = 50
|
||||
producerWG := sync.WaitGroup{}
|
||||
producerWG.Add(producers)
|
||||
for i := 0; i < producers; i++ {
|
||||
go func(i int) {
|
||||
defer producerWG.Done()
|
||||
q.Add(i)
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Start consumers
|
||||
const consumers = 10
|
||||
consumerWG := sync.WaitGroup{}
|
||||
consumerWG.Add(consumers)
|
||||
for i := 0; i < consumers; i++ {
|
||||
go func(i int) {
|
||||
defer consumerWG.Done()
|
||||
// Every worker will re-add every item up to two times.
|
||||
// This tests the dirty-while-processing case.
|
||||
counters := map[interface{}]int{}
|
||||
for {
|
||||
item, quit := q.Get()
|
||||
if quit {
|
||||
return
|
||||
}
|
||||
counters[item]++
|
||||
if counters[item] < 2 {
|
||||
q.Add(item)
|
||||
}
|
||||
q.Done(item)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
producerWG.Wait()
|
||||
q.ShutDown()
|
||||
consumerWG.Wait()
|
||||
}
|
||||
|
||||
func TestLen(t *testing.T) {
|
||||
q := workqueue.New()
|
||||
q.Add("foo")
|
||||
if e, a := 1, q.Len(); e != a {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
q.Add("bar")
|
||||
if e, a := 2, q.Len(); e != a {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
q.Add("foo") // should not increase the queue length.
|
||||
if e, a := 2, q.Len(); e != a {
|
||||
t.Errorf("Expected %v, got %v", e, a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReinsert(t *testing.T) {
|
||||
q := workqueue.New()
|
||||
q.Add("foo")
|
||||
|
||||
// Start processing
|
||||
i, _ := q.Get()
|
||||
if i != "foo" {
|
||||
t.Errorf("Expected %v, got %v", "foo", i)
|
||||
}
|
||||
|
||||
// Add it back while processing
|
||||
q.Add(i)
|
||||
|
||||
// Finish it up
|
||||
q.Done(i)
|
||||
|
||||
// It should be back on the queue
|
||||
i, _ = q.Get()
|
||||
if i != "foo" {
|
||||
t.Errorf("Expected %v, got %v", "foo", i)
|
||||
}
|
||||
|
||||
// Finish that one up
|
||||
q.Done(i)
|
||||
|
||||
if a := q.Len(); a != 0 {
|
||||
t.Errorf("Expected queue to be empty. Has %v items", a)
|
||||
}
|
||||
}
|
69
vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go
generated
vendored
|
@ -1,69 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
// RateLimitingInterface is an interface that rate limits items being added to the queue.
|
||||
type RateLimitingInterface interface {
|
||||
DelayingInterface
|
||||
|
||||
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok.
|
||||
AddRateLimited(item interface{})
|
||||
|
||||
// Forget indicates that an item is finished being retried. It doesn't matter whether it's for a permanent failure
|
||||
// or for success; we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`; you
|
||||
// still have to call `Done` on the queue.
|
||||
Forget(item interface{})
|
||||
|
||||
// NumRequeues returns how many times the item was requeued.
|
||||
NumRequeues(item interface{}) int
|
||||
}
|
||||
|
||||
// NewRateLimitingQueue constructs a new workqueue with rate-limited queuing ability.
|
||||
// Remember to call Forget! If you don't, you may end up tracking failures forever.
|
||||
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
|
||||
return &rateLimitingType{
|
||||
DelayingInterface: NewDelayingQueue(),
|
||||
rateLimiter: rateLimiter,
|
||||
}
|
||||
}
|
||||
|
||||
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
|
||||
return &rateLimitingType{
|
||||
DelayingInterface: NewNamedDelayingQueue(name),
|
||||
rateLimiter: rateLimiter,
|
||||
}
|
||||
}
|
||||
|
||||
// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing.
|
||||
type rateLimitingType struct {
|
||||
DelayingInterface
|
||||
|
||||
rateLimiter RateLimiter
|
||||
}
|
||||
|
||||
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok.
|
||||
func (q *rateLimitingType) AddRateLimited(item interface{}) {
|
||||
q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
|
||||
}
|
||||
|
||||
func (q *rateLimitingType) NumRequeues(item interface{}) int {
|
||||
return q.rateLimiter.NumRequeues(item)
|
||||
}
|
||||
|
||||
func (q *rateLimitingType) Forget(item interface{}) {
|
||||
q.rateLimiter.Forget(item)
|
||||
}
|
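Illustrative only: the usual controller worker loop built on this interface. The maxRetries budget and the processNextItem helper are hypothetical names for this sketch, not part of the vendored package.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// maxRetries is a hypothetical per-item retry budget for this sketch.
const maxRetries = 5

func processNextItem(q workqueue.RateLimitingInterface, handle func(interface{}) error) bool {
	key, shutdown := q.Get()
	if shutdown {
		return false
	}
	defer q.Done(key)

	if err := handle(key); err == nil {
		// Success: stop the rate limiter from tracking this key.
		q.Forget(key)
	} else if q.NumRequeues(key) < maxRetries {
		// Transient failure: requeue with backoff from the rate limiter.
		q.AddRateLimited(key)
	} else {
		// Give up after too many attempts.
		q.Forget(key)
		fmt.Println("dropping", key, "after", maxRetries, "retries:", err)
	}
	return true
}

func main() {
	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
	defer q.ShutDown()

	q.Add("object-key")
	processNextItem(q, func(key interface{}) error {
		fmt.Println("synced", key)
		return nil
	})
}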
75
vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue_test.go
generated
vendored
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/util/clock"
|
||||
)
|
||||
|
||||
func TestRateLimitingQueue(t *testing.T) {
|
||||
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
|
||||
queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
delayingQueue := &delayingType{
|
||||
Interface: New(),
|
||||
clock: fakeClock,
|
||||
heartbeat: fakeClock.Tick(maxWait),
|
||||
stopCh: make(chan struct{}),
|
||||
waitingForAddCh: make(chan waitFor, 1000),
|
||||
metrics: newRetryMetrics(""),
|
||||
}
|
||||
queue.DelayingInterface = delayingQueue
|
||||
|
||||
queue.AddRateLimited("one")
|
||||
waitEntry := <-delayingQueue.waitingForAddCh
|
||||
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
queue.AddRateLimited("one")
|
||||
waitEntry = <-delayingQueue.waitingForAddCh
|
||||
if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := 2, queue.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
queue.AddRateLimited("two")
|
||||
waitEntry = <-delayingQueue.waitingForAddCh
|
||||
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
queue.AddRateLimited("two")
|
||||
waitEntry = <-delayingQueue.waitingForAddCh
|
||||
if e, a := 2*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
queue.Forget("one")
|
||||
if e, a := 0, queue.NumRequeues("one"); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
queue.AddRateLimited("one")
|
||||
waitEntry = <-delayingQueue.waitingForAddCh
|
||||
if e, a := 1*time.Millisecond, waitEntry.readyAt.Sub(fakeClock.Now()); e != a {
|
||||
t.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
|
||||
}
|
52
vendor/k8s.io/client-go/util/workqueue/timed_queue.go
generated
vendored
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import "time"
|
||||
|
||||
type TimedWorkQueue struct {
|
||||
*Type
|
||||
}
|
||||
|
||||
type TimedWorkQueueItem struct {
|
||||
StartTime time.Time
|
||||
Object interface{}
|
||||
}
|
||||
|
||||
func NewTimedWorkQueue() *TimedWorkQueue {
|
||||
return &TimedWorkQueue{New()}
|
||||
}
|
||||
|
||||
// Add adds the timedItem, which carries its own start timestamp, to the queue.
|
||||
func (q TimedWorkQueue) Add(timedItem *TimedWorkQueueItem) {
|
||||
q.Type.Add(timedItem)
|
||||
}
|
||||
|
||||
// Get gets the obj along with its timestamp from the queue.
|
||||
func (q TimedWorkQueue) Get() (timedItem *TimedWorkQueueItem, shutdown bool) {
|
||||
origin, shutdown := q.Type.Get()
|
||||
if origin == nil {
|
||||
return nil, shutdown
|
||||
}
|
||||
timedItem, _ = origin.(*TimedWorkQueueItem)
|
||||
return timedItem, shutdown
|
||||
}
|
||||
|
||||
func (q TimedWorkQueue) Done(timedItem *TimedWorkQueueItem) error {
|
||||
q.Type.Done(timedItem)
|
||||
return nil
|
||||
}
|
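A small sketch (not part of this diff) of the intended use: the caller stamps StartTime when enqueuing, and the consumer can report queue latency from it.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTimedWorkQueue()

	// The caller records when the object was first enqueued.
	q.Add(&workqueue.TimedWorkQueueItem{StartTime: time.Now(), Object: "object"})

	item, _ := q.Get()
	fmt.Printf("%v spent %v in the queue\n", item.Object, time.Since(item.StartTime))
	q.Done(item)
}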
38
vendor/k8s.io/client-go/util/workqueue/timed_queue_test.go
generated
vendored
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workqueue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
)
|
||||
|
||||
func TestNoMemoryLeak(t *testing.T) {
|
||||
timedQueue := NewTimedWorkQueue()
|
||||
timedQueue.Add(&TimedWorkQueueItem{Object: &v1.Pod{}, StartTime: time.Time{}})
|
||||
item, _ := timedQueue.Get()
|
||||
timedQueue.Add(item)
|
||||
// The item should still be in the timedQueue.
|
||||
timedQueue.Done(item)
|
||||
item, _ = timedQueue.Get()
|
||||
timedQueue.Done(item)
|
||||
if len(timedQueue.Type.processing) != 0 {
|
||||
t.Errorf("expect timedQueue.Type.processing to be empty!")
|
||||
}
|
||||
}
|