vendor: remove dep and use vndr

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-06-06 09:19:04 +02:00
parent 16f44674a4
commit 148e72d81e
GPG key ID: B2BEAD150DE936B9
16131 changed files with 73815 additions and 4235138 deletions
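
For context, vndr drives vendoring from a vendor.conf file that pins one dependency per line ("import-path revision [alternate-repo-url]") and then rewrites vendor/ wholesale at those revisions, which is what produces a large mechanical diff like this one. A hypothetical sketch of such entries (placeholders, not this repository's actual pins):

github.com/coreos/etcd <pinned-revision>
github.com/coreos/pkg <pinned-revision> https://github.com/coreos/pkg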

@ -1 +0,0 @@
Forked from etcd 2.3 release branch to support migration from 3.0 WAL to 2.3 WAL format

@ -1,48 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"fileutil_test.go",
"lock_test.go",
"preallocate_test.go",
"purge_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = [
"fileutil.go",
"lock.go",
"lock_unix.go",
"preallocate.go",
"purge.go",
"sync_linux.go",
],
tags = ["automanaged"],
deps = ["//vendor:github.com/coreos/pkg/capnslog"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

@ -1,75 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil implements utility functions related to files and paths.
package fileutil
import (
"io/ioutil"
"os"
"path"
"sort"
"github.com/coreos/pkg/capnslog"
)
const (
privateFileMode = 0600
// owner can make/remove files inside the directory
privateDirMode = 0700
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
)
// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
f := path.Join(dir, ".touch")
if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil {
return err
}
return os.Remove(f)
}
// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(dirpath string) ([]string, error) {
dir, err := os.Open(dirpath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exist. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
err := os.MkdirAll(dir, privateDirMode)
if err != nil && err != os.ErrExist {
return err
}
return IsDirWriteable(dir)
}
func Exist(name string) bool {
_, err := os.Stat(name)
return err == nil
}
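
A minimal usage sketch of these helpers; the import path below is the forked location referenced elsewhere in this tree, and the directory name is hypothetical:

package main

import (
	"fmt"
	"log"

	fileutil "k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil"
)

func main() {
	dir := "/tmp/fileutil-demo" // hypothetical path
	// Create the directory with 0700 if needed and verify it is writable.
	if err := fileutil.TouchDirAll(dir); err != nil {
		log.Fatal(err)
	}
	// List its entries in sorted order.
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names, fileutil.Exist(dir))
}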

@ -1,96 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"io/ioutil"
"os"
"os/user"
"path/filepath"
"reflect"
"testing"
)
func TestIsDirWriteable(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unexpected ioutil.TempDir error: %v", err)
}
defer os.RemoveAll(tmpdir)
if err = IsDirWriteable(tmpdir); err != nil {
t.Fatalf("unexpected IsDirWriteable error: %v", err)
}
if err = os.Chmod(tmpdir, 0444); err != nil {
t.Fatalf("unexpected os.Chmod error: %v", err)
}
me, err := user.Current()
if err != nil {
// err can be non-nil when cross compiled
// http://stackoverflow.com/questions/20609415/cross-compiling-user-current-not-implemented-on-linux-amd64
t.Skipf("failed to get current user: %v", err)
}
if me.Name == "root" || me.Name == "Administrator" {
// ideally we should check CAP_DAC_OVERRIDE.
// but it does not matter for tests.
t.Skipf("running as a superuser")
}
if err := IsDirWriteable(tmpdir); err == nil {
t.Fatalf("expected IsDirWriteable to error")
}
}
func TestReadDir(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
defer os.RemoveAll(tmpdir)
if err != nil {
t.Fatalf("unexpected ioutil.TempDir error: %v", err)
}
files := []string{"def", "abc", "xyz", "ghi"}
for _, f := range files {
var fh *os.File
fh, err = os.Create(filepath.Join(tmpdir, f))
if err != nil {
t.Fatalf("error creating file: %v", err)
}
if err = fh.Close(); err != nil {
t.Fatalf("error closing file: %v", err)
}
}
fs, err := ReadDir(tmpdir)
if err != nil {
t.Fatalf("error calling ReadDir: %v", err)
}
wfs := []string{"abc", "def", "ghi", "xyz"}
if !reflect.DeepEqual(fs, wfs) {
t.Fatalf("ReadDir: got %v, want %v", fs, wfs)
}
}
func TestExist(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "fileutil")
if err != nil {
t.Fatal(err)
}
f.Close()
if g := Exist(f.Name()); g != true {
t.Errorf("exist = %v, want true", g)
}
os.Remove(f.Name())
if g := Exist(f.Name()); g != false {
t.Errorf("exist = %v, want false", g)
}
}

@ -1,29 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
type Lock interface {
// Name returns the name of the file.
Name() string
// TryLock acquires exclusivity on the lock without blocking.
TryLock() error
// Lock acquires exclusivity on the lock.
Lock() error
// Unlock unlocks the lock.
Unlock() error
// Destroy should be called after Unlock to clean up
// the resources.
Destroy() error
}
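
A short sketch of how a caller drives this interface, following the NewLock/TryLock pattern used by the tests and by PurgeFile below (the lock path is hypothetical; the import path is the forked location referenced elsewhere in this tree):

package main

import (
	"fmt"
	"log"
	"os"

	fileutil "k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil"
)

func main() {
	// The lock file must already exist; NewLock only wraps it.
	f, err := os.Create("/tmp/demo.lock") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	f.Close()

	l, err := fileutil.NewLock(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	defer l.Destroy() // clean up after Unlock

	// TryLock fails fast with ErrLocked when another holder has the file.
	if err := l.TryLock(); err == fileutil.ErrLocked {
		fmt.Println("already locked elsewhere")
		return
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println("holding lock on", l.Name())

	if err := l.Unlock(); err != nil {
		log.Fatal(err)
	}
}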

@ -1,79 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"errors"
"os"
"syscall"
"time"
)
var (
ErrLocked = errors.New("file already locked")
)
type lock struct {
fname string
file *os.File
}
func (l *lock) Name() string {
return l.fname
}
func (l *lock) TryLock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
f, err := os.Open(l.fname)
if err != nil {
return ErrLocked
}
l.file = f
return nil
}
func (l *lock) Lock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
for {
f, err := os.Open(l.fname)
if err == nil {
l.file = f
return nil
}
time.Sleep(10 * time.Millisecond)
}
}
func (l *lock) Unlock() error {
return l.file.Close()
}
func (l *lock) Destroy() error {
return nil
}
func NewLock(file string) (Lock, error) {
l := &lock{fname: file}
return l, nil
}

@ -1,87 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
func (l *lock) TryLock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
func (l *lock) Lock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
}
func (l *lock) Unlock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.OpenFile(file, os.O_WRONLY, 0600)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

@ -1,96 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"io/ioutil"
"os"
"testing"
"time"
)
func TestLockAndUnlock(t *testing.T) {
f, err := ioutil.TempFile("", "lock")
if err != nil {
t.Fatal(err)
}
f.Close()
defer func() {
err = os.Remove(f.Name())
if err != nil {
t.Fatal(err)
}
}()
// lock the file
l, err := NewLock(f.Name())
if err != nil {
t.Fatal(err)
}
defer l.Destroy()
err = l.Lock()
if err != nil {
t.Fatal(err)
}
// try lock a locked file
dupl, err := NewLock(f.Name())
if err != nil {
t.Fatal(err)
}
err = dupl.TryLock()
if err != ErrLocked {
t.Errorf("err = %v, want %v", err, ErrLocked)
}
// unlock the file
err = l.Unlock()
if err != nil {
t.Fatal(err)
}
// try lock the unlocked file
err = dupl.TryLock()
if err != nil {
t.Errorf("err = %v, want %v", err, nil)
}
defer dupl.Destroy()
// blocking on locked file
locked := make(chan struct{}, 1)
go func() {
l.Lock()
locked <- struct{}{}
}()
select {
case <-locked:
t.Error("unexpected unblocking")
case <-time.After(100 * time.Millisecond):
}
// unlock
err = dupl.Unlock()
if err != nil {
t.Fatal(err)
}
// the previously blocked routine should be unblocked
select {
case <-locked:
case <-time.After(1 * time.Second):
t.Error("unexpected blocking")
}
}

@ -1,65 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9,!solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
func (l *lock) TryLock() error {
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil && err == syscall.EWOULDBLOCK {
return ErrLocked
}
return err
}
func (l *lock) Lock() error {
return syscall.Flock(l.fd, syscall.LOCK_EX)
}
func (l *lock) Unlock() error {
return syscall.Flock(l.fd, syscall.LOCK_UN)
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

@ -1,60 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package fileutil
import (
"errors"
"os"
)
var (
ErrLocked = errors.New("file already locked")
)
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
func (l *lock) TryLock() error {
return nil
}
func (l *lock) Lock() error {
return nil
}
func (l *lock) Unlock() error {
return nil
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

@ -1,28 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package fileutil
import "os"
// Preallocate tries to allocate the space for the given
// file. This operation is only supported on Linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
return nil
}

@ -1,42 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Preallocate tries to allocate the space for the given
// file. This operation is only supported on Linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
// use mode = 1 to keep size
// see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes))
if err != nil {
errno, ok := err.(syscall.Errno)
// treat not support as nil error
if ok && errno == syscall.ENOTSUP {
return nil
}
return err
}
return nil
}

@ -1,53 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"io/ioutil"
"os"
"runtime"
"testing"
)
func TestPreallocate(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skipf("skip testPreallocate, OS = %s", runtime.GOOS)
}
p, err := ioutil.TempDir(os.TempDir(), "preallocateTest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(p)
f, err := ioutil.TempFile(p, "")
if err != nil {
t.Fatal(err)
}
size := 64 * 1000
err = Preallocate(f, size)
if err != nil {
t.Fatal(err)
}
stat, err := f.Stat()
if err != nil {
t.Fatal(err)
}
if stat.Size() != 0 {
t.Errorf("size = %d, want %d", stat.Size(), 0)
}
}

@ -1,80 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path"
"sort"
"strings"
"time"
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
errC := make(chan error, 1)
go func() {
for {
fnames, err := ReadDir(dirname)
if err != nil {
errC <- err
return
}
newfnames := make([]string, 0)
for _, fname := range fnames {
if strings.HasSuffix(fname, suffix) {
newfnames = append(newfnames, fname)
}
}
sort.Strings(newfnames)
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
l, err := NewLock(f)
if err != nil {
errC <- err
return
}
err = l.TryLock()
if err != nil {
break
}
err = os.Remove(f)
if err != nil {
errC <- err
return
}
err = l.Unlock()
if err != nil {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
err = l.Destroy()
if err != nil {
plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
plog.Infof("purged file %s successfully", f)
newfnames = newfnames[1:]
}
select {
case <-time.After(interval):
case <-stop:
return
}
}
}()
return errC
}

@ -1,161 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
"time"
)
func TestPurgeFile(t *testing.T) {
dir, err := ioutil.TempDir("", "purgefile")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
for i := 0; i < 5; i++ {
_, err = os.Create(path.Join(dir, fmt.Sprintf("%d.test", i)))
if err != nil {
t.Fatal(err)
}
}
stop := make(chan struct{})
// keep at most 3 most recent files
errch := PurgeFile(dir, "test", 3, time.Millisecond, stop)
// create 5 more files
for i := 5; i < 10; i++ {
_, err = os.Create(path.Join(dir, fmt.Sprintf("%d.test", i)))
if err != nil {
t.Fatal(err)
}
time.Sleep(10 * time.Millisecond)
}
// purge routine should purge 7 out of 10 files and only keep the
// 3 most recent ones.
// wait for purging for at most 100ms.
var fnames []string
for i := 0; i < 10; i++ {
fnames, err = ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(fnames) <= 3 {
break
}
time.Sleep(10 * time.Millisecond)
}
wnames := []string{"7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
t.Errorf("filenames = %v, want %v", fnames, wnames)
}
// no error should be reported from purge routine
select {
case err := <-errch:
t.Errorf("unexpected purge error %v", err)
case <-time.After(time.Millisecond):
}
close(stop)
}
func TestPurgeFileHoldingLock(t *testing.T) {
dir, err := ioutil.TempDir("", "purgefile")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
for i := 0; i < 10; i++ {
_, err = os.Create(path.Join(dir, fmt.Sprintf("%d.test", i)))
if err != nil {
t.Fatal(err)
}
}
// create a purge barrier at 5
l, err := NewLock(path.Join(dir, fmt.Sprintf("%d.test", 5)))
err = l.Lock()
if err != nil {
t.Fatal(err)
}
stop := make(chan struct{})
errch := PurgeFile(dir, "test", 3, time.Millisecond, stop)
var fnames []string
for i := 0; i < 10; i++ {
fnames, err = ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(fnames) <= 5 {
break
}
time.Sleep(10 * time.Millisecond)
}
wnames := []string{"5.test", "6.test", "7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
t.Errorf("filenames = %v, want %v", fnames, wnames)
}
select {
case err = <-errch:
t.Errorf("unexpected purge error %v", err)
case <-time.After(time.Millisecond):
}
// remove the purge barrier
err = l.Unlock()
if err != nil {
t.Fatal(err)
}
err = l.Destroy()
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
fnames, err = ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(fnames) <= 3 {
break
}
time.Sleep(10 * time.Millisecond)
}
wnames = []string{"7.test", "8.test", "9.test"}
if !reflect.DeepEqual(fnames, wnames) {
t.Errorf("filenames = %v, want %v", fnames, wnames)
}
select {
case err := <-errch:
t.Errorf("unexpected purge error %v", err)
case <-time.After(time.Millisecond):
}
close(stop)
}

@ -1,26 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package fileutil
import "os"
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
func Fdatasync(f *os.File) error {
return f.Sync()
}

@ -1,29 +0,0 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
func Fdatasync(f *os.File) error {
return syscall.Fdatasync(int(f.Fd()))
}

@ -1,48 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"decoder.go",
"doc.go",
"encoder.go",
"metrics.go",
"multi_readcloser.go",
"repair.go",
"util.go",
"wal.go",
],
tags = ["automanaged"],
deps = [
"//third_party/forked/etcd237/pkg/fileutil:go_default_library",
"//vendor:github.com/coreos/etcd/pkg/crc",
"//vendor:github.com/coreos/etcd/pkg/pbutil",
"//vendor:github.com/coreos/etcd/raft/raftpb",
"//vendor:github.com/coreos/etcd/wal/walpb",
"//vendor:github.com/coreos/pkg/capnslog",
"//vendor:github.com/prometheus/client_golang/prometheus",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//third_party/forked/etcd237/wal/walpb:all-srcs",
],
tags = ["automanaged"],
)

@ -1,103 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bufio"
"encoding/binary"
"hash"
"io"
"sync"
"github.com/coreos/etcd/pkg/crc"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/wal/walpb"
)
type decoder struct {
mu sync.Mutex
br *bufio.Reader
c io.Closer
crc hash.Hash32
}
func newDecoder(rc io.ReadCloser) *decoder {
return &decoder{
br: bufio.NewReader(rc),
c: rc,
crc: crc.New(0, crcTable),
}
}
func (d *decoder) decode(rec *walpb.Record) error {
d.mu.Lock()
defer d.mu.Unlock()
rec.Reset()
l, err := readInt64(d.br)
if err != nil {
return err
}
data := make([]byte, l)
if _, err = io.ReadFull(d.br, data); err != nil {
// ReadFull returns io.EOF only if no bytes were read;
// the decoder should treat this as io.ErrUnexpectedEOF instead.
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
if err := rec.Unmarshal(data); err != nil {
return err
}
// skip crc checking if the record type is crcType
if rec.Type == crcType {
return nil
}
d.crc.Write(rec.Data)
return rec.Validate(d.crc.Sum32())
}
func (d *decoder) updateCRC(prevCrc uint32) {
d.crc = crc.New(prevCrc, crcTable)
}
func (d *decoder) lastCRC() uint32 {
return d.crc.Sum32()
}
func (d *decoder) close() error {
return d.c.Close()
}
func mustUnmarshalEntry(d []byte) raftpb.Entry {
var e raftpb.Entry
pbutil.MustUnmarshal(&e, d)
return e
}
func mustUnmarshalState(d []byte) raftpb.HardState {
var s raftpb.HardState
pbutil.MustUnmarshal(&s, d)
return s
}
func readInt64(r io.Reader) (int64, error) {
var n int64
err := binary.Read(r, binary.LittleEndian, &n)
return n, err
}

@ -1,68 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package wal provides an implementation of a write ahead log that is used by
etcd.
A WAL is created at a particular directory and is made up of a number of
segmented WAL files. The raft state and entries are appended to them
with the Save method:
metadata := []byte{}
w, err := wal.Create("/var/lib/etcd", metadata)
...
err := w.Save(s, ents)
After saving a raft snapshot to disk, the SaveSnapshot method should be called to
record it, so the WAL can match the saved snapshot when restarting.
err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
When a user has finished using a WAL it must be closed:
w.Close()
WAL files are placed inside of the directory in the following format:
$seq-$index.wal
The first WAL file to be created will be 0000000000000000-0000000000000000.wal
indicating an initial sequence of 0 and an initial raft index of 0. The first
entry written to WAL MUST have raft index 0.
WAL will cut its current wal file once its size exceeds the segment size (64MB in this fork). This will increment an internal
sequence number and cause a new file to be created. If the last raft index saved
was 0x20 and this is the first time cut has been called on this WAL, then the sequence will
increment from 0x0 to 0x1 and the new file will be 0000000000000001-0000000000000021.wal.
If another 0x10 entries are written with increasing indexes before the next cut, the file created then will be
0000000000000002-0000000000000031.wal.
At a later time a WAL can be opened at a particular snapshot. If there is no
snapshot, an empty snapshot should be passed in.
w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
...
The snapshot must have been written to the WAL.
Additional items cannot be Saved to this WAL until all of the items from the given
snapshot to the end of the WAL are read first:
metadata, state, ents, err := w.ReadAll()
This will give you the metadata, the last raft.State and the slice of
raft.Entry items in the log.
*/
package wal
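
Putting those fragments together, a self-contained sketch of the create/append path; the wal import path assumes the forked tree referenced above, and the directory and payloads are hypothetical:

package main

import (
	"log"

	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/wal/walpb"

	wal "k8s.io/kubernetes/third_party/forked/etcd237/wal"
)

func main() {
	// Create refuses to run if the directory already contains files.
	w, err := wal.Create("/var/lib/etcd-demo", []byte("demo-metadata"))
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Append a hard state plus one entry; Save fsyncs when required.
	st := raftpb.HardState{Term: 1, Vote: 1, Commit: 1}
	ents := []raftpb.Entry{{Term: 1, Index: 1, Data: []byte("hello")}}
	if err := w.Save(st, ents); err != nil {
		log.Fatal(err)
	}

	// Record a snapshot marker so a later Open/ReadAll can start from it.
	if err := w.SaveSnapshot(walpb.Snapshot{Index: 1, Term: 1}); err != nil {
		log.Fatal(err)
	}
}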

@ -1,89 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"bufio"
"encoding/binary"
"hash"
"io"
"sync"
"github.com/coreos/etcd/pkg/crc"
"github.com/coreos/etcd/wal/walpb"
)
type encoder struct {
mu sync.Mutex
bw *bufio.Writer
crc hash.Hash32
buf []byte
uint64buf []byte
}
func newEncoder(w io.Writer, prevCrc uint32) *encoder {
return &encoder{
bw: bufio.NewWriter(w),
crc: crc.New(prevCrc, crcTable),
// 1MB buffer
buf: make([]byte, 1024*1024),
uint64buf: make([]byte, 8),
}
}
func (e *encoder) encode(rec *walpb.Record) error {
e.mu.Lock()
defer e.mu.Unlock()
e.crc.Write(rec.Data)
rec.Crc = e.crc.Sum32()
var (
data []byte
err error
n int
)
if rec.Size() > len(e.buf) {
data, err = rec.Marshal()
if err != nil {
return err
}
} else {
n, err = rec.MarshalTo(e.buf)
if err != nil {
return err
}
data = e.buf[:n]
}
if err = writeInt64(e.bw, int64(len(data)), e.uint64buf); err != nil {
return err
}
_, err = e.bw.Write(data)
return err
}
func (e *encoder) flush() error {
e.mu.Lock()
defer e.mu.Unlock()
return e.bw.Flush()
}
func writeInt64(w io.Writer, n int64, buf []byte) error {
// http://golang.org/src/encoding/binary/binary.go
binary.LittleEndian.PutUint64(buf, uint64(n))
_, err := w.Write(buf)
return err
}

@ -1,38 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import "github.com/prometheus/client_golang/prometheus"
var (
syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "wal",
Name: "fsync_durations_seconds",
Help: "The latency distributions of fsync called by wal.",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})
lastIndexSaved = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "wal",
Name: "last_index_saved",
Help: "The index of the last entry saved by wal.",
})
)
func init() {
prometheus.MustRegister(syncDurations)
prometheus.MustRegister(lastIndexSaved)
}

@ -1,45 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import "io"
type multiReadCloser struct {
closers []io.Closer
reader io.Reader
}
func (mc *multiReadCloser) Close() error {
var err error
for i := range mc.closers {
err = mc.closers[i].Close()
}
return err
}
func (mc *multiReadCloser) Read(p []byte) (int, error) {
return mc.reader.Read(p)
}
func MultiReadCloser(readClosers ...io.ReadCloser) io.ReadCloser {
cs := make([]io.Closer, len(readClosers))
rs := make([]io.Reader, len(readClosers))
for i := range readClosers {
cs[i] = readClosers[i]
rs[i] = readClosers[i]
}
r := io.MultiReader(rs...)
return &multiReadCloser{cs, r}
}

@ -1,107 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"io"
"os"
"path"
"k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil"
"github.com/coreos/etcd/wal/walpb"
)
// Repair tries to repair ErrUnexpectedEOF in the
// last wal file by truncating.
func Repair(dirpath string) bool {
f, err := openLast(dirpath)
if err != nil {
return false
}
defer f.Close()
n := 0
rec := &walpb.Record{}
decoder := newDecoder(f)
defer decoder.close()
for {
err := decoder.decode(rec)
switch err {
case nil:
n += 8 + rec.Size()
// update crc of the decoder when necessary
switch rec.Type {
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// no need to match a 0 crc, since the decoder is a new one in this case.
if crc != 0 && rec.Validate(crc) != nil {
return false
}
decoder.updateCRC(rec.Crc)
}
continue
case io.EOF:
return true
case io.ErrUnexpectedEOF:
plog.Noticef("repairing %v", f.Name())
bf, bferr := os.Create(f.Name() + ".broken")
if bferr != nil {
plog.Errorf("could not repair %v, failed to create backup file", f.Name())
return false
}
defer bf.Close()
if _, err = f.Seek(0, os.SEEK_SET); err != nil {
plog.Errorf("could not repair %v, failed to read file", f.Name())
return false
}
if _, err = io.Copy(bf, f); err != nil {
plog.Errorf("could not repair %v, failed to copy file", f.Name())
return false
}
if err = f.Truncate(int64(n)); err != nil {
plog.Errorf("could not repair %v, failed to truncate file", f.Name())
return false
}
if err = f.Sync(); err != nil {
plog.Errorf("could not repair %v, failed to sync file", f.Name())
return false
}
return true
default:
plog.Errorf("could not repair error (%v)", err)
return false
}
}
}
// openLast opens the last wal file for read and write.
func openLast(dirpath string) (*os.File, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
return nil, err
}
names = checkWalNames(names)
if len(names) == 0 {
return nil, ErrFileNotFound
}
last := path.Join(dirpath, names[len(names)-1])
return os.OpenFile(last, os.O_RDWR, 0)
}

@ -1,93 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"errors"
"fmt"
"strings"
"k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil"
)
var (
badWalName = errors.New("bad wal name")
)
func Exist(dirpath string) bool {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
return false
}
return len(names) != 0
}
// searchIndex returns the last array index of names whose raft index section is
// equal to or smaller than the given index.
// The given names MUST be sorted.
func searchIndex(names []string, index uint64) (int, bool) {
for i := len(names) - 1; i >= 0; i-- {
name := names[i]
_, curIndex, err := parseWalName(name)
if err != nil {
plog.Panicf("parse correct name should never fail: %v", err)
}
if index >= curIndex {
return i, true
}
}
return -1, false
}
// names should have been sorted based on sequence number.
// isValidSeq checks whether seq increases continuously.
func isValidSeq(names []string) bool {
var lastSeq uint64
for _, name := range names {
curSeq, _, err := parseWalName(name)
if err != nil {
plog.Panicf("parse correct name should never fail: %v", err)
}
if lastSeq != 0 && lastSeq != curSeq-1 {
return false
}
lastSeq = curSeq
}
return true
}
func checkWalNames(names []string) []string {
wnames := make([]string, 0)
for _, name := range names {
if _, _, err := parseWalName(name); err != nil {
plog.Warningf("ignored file %v in wal", name)
continue
}
wnames = append(wnames, name)
}
return wnames
}
func parseWalName(str string) (seq, index uint64, err error) {
if !strings.HasSuffix(str, ".wal") {
return 0, 0, badWalName
}
_, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
return seq, index, err
}
func walName(seq, index uint64) string {
return fmt.Sprintf("%016x-%016x.wal", seq, index)
}

@ -1,571 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path"
"reflect"
"sync"
"time"
"k8s.io/kubernetes/third_party/forked/etcd237/pkg/fileutil"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/wal/walpb"
"github.com/coreos/pkg/capnslog"
)
const (
metadataType int64 = iota + 1
entryType
stateType
crcType
snapshotType
// the owner can make/remove files inside the directory
privateDirMode = 0700
// the expected size of each wal segment file.
// the actual size might be bigger than it.
segmentSizeBytes = 64 * 1000 * 1000 // 64MB
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
ErrMetadataConflict = errors.New("wal: conflicting metadata found")
ErrFileNotFound = errors.New("wal: file not found")
ErrCRCMismatch = errors.New("wal: crc mismatch")
ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
ErrSnapshotNotFound = errors.New("wal: snapshot not found")
crcTable = crc32.MakeTable(crc32.Castagnoli)
)
// WAL is a logical representation of the stable storage.
// WAL is either in read mode or append mode but not both.
// A newly created WAL is in append mode, and ready for appending records.
// A just opened WAL is in read mode, and ready for reading records.
// The WAL will be ready for appending after reading out all the previous records.
type WAL struct {
dir string // the living directory of the underlay files
metadata []byte // metadata recorded at the head of each WAL
state raftpb.HardState // hardstate recorded at the head of WAL
start walpb.Snapshot // snapshot to start reading
decoder *decoder // decoder to decode records
mu sync.Mutex
f *os.File // underlay file opened for appending, sync
seq uint64 // sequence of the wal file currently used for writes
enti uint64 // index of the last entry saved to the wal
encoder *encoder // encoder to encode records
locks []fileutil.Lock // the file locks the WAL is holding (the name is increasing)
}
// Create creates a WAL ready for appending records. The given metadata is
// recorded at the head of each WAL file, and can be retrieved with ReadAll.
func Create(dirpath string, metadata []byte) (*WAL, error) {
if Exist(dirpath) {
return nil, os.ErrExist
}
if err := os.MkdirAll(dirpath, privateDirMode); err != nil {
return nil, err
}
p := path.Join(dirpath, walName(0, 0))
f, err := os.OpenFile(p, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return nil, err
}
l, err := fileutil.NewLock(f.Name())
if err != nil {
return nil, err
}
if err = l.Lock(); err != nil {
return nil, err
}
w := &WAL{
dir: dirpath,
metadata: metadata,
seq: 0,
f: f,
encoder: newEncoder(f, 0),
}
w.locks = append(w.locks, l)
if err := w.saveCrc(0); err != nil {
return nil, err
}
if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
return nil, err
}
if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil {
return nil, err
}
return w, nil
}
// Open opens the WAL at the given snap.
// The snap SHOULD have been previously saved to the WAL, or the following
// ReadAll will fail.
// The returned WAL is ready to read and the first record will be the one after
// the given snap. The WAL cannot be appended to before reading out all of its
// previous records.
func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
return openAtIndex(dirpath, snap, true)
}
// OpenForRead only opens the wal files for read.
// Write on a read only wal panics.
func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
return openAtIndex(dirpath, snap, false)
}
func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
return nil, err
}
names = checkWalNames(names)
if len(names) == 0 {
return nil, ErrFileNotFound
}
nameIndex, ok := searchIndex(names, snap.Index)
if !ok || !isValidSeq(names[nameIndex:]) {
return nil, ErrFileNotFound
}
// open the wal files for reading
rcs := make([]io.ReadCloser, 0)
ls := make([]fileutil.Lock, 0)
for _, name := range names[nameIndex:] {
f, err := os.Open(path.Join(dirpath, name))
if err != nil {
return nil, err
}
l, err := fileutil.NewLock(f.Name())
if err != nil {
return nil, err
}
err = l.TryLock()
if err != nil {
if write {
return nil, err
}
}
rcs = append(rcs, f)
ls = append(ls, l)
}
rc := MultiReadCloser(rcs...)
// create a WAL ready for reading
w := &WAL{
dir: dirpath,
start: snap,
decoder: newDecoder(rc),
locks: ls,
}
if write {
// open the last wal file for appending
seq, _, err := parseWalName(names[len(names)-1])
if err != nil {
rc.Close()
return nil, err
}
last := path.Join(dirpath, names[len(names)-1])
f, err := os.OpenFile(last, os.O_WRONLY|os.O_APPEND, 0)
if err != nil {
rc.Close()
return nil, err
}
err = fileutil.Preallocate(f, segmentSizeBytes)
if err != nil {
rc.Close()
plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
return nil, err
}
w.f = f
w.seq = seq
}
return w, nil
}
// ReadAll reads out records of the current WAL.
// If opened in write mode, it must read out all records until EOF, or an error
// will be returned.
// If opened in read mode, it will try to read all records if possible.
// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
// If the loaded snap doesn't match the expected one, it will return
// all the records and the error ErrSnapshotMismatch.
// TODO: detect not-last-snap error.
// TODO: maybe loosen the checking of match.
// After ReadAll, the WAL will be ready for appending new records.
func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
w.mu.Lock()
defer w.mu.Unlock()
rec := &walpb.Record{}
decoder := w.decoder
var match bool
for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
switch rec.Type {
case entryType:
e := mustUnmarshalEntry(rec.Data)
if e.Index > w.start.Index {
ents = append(ents[:e.Index-w.start.Index-1], e)
}
w.enti = e.Index
case stateType:
state = mustUnmarshalState(rec.Data)
case metadataType:
if metadata != nil && !reflect.DeepEqual(metadata, rec.Data) {
state.Reset()
return nil, state, nil, ErrMetadataConflict
}
metadata = rec.Data
case crcType:
crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// no need to match a 0 crc, since the decoder is a new one in this case.
if crc != 0 && rec.Validate(crc) != nil {
state.Reset()
return nil, state, nil, ErrCRCMismatch
}
decoder.updateCRC(rec.Crc)
case snapshotType:
var snap walpb.Snapshot
pbutil.MustUnmarshal(&snap, rec.Data)
if snap.Index == w.start.Index {
if snap.Term != w.start.Term {
state.Reset()
return nil, state, nil, ErrSnapshotMismatch
}
match = true
}
default:
state.Reset()
return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
}
}
switch w.f {
case nil:
// We do not have to read out all entries in read mode.
// The last record may be a partially written one, so
// io.ErrUnexpectedEOF might be returned.
if err != io.EOF && err != io.ErrUnexpectedEOF {
state.Reset()
return nil, state, nil, err
}
default:
// We must read all of the entries if WAL is opened in write mode.
if err != io.EOF {
state.Reset()
return nil, state, nil, err
}
}
err = nil
if !match {
err = ErrSnapshotNotFound
}
// close decoder, disable reading
w.decoder.close()
w.start = walpb.Snapshot{}
w.metadata = metadata
if w.f != nil {
// create encoder (chain crc with the decoder), enable appending
w.encoder = newEncoder(w.f, w.decoder.lastCRC())
w.decoder = nil
lastIndexSaved.Set(float64(w.enti))
}
return metadata, state, ents, err
}
// cut closes the current file being written and creates a new one ready for appending.
// cut first creates a temp wal file and writes the necessary headers into it,
// then atomically renames the temp wal file into place.
func (w *WAL) cut() error {
// close old wal file
if err := w.sync(); err != nil {
return err
}
if err := w.f.Close(); err != nil {
return err
}
fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1))
ftpath := fpath + ".tmp"
// create a temp wal file with name sequence + 1, or truncate the existing one
ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
// update writer and save the previous crc
w.f = ft
prevCrc := w.encoder.crc.Sum32()
w.encoder = newEncoder(w.f, prevCrc)
if err = w.saveCrc(prevCrc); err != nil {
return err
}
if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
return err
}
if err = w.saveState(&w.state); err != nil {
return err
}
// close temp wal file
if err = w.sync(); err != nil {
return err
}
if err = w.f.Close(); err != nil {
return err
}
// atomically move temp wal file to wal file
if err = os.Rename(ftpath, fpath); err != nil {
return err
}
// open the wal file and update writer again
f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
return err
}
if err = fileutil.Preallocate(f, segmentSizeBytes); err != nil {
plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
return err
}
w.f = f
prevCrc = w.encoder.crc.Sum32()
w.encoder = newEncoder(w.f, prevCrc)
// lock the new wal file
l, err := fileutil.NewLock(f.Name())
if err != nil {
return err
}
if err := l.Lock(); err != nil {
return err
}
w.locks = append(w.locks, l)
// increase the wal seq
w.seq++
plog.Infof("segmented wal file %v is created", fpath)
return nil
}
func (w *WAL) sync() error {
if w.encoder != nil {
if err := w.encoder.flush(); err != nil {
return err
}
}
start := time.Now()
err := fileutil.Fdatasync(w.f)
syncDurations.Observe(float64(time.Since(start)) / float64(time.Second))
return err
}
// ReleaseLockTo releases the locks whose index is smaller than the given index,
// except the largest one among them.
// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
func (w *WAL) ReleaseLockTo(index uint64) error {
w.mu.Lock()
defer w.mu.Unlock()
var smaller int
found := false
for i, l := range w.locks {
_, lockIndex, err := parseWalName(path.Base(l.Name()))
if err != nil {
return err
}
if lockIndex >= index {
smaller = i - 1
found = true
break
}
}
// if no lock index is greater than the release index, we can
// release locks up to the last one (exclusive).
if !found && len(w.locks) != 0 {
smaller = len(w.locks) - 1
}
if smaller <= 0 {
return nil
}
for i := 0; i < smaller; i++ {
w.locks[i].Unlock()
w.locks[i].Destroy()
}
w.locks = w.locks[smaller:]
return nil
}
func (w *WAL) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.f != nil {
if err := w.sync(); err != nil {
return err
}
if err := w.f.Close(); err != nil {
return err
}
}
for _, l := range w.locks {
err := l.Unlock()
if err != nil {
plog.Errorf("failed to unlock during closing wal: %s", err)
}
err = l.Destroy()
if err != nil {
plog.Errorf("failed to destroy lock during closing wal: %s", err)
}
}
return nil
}
func (w *WAL) saveEntry(e *raftpb.Entry) error {
// TODO: add MustMarshalTo to reduce one allocation.
b := pbutil.MustMarshal(e)
rec := &walpb.Record{Type: entryType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
w.enti = e.Index
lastIndexSaved.Set(float64(w.enti))
return nil
}
func (w *WAL) saveState(s *raftpb.HardState) error {
if isEmptyHardState(*s) {
return nil
}
w.state = *s
b := pbutil.MustMarshal(s)
rec := &walpb.Record{Type: stateType, Data: b}
return w.encoder.encode(rec)
}
func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
w.mu.Lock()
defer w.mu.Unlock()
// shortcut, do not call sync
if isEmptyHardState(st) && len(ents) == 0 {
return nil
}
mustSync := mustSync(st, w.state, len(ents))
// TODO(xiangli): no more reference operator
for i := range ents {
if err := w.saveEntry(&ents[i]); err != nil {
return err
}
}
if err := w.saveState(&st); err != nil {
return err
}
fstat, err := w.f.Stat()
if err != nil {
return err
}
if fstat.Size() < segmentSizeBytes {
if mustSync {
return w.sync()
}
return nil
}
// TODO: add a test for this code path when refactoring the tests
return w.cut()
}
func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
w.mu.Lock()
defer w.mu.Unlock()
b := pbutil.MustMarshal(&e)
rec := &walpb.Record{Type: snapshotType, Data: b}
if err := w.encoder.encode(rec); err != nil {
return err
}
// update enti only when snapshot is ahead of last index
if w.enti < e.Index {
w.enti = e.Index
}
lastIndexSaved.Set(float64(w.enti))
return w.sync()
}
func (w *WAL) saveCrc(prevCrc uint32) error {
return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
}
func mustSync(st, prevst raftpb.HardState, entsnum int) bool {
// Persistent state on all servers:
// (Updated on stable storage before responding to RPCs)
// currentTerm
// votedFor
// log entries[]
if entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term {
return true
}
return false
}
func isHardStateEqual(a, b raftpb.HardState) bool {
return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
}
var emptyState = raftpb.HardState{}
func isEmptyHardState(st raftpb.HardState) bool {
return isHardStateEqual(st, emptyState)
}

@ -1,31 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"record.go",
"record.pb.go",
],
tags = ["automanaged"],
deps = ["//vendor:github.com/golang/protobuf/proto"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

@ -1,29 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package walpb
import "errors"
var (
ErrCRCMismatch = errors.New("walpb: crc mismatch")
)
func (rec *Record) Validate(crc uint32) error {
if rec.Crc == crc {
return nil
}
rec.Reset()
return ErrCRCMismatch
}

@ -1,495 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: record.proto
// DO NOT EDIT!
/*
Package walpb is a generated protocol buffer package.
It is generated from these files:
record.proto
It has these top-level messages:
Record
Snapshot
*/
package walpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
)
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Record struct {
Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Record) Reset() { *m = Record{} }
func (m *Record) String() string { return proto.CompactTextString(m) }
func (*Record) ProtoMessage() {}
type Snapshot struct {
Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Snapshot) Reset() { *m = Snapshot{} }
func (m *Snapshot) String() string { return proto.CompactTextString(m) }
func (*Snapshot) ProtoMessage() {}
func init() {
proto.RegisterType((*Record)(nil), "walpb.Record")
proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
}
func (m *Record) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Record) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
i++
i = encodeVarintRecord(data, i, uint64(m.Type))
data[i] = 0x10
i++
i = encodeVarintRecord(data, i, uint64(m.Crc))
if m.Data != nil {
data[i] = 0x1a
i++
i = encodeVarintRecord(data, i, uint64(len(m.Data)))
i += copy(data[i:], m.Data)
}
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Snapshot) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Snapshot) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0x8
i++
i = encodeVarintRecord(data, i, uint64(m.Index))
data[i] = 0x10
i++
i = encodeVarintRecord(data, i, uint64(m.Term))
if m.XXX_unrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeFixed64Record(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Record(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintRecord(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *Record) Size() (n int) {
var l int
_ = l
n += 1 + sovRecord(uint64(m.Type))
n += 1 + sovRecord(uint64(m.Crc))
if m.Data != nil {
l = len(m.Data)
n += 1 + l + sovRecord(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Snapshot) Size() (n int) {
var l int
_ = l
n += 1 + sovRecord(uint64(m.Index))
n += 1 + sovRecord(uint64(m.Term))
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovRecord(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozRecord(x uint64) (n int) {
return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Record) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Record: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Type |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
}
m.Crc = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Crc |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthRecord
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRecord(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthRecord
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Snapshot) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
}
m.Index = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Index |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
}
m.Term = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRecord
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Term |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipRecord(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthRecord
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipRecord(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthRecord
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowRecord
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipRecord(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
)
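
A hedged round-trip sketch of the generated Marshal/Unmarshal methods above; the import path is assumed for illustration.

package main

import (
    "fmt"

    "github.com/coreos/etcd/wal/walpb" // import path assumed for illustration
)

func main() {
    in := walpb.Record{Type: 2, Crc: 0xdeadbeef, Data: []byte("payload")}

    // Size/MarshalTo drive Marshal; Unmarshal walks the wire format back.
    buf, err := in.Marshal()
    if err != nil {
        panic(err)
    }

    var out walpb.Record
    if err := out.Unmarshal(buf); err != nil {
        panic(err)
    }
    fmt.Printf("type=%d crc=%#x data=%q\n", out.Type, out.Crc, out.Data)
}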

View file

@ -1,20 +0,0 @@
syntax = "proto2";
package walpb;
import "gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
message Record {
optional int64 type = 1 [(gogoproto.nullable) = false];
optional uint32 crc = 2 [(gogoproto.nullable) = false];
optional bytes data = 3;
}
message Snapshot {
optional uint64 index = 1 [(gogoproto.nullable) = false];
optional uint64 term = 2 [(gogoproto.nullable) = false];
}
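
As an aside (not part of the vendored file), the tag bytes 0x8, 0x10 and 0x1a hard-coded in MarshalTo above fall out of these field numbers: a protobuf key byte is (field_number << 3) | wire_type, with wire type 0 for varints and 2 for length-delimited bytes. A tiny sketch:

package main

import "fmt"

// key reproduces the protobuf tag byte: (field_number << 3) | wire_type.
// Varint fields use wire type 0, length-delimited fields use wire type 2.
func key(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }

func main() {
    fmt.Printf("type (field 1, varint): %#x\n", key(1, 0)) // 0x8
    fmt.Printf("crc  (field 2, varint): %#x\n", key(2, 0)) // 0x10
    fmt.Printf("data (field 3, bytes):  %#x\n", key(3, 2)) // 0x1a
}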

View file

@ -1,36 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["expand.go"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["expand_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = ["//pkg/api:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -1,285 +0,0 @@
package expansion
import (
"testing"
"k8s.io/kubernetes/pkg/api"
)
func TestMapReference(t *testing.T) {
envs := []api.EnvVar{
{
Name: "FOO",
Value: "bar",
},
{
Name: "ZOO",
Value: "$(FOO)-1",
},
{
Name: "BLU",
Value: "$(ZOO)-2",
},
}
declaredEnv := map[string]string{
"FOO": "bar",
"ZOO": "$(FOO)-1",
"BLU": "$(ZOO)-2",
}
serviceEnv := map[string]string{}
mapping := MappingFuncFor(declaredEnv, serviceEnv)
for _, env := range envs {
declaredEnv[env.Name] = Expand(env.Value, mapping)
}
expectedEnv := map[string]string{
"FOO": "bar",
"ZOO": "bar-1",
"BLU": "bar-1-2",
}
for k, v := range expectedEnv {
if e, a := v, declaredEnv[k]; e != a {
t.Errorf("Expected %v, got %v", e, a)
} else {
delete(declaredEnv, k)
}
}
if len(declaredEnv) != 0 {
t.Errorf("Unexpected keys in declared env: %v", declaredEnv)
}
}
func TestMapping(t *testing.T) {
context := map[string]string{
"VAR_A": "A",
"VAR_B": "B",
"VAR_C": "C",
"VAR_REF": "$(VAR_A)",
"VAR_EMPTY": "",
}
mapping := MappingFuncFor(context)
doExpansionTest(t, mapping)
}
func TestMappingDual(t *testing.T) {
context := map[string]string{
"VAR_A": "A",
"VAR_EMPTY": "",
}
context2 := map[string]string{
"VAR_B": "B",
"VAR_C": "C",
"VAR_REF": "$(VAR_A)",
}
mapping := MappingFuncFor(context, context2)
doExpansionTest(t, mapping)
}
func doExpansionTest(t *testing.T, mapping func(string) string) {
cases := []struct {
name string
input string
expected string
}{
{
name: "whole string",
input: "$(VAR_A)",
expected: "A",
},
{
name: "repeat",
input: "$(VAR_A)-$(VAR_A)",
expected: "A-A",
},
{
name: "beginning",
input: "$(VAR_A)-1",
expected: "A-1",
},
{
name: "middle",
input: "___$(VAR_B)___",
expected: "___B___",
},
{
name: "end",
input: "___$(VAR_C)",
expected: "___C",
},
{
name: "compound",
input: "$(VAR_A)_$(VAR_B)_$(VAR_C)",
expected: "A_B_C",
},
{
name: "escape & expand",
input: "$$(VAR_B)_$(VAR_A)",
expected: "$(VAR_B)_A",
},
{
name: "compound escape",
input: "$$(VAR_A)_$$(VAR_B)",
expected: "$(VAR_A)_$(VAR_B)",
},
{
name: "mixed in escapes",
input: "f000-$$VAR_A",
expected: "f000-$VAR_A",
},
{
name: "backslash escape ignored",
input: "foo\\$(VAR_C)bar",
expected: "foo\\Cbar",
},
{
name: "backslash escape ignored",
input: "foo\\\\$(VAR_C)bar",
expected: "foo\\\\Cbar",
},
{
name: "lots of backslashes",
input: "foo\\\\\\\\$(VAR_A)bar",
expected: "foo\\\\\\\\Abar",
},
{
name: "nested var references",
input: "$(VAR_A$(VAR_B))",
expected: "$(VAR_A$(VAR_B))",
},
{
name: "nested var references second type",
input: "$(VAR_A$(VAR_B)",
expected: "$(VAR_A$(VAR_B)",
},
{
name: "value is a reference",
input: "$(VAR_REF)",
expected: "$(VAR_A)",
},
{
name: "value is a reference x 2",
input: "%%$(VAR_REF)--$(VAR_REF)%%",
expected: "%%$(VAR_A)--$(VAR_A)%%",
},
{
name: "empty var",
input: "foo$(VAR_EMPTY)bar",
expected: "foobar",
},
{
name: "unterminated expression",
input: "foo$(VAR_Awhoops!",
expected: "foo$(VAR_Awhoops!",
},
{
name: "expression without operator",
input: "f00__(VAR_A)__",
expected: "f00__(VAR_A)__",
},
{
name: "shell special vars pass through",
input: "$?_boo_$!",
expected: "$?_boo_$!",
},
{
name: "bare operators are ignored",
input: "$VAR_A",
expected: "$VAR_A",
},
{
name: "undefined vars are passed through",
input: "$(VAR_DNE)",
expected: "$(VAR_DNE)",
},
{
name: "multiple (even) operators, var undefined",
input: "$$$$$$(BIG_MONEY)",
expected: "$$$(BIG_MONEY)",
},
{
name: "multiple (even) operators, var defined",
input: "$$$$$$(VAR_A)",
expected: "$$$(VAR_A)",
},
{
name: "multiple (odd) operators, var undefined",
input: "$$$$$$$(GOOD_ODDS)",
expected: "$$$$(GOOD_ODDS)",
},
{
name: "multiple (odd) operators, var defined",
input: "$$$$$$$(VAR_A)",
expected: "$$$A",
},
{
name: "missing open expression",
input: "$VAR_A)",
expected: "$VAR_A)",
},
{
name: "shell syntax ignored",
input: "${VAR_A}",
expected: "${VAR_A}",
},
{
name: "trailing incomplete expression not consumed",
input: "$(VAR_B)_______$(A",
expected: "B_______$(A",
},
{
name: "trailing incomplete expression, no content, is not consumed",
input: "$(VAR_C)_______$(",
expected: "C_______$(",
},
{
name: "operator at end of input string is preserved",
input: "$(VAR_A)foobarzab$",
expected: "Afoobarzab$",
},
{
name: "shell escaped incomplete expr",
input: "foo-\\$(VAR_A",
expected: "foo-\\$(VAR_A",
},
{
name: "lots of $( in middle",
input: "--$($($($($--",
expected: "--$($($($($--",
},
{
name: "lots of $( in beginning",
input: "$($($($($--foo$(",
expected: "$($($($($--foo$(",
},
{
name: "lots of $( at end",
input: "foo0--$($($($(",
expected: "foo0--$($($($(",
},
{
name: "escaped operators in variable names are not escaped",
input: "$(foo$$var)",
expected: "$(foo$$var)",
},
{
name: "newline not expanded",
input: "\n",
expected: "\n",
},
}
for _, tc := range cases {
expanded := Expand(tc.input, mapping)
if e, a := tc.expected, expanded; e != a {
t.Errorf("%v: expected %q, got %q", tc.name, e, a)
}
}
}
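
The tests above pin down the $(VAR) expansion rules; as a quick usage sketch of the two entry points they exercise, Expand and MappingFuncFor (import path assumed):

package main

import (
    "fmt"

    "k8s.io/kubernetes/third_party/forked/golang/expansion" // import path assumed
)

func main() {
    declared := map[string]string{"HOST": "example.com", "PORT": "8080"}
    mapping := expansion.MappingFuncFor(declared)

    // $(VAR) references expand, $$ escapes a literal $, and undefined
    // references pass through unchanged, exactly as the tests assert.
    fmt.Println(expansion.Expand("http://$(HOST):$(PORT)/$(PATH)", mapping))
    // http://example.com:8080/$(PATH)
    fmt.Println(expansion.Expand("cost: $$(PRICE)", mapping))
    // cost: $(PRICE)
}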

View file

@ -1,27 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["fields.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -1,501 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package json is forked from the Go standard library to enable us to find the
// field of a struct that a given JSON key maps to.
package json
import (
"bytes"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// Finds the patchStrategy and patchMergeKey struct tag fields on a given
// struct field given the struct type and the JSON name of the field.
// TODO: fix the returned errors to be introspectable.
func LookupPatchMetadata(t reflect.Type, jsonField string) (reflect.Type, string, string, error) {
if t.Kind() == reflect.Map {
return t.Elem(), "", "", nil
}
if t.Kind() != reflect.Struct {
return nil, "", "", fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s",
t.Kind().String())
}
jf := []byte(jsonField)
// Find the field that the JSON library would use.
var f *field
fields := cachedTypeFields(t)
for i := range fields {
ff := &fields[i]
if bytes.Equal(ff.nameBytes, jf) {
f = ff
break
}
// Do case-insensitive comparison.
if f == nil && ff.equalFold(ff.nameBytes, jf) {
f = ff
}
}
if f != nil {
// Find the reflect.Value of the most preferential struct field.
tjf := t.Field(f.index[0])
// we must navigate down all the anonymously included structs in the chain
for i := 1; i < len(f.index); i++ {
tjf = tjf.Type.Field(f.index[i])
}
patchStrategy := tjf.Tag.Get("patchStrategy")
patchMergeKey := tjf.Tag.Get("patchMergeKey")
return tjf.Type, patchStrategy, patchMergeKey, nil
}
return nil, "", "", fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
tag bool
// index is the sequence of indexes from the containing type fields to this field.
// it is a slice because anonymous structs will need multiple navigation steps to correctly
// resolve the proper fields
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
func (f field) String() string {
return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
}
func fillField(f field) field {
f.nameBytes = []byte(f.name)
f.equalFold = foldFunc(f.nameBytes)
return f
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, fillField(field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: opts.Contains("string"),
}))
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
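
A hedged sketch of LookupPatchMetadata on a struct tagged the way API types are; the type, field and import path below are made up for illustration:

package main

import (
    "fmt"
    "reflect"

    forkedjson "k8s.io/kubernetes/third_party/forked/golang/json" // import path assumed
)

// Illustrative type only; the tags mirror how API structs are annotated.
type PodSpecLike struct {
    Containers []string `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
    typ, strategy, mergeKey, err := forkedjson.LookupPatchMetadata(reflect.TypeOf(PodSpecLike{}), "containers")
    if err != nil {
        panic(err)
    }
    fmt.Println(typ, strategy, mergeKey) // []string merge name
}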

View file

@ -1,27 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["addr.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -1,27 +0,0 @@
package netutil
import (
"net/url"
"strings"
)
// FROM: http://golang.org/src/net/http/client.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// FROM: http://golang.org/src/net/http/transport.go
// CanonicalAddr returns url.Host but always with a ":port" suffix
func CanonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}
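
For reference, a small sketch of CanonicalAddr filling in the scheme's default port (import path assumed):

package main

import (
    "fmt"
    "net/url"

    "k8s.io/kubernetes/pkg/util/netutil" // import path assumed for illustration
)

func main() {
    for _, raw := range []string{"https://example.com", "http://example.com:8080"} {
        u, err := url.Parse(raw)
        if err != nil {
            panic(err)
        }
        // The scheme's default port is appended when the host has none.
        fmt.Println(netutil.CanonicalAddr(u)) // example.com:443, then example.com:8080
    }
}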

View file

@ -1,38 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"deep_equal.go",
"type.go",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["deep_equal_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -1,388 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package reflect is a fork of go's standard library reflection package, which
// allows for deep equal with equality functions defined.
package reflect
import (
"fmt"
"reflect"
"strings"
)
// Equalities is a map from type to a function comparing two values of
// that type.
type Equalities map[reflect.Type]reflect.Value
// For convenience, panics on errors
func EqualitiesOrDie(funcs ...interface{}) Equalities {
e := Equalities{}
if err := e.AddFuncs(funcs...); err != nil {
panic(err)
}
return e
}
// AddFuncs is a shortcut for multiple calls to AddFunc.
func (e Equalities) AddFuncs(funcs ...interface{}) error {
for _, f := range funcs {
if err := e.AddFunc(f); err != nil {
return err
}
}
return nil
}
// AddFunc uses func as an equality function: it must take
// two parameters of the same type, and return a boolean.
func (e Equalities) AddFunc(eqFunc interface{}) error {
fv := reflect.ValueOf(eqFunc)
ft := fv.Type()
if ft.Kind() != reflect.Func {
return fmt.Errorf("expected func, got: %v", ft)
}
if ft.NumIn() != 2 {
return fmt.Errorf("expected two 'in' params, got: %v", ft)
}
if ft.NumOut() != 1 {
return fmt.Errorf("expected one 'out' param, got: %v", ft)
}
if ft.In(0) != ft.In(1) {
return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft)
}
var forReturnType bool
boolType := reflect.TypeOf(forReturnType)
if ft.Out(0) != boolType {
return fmt.Errorf("expected bool return, got: %v", ft)
}
e[ft.In(0)] = fv
return nil
}
// Below here is forked from go's reflect/deepequal.go
// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// Visited comparisons are stored in a map indexed by visit.
type visit struct {
a1 uintptr
a2 uintptr
typ reflect.Type
}
// unexportedTypePanic is thrown when you use this DeepEqual on something that has an
// unexported type. It indicates a programmer error, so should not occur at runtime,
// which is why it's not public and thus impossible to catch.
type unexportedTypePanic []reflect.Type
func (u unexportedTypePanic) Error() string { return u.String() }
func (u unexportedTypePanic) String() string {
strs := make([]string, len(u))
for i, t := range u {
strs[i] = fmt.Sprintf("%v", t)
}
return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ")
}
func makeUsefulPanic(v reflect.Value) {
if x := recover(); x != nil {
if u, ok := x.(unexportedTypePanic); ok {
u = append(unexportedTypePanic{v.Type()}, u...)
x = u
}
panic(x)
}
}
// Tests for deep equality using reflected types. The map argument tracks
// comparisons that have already been seen, which allows short circuiting on
// recursive types.
func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
defer makeUsefulPanic(v1)
if !v1.IsValid() || !v2.IsValid() {
return v1.IsValid() == v2.IsValid()
}
if v1.Type() != v2.Type() {
return false
}
if fv, ok := e[v1.Type()]; ok {
return fv.Call([]reflect.Value{v1, v2})[0].Bool()
}
hard := func(k reflect.Kind) bool {
switch k {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
return true
}
return false
}
if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
addr1 := v1.UnsafeAddr()
addr2 := v2.UnsafeAddr()
if addr1 > addr2 {
// Canonicalize order to reduce number of entries in visited.
addr1, addr2 = addr2, addr1
}
// Short circuit if references are identical ...
if addr1 == addr2 {
return true
}
// ... or already seen
typ := v1.Type()
v := visit{addr1, addr2, typ}
if visited[v] {
return true
}
// Remember for later.
visited[v] = true
}
switch v1.Kind() {
case reflect.Array:
// We don't need to check length here because length is part of
// an array's type, which has already been filtered for.
for i := 0; i < v1.Len(); i++ {
if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
return false
}
}
return true
case reflect.Slice:
if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
return false
}
if v1.IsNil() || v1.Len() == 0 {
return true
}
if v1.Len() != v2.Len() {
return false
}
if v1.Pointer() == v2.Pointer() {
return true
}
for i := 0; i < v1.Len(); i++ {
if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
return false
}
}
return true
case reflect.Interface:
if v1.IsNil() || v2.IsNil() {
return v1.IsNil() == v2.IsNil()
}
return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
case reflect.Ptr:
return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
case reflect.Struct:
for i, n := 0, v1.NumField(); i < n; i++ {
if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {
return false
}
}
return true
case reflect.Map:
if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
return false
}
if v1.IsNil() || v1.Len() == 0 {
return true
}
if v1.Len() != v2.Len() {
return false
}
if v1.Pointer() == v2.Pointer() {
return true
}
for _, k := range v1.MapKeys() {
if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
return false
}
}
return true
case reflect.Func:
if v1.IsNil() && v2.IsNil() {
return true
}
// Can't do better than this:
return false
default:
// Normal equality suffices
if !v1.CanInterface() || !v2.CanInterface() {
panic(unexportedTypePanic{})
}
return v1.Interface() == v2.Interface()
}
}
// DeepEqual is like reflect.DeepEqual, but focused on semantic equality
// instead of memory equality.
//
// It will use e's equality functions if it finds types that match.
//
// An empty slice *is* equal to a nil slice for our purposes; same for maps.
//
// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
// function for these types.
func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
if a1 == nil || a2 == nil {
return a1 == a2
}
v1 := reflect.ValueOf(a1)
v2 := reflect.ValueOf(a2)
if v1.Type() != v2.Type() {
return false
}
return e.deepValueEqual(v1, v2, make(map[visit]bool), 0)
}
func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
defer makeUsefulPanic(v1)
if !v1.IsValid() || !v2.IsValid() {
return v1.IsValid() == v2.IsValid()
}
if v1.Type() != v2.Type() {
return false
}
if fv, ok := e[v1.Type()]; ok {
return fv.Call([]reflect.Value{v1, v2})[0].Bool()
}
hard := func(k reflect.Kind) bool {
switch k {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
return true
}
return false
}
if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
addr1 := v1.UnsafeAddr()
addr2 := v2.UnsafeAddr()
if addr1 > addr2 {
// Canonicalize order to reduce number of entries in visited.
addr1, addr2 = addr2, addr1
}
// Short circuit if references are identical ...
if addr1 == addr2 {
return true
}
// ... or already seen
typ := v1.Type()
v := visit{addr1, addr2, typ}
if visited[v] {
return true
}
// Remember for later.
visited[v] = true
}
switch v1.Kind() {
case reflect.Array:
// We don't need to check length here because length is part of
// an array's type, which has already been filtered for.
for i := 0; i < v1.Len(); i++ {
if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
return false
}
}
return true
case reflect.Slice:
if v1.IsNil() || v1.Len() == 0 {
return true
}
if v1.Len() > v2.Len() {
return false
}
if v1.Pointer() == v2.Pointer() {
return true
}
for i := 0; i < v1.Len(); i++ {
if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
return false
}
}
return true
case reflect.String:
if v1.Len() == 0 {
return true
}
if v1.Len() > v2.Len() {
return false
}
return v1.String() == v2.String()
case reflect.Interface:
if v1.IsNil() {
return true
}
return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
case reflect.Ptr:
if v1.IsNil() {
return true
}
return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
case reflect.Struct:
for i, n := 0, v1.NumField(); i < n; i++ {
if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) {
return false
}
}
return true
case reflect.Map:
if v1.IsNil() || v1.Len() == 0 {
return true
}
if v1.Len() > v2.Len() {
return false
}
if v1.Pointer() == v2.Pointer() {
return true
}
for _, k := range v1.MapKeys() {
if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
return false
}
}
return true
case reflect.Func:
if v1.IsNil() && v2.IsNil() {
return true
}
// Can't do better than this:
return false
default:
// Normal equality suffices
if !v1.CanInterface() || !v2.CanInterface() {
panic(unexportedTypePanic{})
}
return v1.Interface() == v2.Interface()
}
}
// DeepDerivative is similar to DeepEqual except that unset fields in a1 are
// ignored (not compared). This allows us to focus on the fields that matter to
// the semantic comparison.
//
// The unset fields include a nil pointer and an empty string.
func (e Equalities) DeepDerivative(a1, a2 interface{}) bool {
if a1 == nil {
return true
}
v1 := reflect.ValueOf(a1)
v2 := reflect.ValueOf(a2)
if v1.Type() != v2.Type() {
return false
}
return e.deepValueDerive(v1, v2, make(map[visit]bool), 0)
}
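
A minimal usage sketch for Equalities; the test file that follows covers the same API in depth, and the type and import path here are assumptions:

package main

import (
    "fmt"

    forkedreflect "k8s.io/kubernetes/third_party/forked/golang/reflect" // import path assumed
)

// T is illustrative; Note is deliberately ignored by the custom equality.
type T struct {
    Val  int
    Note string
}

func main() {
    eq := forkedreflect.EqualitiesOrDie(func(a, b T) bool {
        return a.Val == b.Val // semantic equality ignores Note
    })

    fmt.Println(eq.DeepEqual(T{Val: 1, Note: "x"}, T{Val: 1, Note: "y"})) // true
    fmt.Println(eq.DeepEqual([]T{}, []T(nil)))                            // true: empty slice == nil slice
}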

View file

@ -1,137 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflect
import (
"testing"
)
func TestEqualities(t *testing.T) {
e := Equalities{}
type Bar struct {
X int
}
type Baz struct {
Y Bar
}
err := e.AddFuncs(
func(a, b int) bool {
return a+1 == b
},
func(a, b Bar) bool {
return a.X*10 == b.X
},
)
if err != nil {
t.Fatalf("Unexpected: %v", err)
}
type Foo struct {
X int
}
table := []struct {
a, b interface{}
equal bool
}{
{1, 2, true},
{2, 1, false},
{"foo", "fo", false},
{"foo", "foo", true},
{"foo", "foobar", false},
{Foo{1}, Foo{2}, true},
{Foo{2}, Foo{1}, false},
{Bar{1}, Bar{10}, true},
{&Bar{1}, &Bar{10}, true},
{Baz{Bar{1}}, Baz{Bar{10}}, true},
{[...]string{}, [...]string{"1", "2", "3"}, false},
{[...]string{"1"}, [...]string{"1", "2", "3"}, false},
{[...]string{"1", "2", "3"}, [...]string{}, false},
{[...]string{"1", "2", "3"}, [...]string{"1", "2", "3"}, true},
{map[string]int{"foo": 1}, map[string]int{}, false},
{map[string]int{"foo": 1}, map[string]int{"foo": 2}, true},
{map[string]int{"foo": 2}, map[string]int{"foo": 1}, false},
{map[string]int{"foo": 1}, map[string]int{"foo": 2, "bar": 6}, false},
{map[string]int{"foo": 1, "bar": 6}, map[string]int{"foo": 2}, false},
{map[string]int{}, map[string]int(nil), true},
{[]string(nil), []string(nil), true},
{[]string{}, []string(nil), true},
{[]string(nil), []string{}, true},
{[]string{"1"}, []string(nil), false},
{[]string{}, []string{"1", "2", "3"}, false},
{[]string{"1"}, []string{"1", "2", "3"}, false},
{[]string{"1", "2", "3"}, []string{}, false},
}
for _, item := range table {
if e, a := item.equal, e.DeepEqual(item.a, item.b); e != a {
t.Errorf("Expected (%+v == %+v) == %v, but got %v", item.a, item.b, e, a)
}
}
}
func TestDerivates(t *testing.T) {
e := Equalities{}
type Bar struct {
X int
}
type Baz struct {
Y Bar
}
err := e.AddFuncs(
func(a, b int) bool {
return a+1 == b
},
func(a, b Bar) bool {
return a.X*10 == b.X
},
)
if err != nil {
t.Fatalf("Unexpected: %v", err)
}
type Foo struct {
X int
}
table := []struct {
a, b interface{}
equal bool
}{
{1, 2, true},
{2, 1, false},
{"foo", "fo", false},
{"foo", "foo", true},
{"foo", "foobar", false},
{Foo{1}, Foo{2}, true},
{Foo{2}, Foo{1}, false},
{Bar{1}, Bar{10}, true},
{&Bar{1}, &Bar{10}, true},
{Baz{Bar{1}}, Baz{Bar{10}}, true},
{[...]string{}, [...]string{"1", "2", "3"}, false},
{[...]string{"1"}, [...]string{"1", "2", "3"}, false},
{[...]string{"1", "2", "3"}, [...]string{}, false},
{[...]string{"1", "2", "3"}, [...]string{"1", "2", "3"}, true},
{map[string]int{"foo": 1}, map[string]int{}, false},
{map[string]int{"foo": 1}, map[string]int{"foo": 2}, true},
{map[string]int{"foo": 2}, map[string]int{"foo": 1}, false},
{map[string]int{"foo": 1}, map[string]int{"foo": 2, "bar": 6}, true},
{map[string]int{"foo": 1, "bar": 6}, map[string]int{"foo": 2}, false},
{map[string]int{}, map[string]int(nil), true},
{[]string(nil), []string(nil), true},
{[]string{}, []string(nil), true},
{[]string(nil), []string{}, true},
{[]string{"1"}, []string(nil), false},
{[]string{}, []string{"1", "2", "3"}, true},
{[]string{"1"}, []string{"1", "2", "3"}, true},
{[]string{"1", "2", "3"}, []string{}, false},
}
for _, item := range table {
if e, a := item.equal, e.DeepDerivative(item.a, item.b); e != a {
t.Errorf("Expected (%+v ~ %+v) == %v, but got %v", item.a, item.b, e, a)
}
}
}

View file

@ -1,91 +0,0 @@
//This package is copied from Go library reflect/type.go.
//The struct tag library provides no way to extract the list of struct tags, only
//a specific tag
package reflect
import (
"fmt"
"strconv"
"strings"
)
type StructTag struct {
Name string
Value string
}
func (t StructTag) String() string {
return fmt.Sprintf("%s:%q", t.Name, t.Value)
}
type StructTags []StructTag
func (tags StructTags) String() string {
s := make([]string, 0, len(tags))
for _, tag := range tags {
s = append(s, tag.String())
}
return "`" + strings.Join(s, " ") + "`"
}
func (tags StructTags) Has(name string) bool {
for i := range tags {
if tags[i].Name == name {
return true
}
}
return false
}
// ParseStructTags returns the full set of fields in a struct tag in the order they appear in
// the struct tag.
func ParseStructTags(tag string) (StructTags, error) {
tags := StructTags{}
for tag != "" {
// Skip leading space.
i := 0
for i < len(tag) && tag[i] == ' ' {
i++
}
tag = tag[i:]
if tag == "" {
break
}
// Scan to colon. A space, a quote or a control character is a syntax error.
// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
// as it is simpler to inspect the tag's bytes than the tag's runes.
i = 0
for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
i++
}
if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
break
}
name := string(tag[:i])
tag = tag[i+1:]
// Scan quoted string to find value.
i = 1
for i < len(tag) && tag[i] != '"' {
if tag[i] == '\\' {
i++
}
i++
}
if i >= len(tag) {
break
}
qvalue := string(tag[:i+1])
tag = tag[i+1:]
value, err := strconv.Unquote(qvalue)
if err != nil {
return nil, err
}
tags = append(tags, StructTag{Name: name, Value: value})
}
return tags, nil
}
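
A short sketch of ParseStructTags extracting every key from a raw struct tag, which reflect.StructTag.Get alone cannot do (import path assumed):

package main

import (
    "fmt"

    forkedreflect "k8s.io/kubernetes/third_party/forked/golang/reflect" // import path assumed
)

func main() {
    tags, err := forkedreflect.ParseStructTags(`json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`)
    if err != nil {
        panic(err)
    }
    for _, t := range tags {
        fmt.Printf("%s => %s\n", t.Name, t.Value)
    }
    fmt.Println(tags.Has("json"), tags.Has("yaml")) // true false
}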

View file

@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"exec.go",
"funcs.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -1,94 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions indirect and printableValue
//are exported as public functions.
package template
import (
"fmt"
"reflect"
)
var Indirect = indirect
var PrintableValue = printableValue
var (
errorType = reflect.TypeOf((*error)(nil)).Elem()
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
)
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
// We indirect through pointers and empty interfaces (only) because
// non-empty interfaces have methods we might need.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
if v.IsNil() {
return v, true
}
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
break
}
}
return v, false
}
// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (interface{}, bool) {
if v.Kind() == reflect.Ptr {
v, _ = indirect(v) // fmt.Fprint handles nil.
}
if !v.IsValid() {
return "<no value>", true
}
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
v = v.Addr()
} else {
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil, false
}
}
}
return v.Interface(), true
}
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
switch typ.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return true
}
return false
}
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value.
func isTrue(val reflect.Value) (truth, ok bool) {
if !val.IsValid() {
// Something like var x interface{}, never set. It's a form of nil.
return false, true
}
switch val.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
truth = val.Len() > 0
case reflect.Bool:
truth = val.Bool()
case reflect.Complex64, reflect.Complex128:
truth = val.Complex() != 0
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
truth = !val.IsNil()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
truth = val.Int() != 0
case reflect.Float32, reflect.Float64:
truth = val.Float() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
truth = val.Uint() != 0
case reflect.Struct:
truth = true // Struct values are always true.
default:
return
}
return truth, true
}
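
A hedged sketch of the two hooks this file exports, Indirect and PrintableValue (import path assumed):

package main

import (
    "fmt"
    "reflect"

    forkedtemplate "k8s.io/kubernetes/third_party/forked/golang/template" // import path assumed
)

func main() {
    s := "hello"
    p := &s
    var nilp *string

    // Indirect walks pointers/interfaces down to the underlying value.
    v, isNil := forkedtemplate.Indirect(reflect.ValueOf(p))
    fmt.Println(v.Interface(), isNil) // hello false

    _, isNil = forkedtemplate.Indirect(reflect.ValueOf(nilp))
    fmt.Println(isNil) // true

    // PrintableValue reports whether fmt can do something useful with a value.
    pv, ok := forkedtemplate.PrintableValue(reflect.ValueOf(make(chan int)))
    fmt.Println(pv, ok) // <nil> false: channels have no printable form here
}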

View file

@ -1,599 +0,0 @@
//This package is copied from Go library text/template.
//The original private functions eq, ge, gt, le, lt, and ne
//are exported as public functions.
package template
import (
"bytes"
"errors"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var Equal = eq
var GreaterEqual = ge
var Greater = gt
var LessEqual = le
var Less = lt
var NotEqual = ne
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
type FuncMap map[string]interface{}
var builtins = FuncMap{
"and": and,
"call": call,
"html": HTMLEscaper,
"index": index,
"js": JSEscaper,
"len": length,
"not": not,
"or": or,
"print": fmt.Sprint,
"printf": fmt.Sprintf,
"println": fmt.Sprintln,
"urlquery": URLQueryEscaper,
// Comparisons
"eq": eq, // ==
"ge": ge, // >=
"gt": gt, // >
"le": le, // <=
"lt": lt, // <
"ne": ne, // !=
}
var builtinFuncs = createValueFuncs(builtins)
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
m := make(map[string]reflect.Value)
addValueFuncs(m, funcMap)
return m
}
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
for name, fn := range in {
v := reflect.ValueOf(fn)
if v.Kind() != reflect.Func {
panic("value for " + name + " not a function")
}
if !goodFunc(v.Type()) {
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
}
out[name] = v
}
}
// addFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
for name, fn := range in {
out[name] = fn
}
}
// goodFunc checks that the function or method has the right result signature.
func goodFunc(typ reflect.Type) bool {
// We allow functions with 1 result or 2 results where the second is an error.
switch {
case typ.NumOut() == 1:
return true
case typ.NumOut() == 2 && typ.Out(1) == errorType:
return true
}
return false
}
// findFunction looks for a function in the template, and global map.
func findFunction(name string) (reflect.Value, bool) {
if fn := builtinFuncs[name]; fn.IsValid() {
return fn, true
}
return reflect.Value{}, false
}
// Indexing.
// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
func index(item interface{}, indices ...interface{}) (interface{}, error) {
v := reflect.ValueOf(item)
for _, i := range indices {
index := reflect.ValueOf(i)
var isNil bool
if v, isNil = indirect(v); isNil {
return nil, fmt.Errorf("index of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.String:
var x int64
switch index.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x = index.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x = int64(index.Uint())
default:
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
}
if x < 0 || x >= int64(v.Len()) {
return nil, fmt.Errorf("index out of range: %d", x)
}
v = v.Index(int(x))
case reflect.Map:
if !index.IsValid() {
index = reflect.Zero(v.Type().Key())
}
if !index.Type().AssignableTo(v.Type().Key()) {
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
}
if x := v.MapIndex(index); x.IsValid() {
v = x
} else {
v = reflect.Zero(v.Type().Elem())
}
default:
return nil, fmt.Errorf("can't index item of type %s", v.Type())
}
}
return v.Interface(), nil
}
// Length
// length returns the length of the item, with an error if it has no defined length.
func length(item interface{}) (int, error) {
v, isNil := indirect(reflect.ValueOf(item))
if isNil {
return 0, fmt.Errorf("len of nil pointer")
}
switch v.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
return v.Len(), nil
}
return 0, fmt.Errorf("len of type %s", v.Type())
}
// Function invocation
// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
func call(fn interface{}, args ...interface{}) (interface{}, error) {
v := reflect.ValueOf(fn)
typ := v.Type()
if typ.Kind() != reflect.Func {
return nil, fmt.Errorf("non-function of type %s", typ)
}
if !goodFunc(typ) {
return nil, fmt.Errorf("function returns %d results; should be 1 or 2", typ.NumOut())
}
numIn := typ.NumIn()
var dddType reflect.Type
if typ.IsVariadic() {
if len(args) < numIn-1 {
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
}
dddType = typ.In(numIn - 1).Elem()
} else {
if len(args) != numIn {
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
}
}
argv := make([]reflect.Value, len(args))
for i, arg := range args {
value := reflect.ValueOf(arg)
// Compute the expected type. Clumsy because of variadics.
var argType reflect.Type
if !typ.IsVariadic() || i < numIn-1 {
argType = typ.In(i)
} else {
argType = dddType
}
if !value.IsValid() && canBeNil(argType) {
value = reflect.Zero(argType)
}
if !value.Type().AssignableTo(argType) {
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
}
argv[i] = value
}
result := v.Call(argv)
if len(result) == 2 && !result[1].IsNil() {
return result[0].Interface(), result[1].Interface().(error)
}
return result[0].Interface(), nil
}
// Boolean logic.
func truth(a interface{}) bool {
t, _ := isTrue(reflect.ValueOf(a))
return t
}
// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 interface{}, args ...interface{}) interface{} {
if !truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if !truth(arg0) {
break
}
}
return arg0
}
// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 interface{}, args ...interface{}) interface{} {
if truth(arg0) {
return arg0
}
for i := range args {
arg0 = args[i]
if truth(arg0) {
break
}
}
return arg0
}
// not returns the Boolean negation of its argument.
func not(arg interface{}) (truth bool) {
truth, _ = isTrue(reflect.ValueOf(arg))
return !truth
}
// Comparison.
// TODO: Perhaps allow comparison between signed and unsigned integers.
var (
errBadComparisonType = errors.New("invalid type for comparison")
errBadComparison = errors.New("incompatible types for comparison")
errNoComparison = errors.New("missing argument for comparison")
)
type kind int
const (
invalidKind kind = iota
boolKind
complexKind
intKind
floatKind
integerKind
stringKind
uintKind
)
func basicKind(v reflect.Value) (kind, error) {
switch v.Kind() {
case reflect.Bool:
return boolKind, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intKind, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintKind, nil
case reflect.Float32, reflect.Float64:
return floatKind, nil
case reflect.Complex64, reflect.Complex128:
return complexKind, nil
case reflect.String:
return stringKind, nil
}
return invalidKind, errBadComparisonType
}
// eq evaluates the comparison a == b || a == c || ...
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
if len(arg2) == 0 {
return false, errNoComparison
}
for _, arg := range arg2 {
v2 := reflect.ValueOf(arg)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind:
truth = v1.Bool() == v2.Bool()
case complexKind:
truth = v1.Complex() == v2.Complex()
case floatKind:
truth = v1.Float() == v2.Float()
case intKind:
truth = v1.Int() == v2.Int()
case stringKind:
truth = v1.String() == v2.String()
case uintKind:
truth = v1.Uint() == v2.Uint()
default:
panic("invalid kind")
}
}
if truth {
return true, nil
}
}
return false, nil
}
// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
// != is the inverse of ==.
equal, err := eq(arg1, arg2)
return !equal, err
}
// lt evaluates the comparison a < b.
func lt(arg1, arg2 interface{}) (bool, error) {
v1 := reflect.ValueOf(arg1)
k1, err := basicKind(v1)
if err != nil {
return false, err
}
v2 := reflect.ValueOf(arg2)
k2, err := basicKind(v2)
if err != nil {
return false, err
}
truth := false
if k1 != k2 {
// Special case: Can compare integer values regardless of type's sign.
switch {
case k1 == intKind && k2 == uintKind:
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
case k1 == uintKind && k2 == intKind:
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
default:
return false, errBadComparison
}
} else {
switch k1 {
case boolKind, complexKind:
return false, errBadComparisonType
case floatKind:
truth = v1.Float() < v2.Float()
case intKind:
truth = v1.Int() < v2.Int()
case stringKind:
truth = v1.String() < v2.String()
case uintKind:
truth = v1.Uint() < v2.Uint()
default:
panic("invalid kind")
}
}
return truth, nil
}
// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
// <= is < or ==.
lessThan, err := lt(arg1, arg2)
if lessThan || err != nil {
return lessThan, err
}
return eq(arg1, arg2)
}
// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
// > is the inverse of <=.
lessOrEqual, err := le(arg1, arg2)
if err != nil {
return false, err
}
return !lessOrEqual, nil
}
// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
// >= is the inverse of <.
lessThan, err := lt(arg1, arg2)
if err != nil {
return false, err
}
return !lessThan, nil
}
// HTML escaping.
var (
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp = []byte("&amp;")
htmlLt = []byte("&lt;")
htmlGt = []byte("&gt;")
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
last := 0
for i, c := range b {
var html []byte
switch c {
case '"':
html = htmlQuot
case '\'':
html = htmlApos
case '&':
html = htmlAmp
case '<':
html = htmlLt
case '>':
html = htmlGt
default:
continue
}
w.Write(b[last:i])
w.Write(html)
last = i + 1
}
w.Write(b[last:])
}
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexAny(s, `'"&<>`) < 0 {
return s
}
var b bytes.Buffer
HTMLEscape(&b, []byte(s))
return b.String()
}
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...interface{}) string {
return HTMLEscapeString(evalArgs(args))
}
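// exampleHTMLEscaping is an illustrative sketch, not part of the original
// file; the function name and input are invented. It shows the five
// characters rewritten by HTMLEscapeString.
func exampleHTMLEscaping() string {
    return HTMLEscapeString(`<a href="x">Tom & Jerry's</a>`)
    // -> &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&#39;s&lt;/a&gt;
}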
// JavaScript escaping.
var (
jsLowUni = []byte(`\u00`)
hex = []byte("0123456789ABCDEF")
jsBackslash = []byte(`\\`)
jsApos = []byte(`\'`)
jsQuot = []byte(`\"`)
jsLt = []byte(`\x3C`)
jsGt = []byte(`\x3E`)
)
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
last := 0
for i := 0; i < len(b); i++ {
c := b[i]
if !jsIsSpecial(rune(c)) {
// fast path: nothing to do
continue
}
w.Write(b[last:i])
if c < utf8.RuneSelf {
// Quotes, slashes and angle brackets get quoted.
// Control characters get written as \u00XX.
switch c {
case '\\':
w.Write(jsBackslash)
case '\'':
w.Write(jsApos)
case '"':
w.Write(jsQuot)
case '<':
w.Write(jsLt)
case '>':
w.Write(jsGt)
default:
w.Write(jsLowUni)
t, b := c>>4, c&0x0f
w.Write(hex[t : t+1])
w.Write(hex[b : b+1])
}
} else {
// Unicode rune.
r, size := utf8.DecodeRune(b[i:])
if unicode.IsPrint(r) {
w.Write(b[i : i+size])
} else {
fmt.Fprintf(w, "\\u%04X", r)
}
i += size - 1
}
last = i + 1
}
w.Write(b[last:])
}
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
// Avoid allocation if we can.
if strings.IndexFunc(s, jsIsSpecial) < 0 {
return s
}
var b bytes.Buffer
JSEscape(&b, []byte(s))
return b.String()
}
func jsIsSpecial(r rune) bool {
switch r {
case '\\', '\'', '"', '<', '>':
return true
}
return r < ' ' || utf8.RuneSelf <= r
}
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...interface{}) string {
return JSEscapeString(evalArgs(args))
}
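// exampleJSEscaping is an illustrative sketch, not part of the original file;
// the function name and input are invented. Quotes, backslashes and angle
// brackets are rewritten so the result is safe inside a JavaScript string
// literal; note that in this copy of the package '&' is not treated as special.
func exampleJSEscaping() string {
    return JSEscapeString(`he said: "1 < 2"`)
    // -> he said: \"1 \x3C 2\"
}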
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...interface{}) string {
return url.QueryEscape(evalArgs(args))
}
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
// fmt.Sprint(args...)
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []interface{}) string {
ok := false
var s string
// Fast path for simple common case.
if len(args) == 1 {
s, ok = args[0].(string)
}
if !ok {
for i, arg := range args {
a, ok := printableValue(reflect.ValueOf(arg))
if ok {
args[i] = a
} // else let fmt do its thing
}
s = fmt.Sprint(args...)
}
return s
}
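// exampleURLQueryEscaper is an illustrative sketch, not part of the original
// file; the function name and inputs are invented. It shows how the escaper
// funnels its arguments through evalArgs before calling url.QueryEscape.
func exampleURLQueryEscaper() []string {
    return []string{
        URLQueryEscaper("a b&c"),    // single-string fast path -> "a+b%26c"
        URLQueryEscaper("n=", 1, 2), // evalArgs -> fmt.Sprint("n=", 1, 2) == "n=1 2" -> "n%3D1+2"
    }
}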


@ -1,172 +0,0 @@
Shell2Junit License Information
Feb, 2010
shell2junit library and sample code is licensed under Apache License, v.2.0.
(c) 2009 Manolo Carrasco (Manuel Carrasco Moñino)
=====
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the
copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control with
that entity. For the purposes of this definition, "control" means (i) the
power, direct or indirect, to cause the direction or management of such
entity, whether by contract or otherwise, or (ii) ownership of fifty percent
(50%) or more of the outstanding shares, or (iii) beneficial ownership of
such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source, and
configuration files.
"Object" form shall mean any form resulting from mechanical transformation
or translation of a Source form, including but not limited to compiled
object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form,
made available under the License, as indicated by a copyright notice that is
included in or attached to the work (an example is provided in the Appendix
below).
"Derivative Works" shall mean any work, whether in Source or Object form,
that is based on (or derived from) the Work and for which the editorial
revisions, annotations, elaborations, or other modifications represent, as a
whole, an original work of authorship. For the purposes of this License,
Derivative Works shall not include works that remain separable from, or
merely link (or bind by name) to the interfaces of, the Work and Derivative
Works thereof.
"Contribution" shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Licensor for
inclusion in the Work by the copyright owner or by an individual or Legal
Entity authorized to submit on behalf of the copyright owner. For the
purposes of this definition, "submitted" means any form of electronic,
verbal, or written communication sent to the Licensor or its
representatives, including but not limited to communication on electronic
mailing lists, source code control systems, and issue tracking systems that
are managed by, or on behalf of, the Licensor for the purpose of discussing
and improving the Work, but excluding communication that is conspicuously
marked or otherwise designated in writing by the copyright owner as "Not a
Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on
behalf of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this
License, each Contributor hereby grants to You a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable copyright license to
reproduce, prepare Derivative Works of, publicly display, publicly perform,
sublicense, and distribute the Work and such Derivative Works in Source or
Object form.
3. Grant of Patent License. Subject to the terms and conditions of this
License, each Contributor hereby grants to You a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
this section) patent license to make, have made, use, offer to sell, sell,
import, and otherwise transfer the Work, where such license applies only to
those patent claims licensable by such Contributor that are necessarily
infringed by their Contribution(s) alone or by combination of their
Contribution(s) with the Work to which such Contribution(s) was submitted.
If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this
License for that Work shall terminate as of the date such litigation is
filed.
4. Redistribution. You may reproduce and distribute copies of the Work or
Derivative Works thereof in any medium, with or without modifications, and
in Source or Object form, provided that You meet the following conditions:
a. You must give any other recipients of the Work or Derivative Works a copy
of this License; and
b. You must cause any modified files to carry prominent notices stating that
You changed the files; and
c. You must retain, in the Source form of any Derivative Works that You
distribute, all copyright, patent, trademark, and attribution notices from
the Source form of the Work, excluding those notices that do not pertain to
any part of the Derivative Works; and
d. If the Work includes a "NOTICE" text file as part of its distribution,
then any Derivative Works that You distribute must include a readable copy
of the attribution notices contained within such NOTICE file, excluding
those notices that do not pertain to any part of the Derivative Works, in at
least one of the following places: within a NOTICE text file distributed as
part of the Derivative Works; within the Source form or documentation, if
provided along with the Derivative Works; or, within a display generated by
the Derivative Works, if and wherever such third-party notices normally
appear. The contents of the NOTICE file are for informational purposes only
and do not modify the License. You may add Your own attribution notices
within Derivative Works that You distribute, alongside or as an addendum to
the NOTICE text from the Work, provided that such additional attribution
notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may
provide additional or different license terms and conditions for use,
reproduction, or distribution of Your modifications, or for any such
Derivative Works as a whole, provided Your use, reproduction, and
distribution of the Work otherwise complies with the conditions stated in
this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any
Contribution intentionally submitted for inclusion in the Work by You to the
Licensor shall be under the terms and conditions of this License, without
any additional terms or conditions. Notwithstanding the above, nothing
herein shall supersede or modify the terms of any separate license agreement
you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor, except
as required for reasonable and customary use in describing the origin of the
Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
writing, Licensor provides the Work (and each Contributor provides its
Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied, including, without limitation, any
warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining
the appropriateness of using or redistributing the Work and assume any risks
associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether
in tort (including negligence), contract, or otherwise, unless required by
applicable law (such as deliberate and grossly negligent acts) or agreed to
in writing, shall any Contributor be liable to You for damages, including
any direct, indirect, special, incidental, or consequential damages of any
character arising as a result of this License or out of the use or inability
to use the Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all other
commercial damages or losses), even if such Contributor has been advised of
the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work
or Derivative Works thereof, You may choose to offer, and charge a fee for,
acceptance of support, warranty, indemnity, or other liability obligations
and/or rights consistent with this License. However, in accepting such
obligations, You may act only on Your own behalf and on Your sole
responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any
liability incurred by, or claims asserted against, such Contributor by
reason of your accepting any such warranty or additional liability.


@ -1,177 +0,0 @@
#!/bin/bash
### Copyright 2010 Manuel Carrasco Moñino. (manolo at apache.org)
###
### Licensed under the Apache License, Version 2.0.
### You may obtain a copy of it at
### http://www.apache.org/licenses/LICENSE-2.0
###
### A library for shell scripts which creates reports in jUnit format.
### These reports can be used in Jenkins, or any other CI.
###
### Usage:
### - Include this file in your shell script
### - Use juLog to call your command any time you want to produce a new report
### Usage: juLog <options> command arguments
### options:
### -class="MyClass" : a class name which will be shown in the junit report
### -name="TestName" : the test name which will be shown in the junit report
### -error="RegExp" : a regexp which sets the test as failure when the output matches it
### -ierror="RegExp" : same as -error but case insensitive
### -output="Path" : path to output directory, defaults to "./results"
### - Junit reports are left in the folder './results' (or the -output directory) under the directory where the script is executed; see the example below.
### - Configure Jenkins to parse junit files from the generated folder
###
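### Example (an illustrative sketch, not part of the original header; the
### library file name, class, test name, command and error regexp below are
### invented):
###   . ./sh2ju.sh
###   juLogClean
###   juLog -class="smoke" -name="rootIsListable" -error="No such file" ls /
###   # produces ./results/junit-smoke.xml containing one <testcase name="rootIsListable">
###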
asserts=00; errors=0; total=0; content=""
date=`which gdate 2>/dev/null || which date`
# default output folder
juDIR=`pwd`/results
# The name of the suite is calculated based on your script name
suite=""
# A wrapper for the eval builtin which allows catching seg-faults and the use of tee
errfile=/tmp/evErr.$$.log
function eVal() {
(eval "$1")
# stdout and stderr may currently be inverted (see below) so echo may write to stderr
echo $? 2>&1 | tr -d "\n" > $errfile
}
# Method to clean old tests
function juLogClean() {
echo "+++ Removing old junit reports from: $juDIR "
rm -f "$juDIR"/junit-*
}
# Execute a command and record its results
function juLog() {
suite="";
errfile=/tmp/evErr.$$.log
date=`which gdate 2>/dev/null || which date`
asserts=00; errors=0; total=0; content=""
# parse arguments
ya=""; icase=""
while [ -z "$ya" ]; do
case "$1" in
-name=*) name=`echo "$1" | sed -e 's/-name=//'`; shift;;
-class=*) class=`echo "$1" | sed -e 's/-class=//'`; shift;;
-ierror=*) ereg=`echo "$1" | sed -e 's/-ierror=//'`; icase="-i"; shift;;
-error=*) ereg=`echo "$1" | sed -e 's/-error=//'`; shift;;
-output=*) juDIR=`echo "$1" | sed -e 's/-output=//'`; shift;;
*) ya=1;;
esac
done
# create output directory
mkdir -p "$juDIR" || exit
# use first arg as name if it was not given
if [ -z "$name" ]; then
name="$asserts-$1"
shift
fi
if [[ "$class" = "" ]]; then
class="default"
fi
suite=$class
# calculate command to eval
[ -z "$1" ] && return
cmd="$1"; shift
while [ -n "$1" ]
do
cmd="$cmd \"$1\""
shift
done
# eval the command sending output to a file
outf=/var/tmp/ju$$.txt
errf=/var/tmp/ju$$-err.txt
>$outf
echo "" | tee -a $outf
echo "+++ Running case: $class.$name " | tee -a $outf
echo "+++ working dir: "`pwd` | tee -a $outf
echo "+++ command: $cmd" | tee -a $outf
ini=`$date +%s.%N`
# execute the command, temporarily swapping stderr and stdout so they can be tee'd to separate files,
# then swapping them back again so that the streams reach the invoking process unswapped
# (a standalone sketch of this fd-swap pattern, juSwapDemo, follows the function)
( (eVal "$cmd" | tee -a $outf) 3>&1 1>&2 2>&3 | tee $errf) 3>&1 1>&2 2>&3
evErr=`cat $errfile`
rm -f $errfile
end=`$date +%s.%N`
echo "+++ exit code: $evErr" | tee -a $outf
# set the appropriate error, based on the exit code and the regex
[ $evErr != 0 ] && err=1 || err=0
out=`cat $outf | sed -e 's/^\([^+]\)/| \1/g'`
if [ $err = 0 -a -n "$ereg" ]; then
H=`echo "$out" | egrep $icase "$ereg"`
[ -n "$H" ] && err=1
fi
[ $err != 0 ] && echo "+++ error: $err" | tee -a $outf
rm -f $outf
errMsg=`cat $errf`
rm -f $errf
# calculate vars
asserts=$(($asserts+1))
errors=$(($errors+$err))
time=`echo "$end - $ini" | bc -l`
total=`echo "$total + $time" | bc -l`
# write the junit xml report
## failure tag
[ $err = 0 ] && failure="" || failure="
<failure type=\"ScriptError\" message=\"Script Error\">
<![CDATA[
$errMsg
]]>
</failure>
"
## testcase tag
content="$content
<testcase assertions=\"1\" name=\"$name\" time=\"$time\" classname=\"$class\">
$failure
<system-out>
<![CDATA[
$out
]]>
</system-out>
<system-err>
<![CDATA[
$errMsg
]]>
</system-err>
</testcase>
"
## testsuite block
if [[ -e "$juDIR/junit-$suite.xml" ]]; then
# file exists. first update the failures count
failCount=`sed -n "s/.*testsuite.*failures=\"\([0-9]*\)\".*/\1/p" "$juDIR/junit-$suite.xml"`
errors=$(($failCount+$errors))
sed -i "0,/failures=\"$failCount\"/ s/failures=\"$failCount\"/failures=\"$errors\"/" "$juDIR/junit-$suite.xml"
sed -i "0,/errors=\"$failCount\"/ s/errors=\"$failCount\"/errors=\"$errors\"/" "$juDIR/junit-$suite.xml"
# file exists. Need to append to it. If we remove the testsuite end tag, we can just add it in after.
sed -i "s^</testsuite>^^g" $juDIR/junit-$suite.xml ## remove testSuite so we can add it later
cat <<EOF >> "$juDIR/junit-$suite.xml"
$content
</testsuite>
EOF
else
# no file exists. Adding a new file
cat <<EOF > "$juDIR/junit-$suite.xml"
<testsuite failures="$errors" assertions="$asserts" name="$suite" tests="1" errors="$errors" time="$total">
$content
</testsuite>
EOF
fi
}
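# Illustrative sketch, not part of the original library; the function name and
# log paths are invented. It isolates the fd-swap pattern used inside juLog
# above: swapping fd 1 and fd 2 around each tee captures stdout and stderr to
# separate files while both streams still reach the caller unswapped.
function juSwapDemo() {
  ( ( { echo "to stdout"; echo "to stderr" >&2; } | tee /tmp/ju-demo-out.$$.log ) 3>&1 1>&2 2>&3 | tee /tmp/ju-demo-err.$$.log ) 3>&1 1>&2 2>&3
  # /tmp/ju-demo-out.$$.log now holds "to stdout"; /tmp/ju-demo-err.$$.log holds "to stderr".
}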