Update vendored packages for changes
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
parent ab8586b7c5
commit c869eb0c61
95 changed files with 7843 additions and 21708 deletions
@@ -13,7 +13,7 @@ import (
	"github.com/docker/containerd/api/services/shim"
	"github.com/docker/containerd/api/types/container"
	"github.com/docker/containerd/api/types/mount"
	"github.com/docker/swarmkit/log"
	"github.com/docker/containerd/log"

	"golang.org/x/net/context"
)
@@ -1,47 +0,0 @@
package supervisor

import (
	"reflect"
	"strings"
	"testing"
)

func TestParseRuntimeLog(t *testing.T) {
	s := `{"level": "error", "msg": "foo\n", "time": "2017-01-01T00:00:42Z"}
{"level": "error", "msg": "bar\n", "time": "2017-01-01T00:00:43Z"}
`
	testCases := []struct {
		entries  int
		expected []map[string]interface{}
	}{
		{
			entries: 0,
			expected: []map[string]interface{}{
				map[string]interface{}{"level": "error", "msg": "foo\n", "time": "2017-01-01T00:00:42Z"},
				map[string]interface{}{"level": "error", "msg": "bar\n", "time": "2017-01-01T00:00:43Z"},
			},
		},
		{
			entries: 1,
			expected: []map[string]interface{}{
				map[string]interface{}{"level": "error", "msg": "bar\n", "time": "2017-01-01T00:00:43Z"}},
		},
		{
			entries: 2,
			expected: []map[string]interface{}{
				map[string]interface{}{"level": "error", "msg": "foo\n", "time": "2017-01-01T00:00:42Z"},
				map[string]interface{}{"level": "error", "msg": "bar\n", "time": "2017-01-01T00:00:43Z"},
			},
		},
	}

	for _, tc := range testCases {
		got, err := parseRuntimeLog(strings.NewReader(s), tc.entries)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(tc.expected, got) {
			t.Fatalf("expected %v, got %v", tc.expected, got)
		}
	}
}
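
The parseRuntimeLog helper exercised by this test is not shown anywhere in this diff. As a rough illustration of the behaviour the cases encode (decode newline-delimited JSON entries; an entries value of 0 returns everything, otherwise only the last N entries are kept), here is a minimal hedged sketch; the name, signature, and imports are assumptions, not the vendored implementation:

package supervisor

import (
	"encoding/json"
	"io"
)

// parseRuntimeLogSketch is a hypothetical stand-in for parseRuntimeLog:
// it decodes newline-delimited JSON objects from r and, when n > 0,
// keeps only the last n entries (n == 0 means "return all of them").
func parseRuntimeLogSketch(r io.Reader, n int) ([]map[string]interface{}, error) {
	var entries []map[string]interface{}
	dec := json.NewDecoder(r)
	for {
		var e map[string]interface{}
		if err := dec.Decode(&e); err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		entries = append(entries, e)
	}
	if n > 0 && len(entries) > n {
		entries = entries[len(entries)-n:]
	}
	return entries, nil
}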

66 vendor/github.com/docker/docker/pkg/term/ascii.go (generated, vendored)
@@ -1,66 +0,0 @@
package term

import (
	"fmt"
	"strings"
)

// ASCII list the possible supported ASCII key sequence
var ASCII = []string{
	"ctrl-@",
	"ctrl-a",
	"ctrl-b",
	"ctrl-c",
	"ctrl-d",
	"ctrl-e",
	"ctrl-f",
	"ctrl-g",
	"ctrl-h",
	"ctrl-i",
	"ctrl-j",
	"ctrl-k",
	"ctrl-l",
	"ctrl-m",
	"ctrl-n",
	"ctrl-o",
	"ctrl-p",
	"ctrl-q",
	"ctrl-r",
	"ctrl-s",
	"ctrl-t",
	"ctrl-u",
	"ctrl-v",
	"ctrl-w",
	"ctrl-x",
	"ctrl-y",
	"ctrl-z",
	"ctrl-[",
	"ctrl-\\",
	"ctrl-]",
	"ctrl-^",
	"ctrl-_",
}

// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code.
func ToBytes(keys string) ([]byte, error) {
	codes := []byte{}
next:
	for _, key := range strings.Split(keys, ",") {
		if len(key) != 1 {
			for code, ctrl := range ASCII {
				if ctrl == key {
					codes = append(codes, byte(code))
					continue next
				}
			}
			if key == "DEL" {
				codes = append(codes, 127)
			} else {
				return nil, fmt.Errorf("Unknown character: '%s'", key)
			}
		} else {
			codes = append(codes, byte(key[0]))
		}
	}
	return codes, nil
}
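
As an aside, ToBytes above is the helper Docker uses to turn a detach-keys style string into raw bytes. A small usage sketch (the program below is illustrative, not part of this diff):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// "ctrl-p,ctrl-q" is Docker's default detach sequence; each name maps to
	// its index in the ASCII table above (ctrl-p = 16, ctrl-q = 17).
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", keys) // prints: 10 11
}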

50 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go (generated, vendored)
@@ -1,50 +0,0 @@
// +build linux,cgo
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #include <termios.h>
|
||||
import "C"
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
// It is passthrough for syscall.Termios in order to make it portable with
|
||||
// other platforms where it is not available or handled differently.
|
||||
type Termios syscall.Termios
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if err := tcget(fd, &oldState.termios); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
|
||||
C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
|
||||
if err := tcset(fd, &newState); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
return &oldState, nil
|
||||
}
|
||||
|
||||
func tcget(fd uintptr, p *Termios) syscall.Errno {
|
||||
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
|
||||
if ret != 0 {
|
||||
return err.(syscall.Errno)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func tcset(fd uintptr, p *Termios) syscall.Errno {
|
||||
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
|
||||
if ret != 0 {
|
||||
return err.(syscall.Errno)
|
||||
}
|
||||
return 0
|
||||
}

20 vendor/github.com/docker/docker/pkg/term/tc_other.go (generated, vendored)
@@ -1,20 +0,0 @@
// +build !windows
|
||||
// +build !linux !cgo
|
||||
// +build !solaris !cgo
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func tcget(fd uintptr, p *Termios) syscall.Errno {
|
||||
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
|
||||
return err
|
||||
}
|
||||
|
||||
func tcset(fd uintptr, p *Termios) syscall.Errno {
|
||||
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
|
||||
return err
|
||||
}

63 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go (generated, vendored)
@@ -1,63 +0,0 @@
// +build solaris,cgo
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// #include <termios.h>
|
||||
import "C"
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
// It is passthrough for syscall.Termios in order to make it portable with
|
||||
// other platforms where it is not available or handled differently.
|
||||
type Termios syscall.Termios
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if err := tcget(fd, &oldState.termios); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
|
||||
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
|
||||
newState.Oflag &^= syscall.OPOST
|
||||
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
|
||||
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
|
||||
newState.Cflag |= syscall.CS8
|
||||
|
||||
/*
|
||||
VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned
|
||||
Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It
|
||||
needs to be explicitly set to 1.
|
||||
*/
|
||||
newState.Cc[C.VMIN] = 1
|
||||
newState.Cc[C.VTIME] = 0
|
||||
|
||||
if err := tcset(fd, &newState); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
return &oldState, nil
|
||||
}
|
||||
|
||||
func tcget(fd uintptr, p *Termios) syscall.Errno {
|
||||
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
|
||||
if ret != 0 {
|
||||
return err.(syscall.Errno)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func tcset(fd uintptr, p *Termios) syscall.Errno {
|
||||
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
|
||||
if ret != 0 {
|
||||
return err.(syscall.Errno)
|
||||
}
|
||||
return 0
|
||||
}

123 vendor/github.com/docker/docker/pkg/term/term.go (generated, vendored)
@@ -1,123 +0,0 @@
// +build !windows
|
||||
|
||||
// Package term provides structures and helper functions to work with
|
||||
// terminal (state, sizes).
|
||||
package term
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidState is returned if the state of the terminal is invalid.
|
||||
ErrInvalidState = errors.New("Invalid terminal state")
|
||||
)
|
||||
|
||||
// State represents the state of the terminal.
|
||||
type State struct {
|
||||
termios Termios
|
||||
}
|
||||
|
||||
// Winsize represents the size of the terminal window.
|
||||
type Winsize struct {
|
||||
Height uint16
|
||||
Width uint16
|
||||
x uint16
|
||||
y uint16
|
||||
}
|
||||
|
||||
// StdStreams returns the standard streams (stdin, stdout, stedrr).
|
||||
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
|
||||
return os.Stdin, os.Stdout, os.Stderr
|
||||
}
|
||||
|
||||
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
|
||||
func GetFdInfo(in interface{}) (uintptr, bool) {
|
||||
var inFd uintptr
|
||||
var isTerminalIn bool
|
||||
if file, ok := in.(*os.File); ok {
|
||||
inFd = file.Fd()
|
||||
isTerminalIn = IsTerminal(inFd)
|
||||
}
|
||||
return inFd, isTerminalIn
|
||||
}
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios Termios
|
||||
return tcget(fd, &termios) == 0
|
||||
}
|
||||
|
||||
// RestoreTerminal restores the terminal connected to the given file descriptor
|
||||
// to a previous state.
|
||||
func RestoreTerminal(fd uintptr, state *State) error {
|
||||
if state == nil {
|
||||
return ErrInvalidState
|
||||
}
|
||||
if err := tcset(fd, &state.termios); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveState saves the state of the terminal connected to the given file descriptor.
|
||||
func SaveState(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if err := tcget(fd, &oldState.termios); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &oldState, nil
|
||||
}
|
||||
|
||||
// DisableEcho applies the specified state to the terminal connected to the file
|
||||
// descriptor, with echo disabled.
|
||||
func DisableEcho(fd uintptr, state *State) error {
|
||||
newState := state.termios
|
||||
newState.Lflag &^= syscall.ECHO
|
||||
|
||||
if err := tcset(fd, &newState); err != 0 {
|
||||
return err
|
||||
}
|
||||
handleInterrupt(fd, state)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRawTerminal puts the terminal connected to the given file descriptor into
|
||||
// raw mode and returns the previous state. On UNIX, this puts both the input
|
||||
// and output into raw mode. On Windows, it only puts the input into raw mode.
|
||||
func SetRawTerminal(fd uintptr) (*State, error) {
|
||||
oldState, err := MakeRaw(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
handleInterrupt(fd, oldState)
|
||||
return oldState, err
|
||||
}
|
||||
|
||||
// SetRawTerminalOutput puts the output of terminal connected to the given file
|
||||
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
|
||||
// state. On Windows, it disables LF -> CRLF translation.
|
||||
func SetRawTerminalOutput(fd uintptr) (*State, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func handleInterrupt(fd uintptr, state *State) {
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, os.Interrupt)
|
||||
go func() {
|
||||
for range sigchan {
|
||||
// quit cleanly and the new terminal item is on a new line
|
||||
fmt.Println()
|
||||
signal.Stop(sigchan)
|
||||
close(sigchan)
|
||||
RestoreTerminal(fd, state)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
}
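
Taken together, the functions above follow the usual save / raw / restore pattern. A brief sketch of how a Unix caller might use them (illustrative only; error handling trimmed):

package main

import (
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		return // stdin is not a terminal
	}
	// SetRawTerminal calls MakeRaw and registers the interrupt handler shown above.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	defer term.RestoreTerminal(fd, state)
	// ... interactive I/O happens here ...
}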

41 vendor/github.com/docker/docker/pkg/term/term_solaris.go (generated, vendored)
@@ -1,41 +0,0 @@
// +build solaris
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
/*
|
||||
#include <unistd.h>
|
||||
#include <stropts.h>
|
||||
#include <termios.h>
|
||||
|
||||
// Small wrapper to get rid of variadic args of ioctl()
|
||||
int my_ioctl(int fd, int cmd, struct winsize *ws) {
|
||||
return ioctl(fd, cmd, ws);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// GetWinsize returns the window size based on the specified file descriptor.
|
||||
func GetWinsize(fd uintptr) (*Winsize, error) {
|
||||
ws := &Winsize{}
|
||||
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
|
||||
// Skip retval = 0
|
||||
if ret == 0 {
|
||||
return ws, nil
|
||||
}
|
||||
return ws, err
|
||||
}
|
||||
|
||||
// SetWinsize tries to set the specified window size for the specified file descriptor.
|
||||
func SetWinsize(fd uintptr, ws *Winsize) error {
|
||||
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
|
||||
// Skip retval = 0
|
||||
if ret == 0 {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}

29 vendor/github.com/docker/docker/pkg/term/term_unix.go (generated, vendored)
@@ -1,29 +0,0 @@
// +build !solaris,!windows
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// GetWinsize returns the window size based on the specified file descriptor.
|
||||
func GetWinsize(fd uintptr) (*Winsize, error) {
|
||||
ws := &Winsize{}
|
||||
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
|
||||
// Skipp errno = 0
|
||||
if err == 0 {
|
||||
return ws, nil
|
||||
}
|
||||
return ws, err
|
||||
}
|
||||
|
||||
// SetWinsize tries to set the specified window size for the specified file descriptor.
|
||||
func SetWinsize(fd uintptr, ws *Winsize) error {
|
||||
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
|
||||
// Skipp errno = 0
|
||||
if err == 0 {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}

233 vendor/github.com/docker/docker/pkg/term/term_windows.go (generated, vendored)
@@ -1,233 +0,0 @@
// +build windows
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
"github.com/docker/docker/pkg/term/windows"
|
||||
)
|
||||
|
||||
// State holds the console mode for the terminal.
|
||||
type State struct {
|
||||
mode uint32
|
||||
}
|
||||
|
||||
// Winsize is used for window size.
|
||||
type Winsize struct {
|
||||
Height uint16
|
||||
Width uint16
|
||||
}
|
||||
|
||||
const (
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
|
||||
enableVirtualTerminalInput = 0x0200
|
||||
enableVirtualTerminalProcessing = 0x0004
|
||||
disableNewlineAutoReturn = 0x0008
|
||||
)
|
||||
|
||||
// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
|
||||
var vtInputSupported bool
|
||||
|
||||
// StdStreams returns the standard streams (stdin, stdout, stedrr).
|
||||
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
|
||||
// Turn on VT handling on all std handles, if possible. This might
|
||||
// fail, in which case we will fall back to terminal emulation.
|
||||
var emulateStdin, emulateStdout, emulateStderr bool
|
||||
fd := os.Stdin.Fd()
|
||||
if mode, err := winterm.GetConsoleMode(fd); err == nil {
|
||||
// Validate that enableVirtualTerminalInput is supported, but do not set it.
|
||||
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
|
||||
emulateStdin = true
|
||||
} else {
|
||||
vtInputSupported = true
|
||||
}
|
||||
// Unconditionally set the console mode back even on failure because SetConsoleMode
|
||||
// remembers invalid bits on input handles.
|
||||
winterm.SetConsoleMode(fd, mode)
|
||||
}
|
||||
|
||||
fd = os.Stdout.Fd()
|
||||
if mode, err := winterm.GetConsoleMode(fd); err == nil {
|
||||
// Validate disableNewlineAutoReturn is supported, but do not set it.
|
||||
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
|
||||
emulateStdout = true
|
||||
} else {
|
||||
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
|
||||
}
|
||||
}
|
||||
|
||||
fd = os.Stderr.Fd()
|
||||
if mode, err := winterm.GetConsoleMode(fd); err == nil {
|
||||
// Validate disableNewlineAutoReturn is supported, but do not set it.
|
||||
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
|
||||
emulateStderr = true
|
||||
} else {
|
||||
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
|
||||
}
|
||||
}
|
||||
|
||||
if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
|
||||
// The ConEmu and ConsoleZ terminals emulate ANSI on output streams well.
|
||||
emulateStdin = true
|
||||
emulateStdout = false
|
||||
emulateStderr = false
|
||||
}
|
||||
|
||||
if emulateStdin {
|
||||
stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
|
||||
} else {
|
||||
stdIn = os.Stdin
|
||||
}
|
||||
|
||||
if emulateStdout {
|
||||
stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
|
||||
} else {
|
||||
stdOut = os.Stdout
|
||||
}
|
||||
|
||||
if emulateStderr {
|
||||
stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
|
||||
} else {
|
||||
stdErr = os.Stderr
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
|
||||
func GetFdInfo(in interface{}) (uintptr, bool) {
|
||||
return windows.GetHandleInfo(in)
|
||||
}
|
||||
|
||||
// GetWinsize returns the window size based on the specified file descriptor.
|
||||
func GetWinsize(fd uintptr) (*Winsize, error) {
|
||||
info, err := winterm.GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
winsize := &Winsize{
|
||||
Width: uint16(info.Window.Right - info.Window.Left + 1),
|
||||
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
|
||||
}
|
||||
|
||||
return winsize, nil
|
||||
}
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
return windows.IsConsole(fd)
|
||||
}
|
||||
|
||||
// RestoreTerminal restores the terminal connected to the given file descriptor
|
||||
// to a previous state.
|
||||
func RestoreTerminal(fd uintptr, state *State) error {
|
||||
return winterm.SetConsoleMode(fd, state.mode)
|
||||
}
|
||||
|
||||
// SaveState saves the state of the terminal connected to the given file descriptor.
|
||||
func SaveState(fd uintptr) (*State, error) {
|
||||
mode, e := winterm.GetConsoleMode(fd)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
return &State{mode: mode}, nil
|
||||
}
|
||||
|
||||
// DisableEcho disables echo for the terminal connected to the given file descriptor.
|
||||
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
||||
func DisableEcho(fd uintptr, state *State) error {
|
||||
mode := state.mode
|
||||
mode &^= winterm.ENABLE_ECHO_INPUT
|
||||
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
|
||||
err := winterm.SetConsoleMode(fd, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Register an interrupt handler to catch and restore prior state
|
||||
restoreAtInterrupt(fd, state)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRawTerminal puts the terminal connected to the given file descriptor into
|
||||
// raw mode and returns the previous state. On UNIX, this puts both the input
|
||||
// and output into raw mode. On Windows, it only puts the input into raw mode.
|
||||
func SetRawTerminal(fd uintptr) (*State, error) {
|
||||
state, err := MakeRaw(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Register an interrupt handler to catch and restore prior state
|
||||
restoreAtInterrupt(fd, state)
|
||||
return state, err
|
||||
}
|
||||
|
||||
// SetRawTerminalOutput puts the output of terminal connected to the given file
|
||||
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
|
||||
// state. On Windows, it disables LF -> CRLF translation.
|
||||
func SetRawTerminalOutput(fd uintptr) (*State, error) {
|
||||
state, err := SaveState(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Ignore failures, since disableNewlineAutoReturn might not be supported on this
|
||||
// version of Windows.
|
||||
winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
|
||||
return state, err
|
||||
}
|
||||
|
||||
// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
state, err := SaveState(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mode := state.mode
|
||||
|
||||
// See
|
||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
||||
|
||||
// Disable these modes
|
||||
mode &^= winterm.ENABLE_ECHO_INPUT
|
||||
mode &^= winterm.ENABLE_LINE_INPUT
|
||||
mode &^= winterm.ENABLE_MOUSE_INPUT
|
||||
mode &^= winterm.ENABLE_WINDOW_INPUT
|
||||
mode &^= winterm.ENABLE_PROCESSED_INPUT
|
||||
|
||||
// Enable these modes
|
||||
mode |= winterm.ENABLE_EXTENDED_FLAGS
|
||||
mode |= winterm.ENABLE_INSERT_MODE
|
||||
mode |= winterm.ENABLE_QUICK_EDIT_MODE
|
||||
if vtInputSupported {
|
||||
mode |= enableVirtualTerminalInput
|
||||
}
|
||||
|
||||
err = winterm.SetConsoleMode(fd, mode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func restoreAtInterrupt(fd uintptr, state *State) {
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
_ = <-sigchan
|
||||
RestoreTerminal(fd, state)
|
||||
os.Exit(0)
|
||||
}()
|
||||
}
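
On Windows, StdStreams above probes each console handle for native VT support and only falls back to the ANSI-emulating wrappers from the windows subpackage when it has to; callers see a single portable call. A minimal hedged sketch of that caller side:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// On Windows these may be ANSI-translating wrappers; on Unix they are
	// simply os.Stdin, os.Stdout and os.Stderr (see the !windows variant above).
	stdin, stdout, stderr := term.StdStreams()
	_, _ = stdin, stderr
	fmt.Fprintln(stdout, "\x1b[32mok\x1b[0m") // ANSI green, translated when emulated
}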

69 vendor/github.com/docker/docker/pkg/term/termios_darwin.go (generated, vendored)
@@ -1,69 +0,0 @@
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
getTermios = syscall.TIOCGETA
|
||||
setTermios = syscall.TIOCSETA
|
||||
)
|
||||
|
||||
// Termios magic numbers, passthrough to the ones defined in syscall.
|
||||
const (
|
||||
IGNBRK = syscall.IGNBRK
|
||||
PARMRK = syscall.PARMRK
|
||||
INLCR = syscall.INLCR
|
||||
IGNCR = syscall.IGNCR
|
||||
ECHONL = syscall.ECHONL
|
||||
CSIZE = syscall.CSIZE
|
||||
ICRNL = syscall.ICRNL
|
||||
ISTRIP = syscall.ISTRIP
|
||||
PARENB = syscall.PARENB
|
||||
ECHO = syscall.ECHO
|
||||
ICANON = syscall.ICANON
|
||||
ISIG = syscall.ISIG
|
||||
IXON = syscall.IXON
|
||||
BRKINT = syscall.BRKINT
|
||||
INPCK = syscall.INPCK
|
||||
OPOST = syscall.OPOST
|
||||
CS8 = syscall.CS8
|
||||
IEXTEN = syscall.IEXTEN
|
||||
)
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
type Termios struct {
|
||||
Iflag uint64
|
||||
Oflag uint64
|
||||
Cflag uint64
|
||||
Lflag uint64
|
||||
Cc [20]byte
|
||||
Ispeed uint64
|
||||
Ospeed uint64
|
||||
}
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
|
||||
newState.Oflag &^= OPOST
|
||||
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
|
||||
newState.Cflag &^= (CSIZE | PARENB)
|
||||
newState.Cflag |= CS8
|
||||
newState.Cc[syscall.VMIN] = 1
|
||||
newState.Cc[syscall.VTIME] = 0
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &oldState, nil
|
||||
}

69 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go (generated, vendored)
@@ -1,69 +0,0 @@
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
getTermios = syscall.TIOCGETA
|
||||
setTermios = syscall.TIOCSETA
|
||||
)
|
||||
|
||||
// Termios magic numbers, passthrough to the ones defined in syscall.
|
||||
const (
|
||||
IGNBRK = syscall.IGNBRK
|
||||
PARMRK = syscall.PARMRK
|
||||
INLCR = syscall.INLCR
|
||||
IGNCR = syscall.IGNCR
|
||||
ECHONL = syscall.ECHONL
|
||||
CSIZE = syscall.CSIZE
|
||||
ICRNL = syscall.ICRNL
|
||||
ISTRIP = syscall.ISTRIP
|
||||
PARENB = syscall.PARENB
|
||||
ECHO = syscall.ECHO
|
||||
ICANON = syscall.ICANON
|
||||
ISIG = syscall.ISIG
|
||||
IXON = syscall.IXON
|
||||
BRKINT = syscall.BRKINT
|
||||
INPCK = syscall.INPCK
|
||||
OPOST = syscall.OPOST
|
||||
CS8 = syscall.CS8
|
||||
IEXTEN = syscall.IEXTEN
|
||||
)
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
type Termios struct {
|
||||
Iflag uint32
|
||||
Oflag uint32
|
||||
Cflag uint32
|
||||
Lflag uint32
|
||||
Cc [20]byte
|
||||
Ispeed uint32
|
||||
Ospeed uint32
|
||||
}
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
|
||||
newState.Oflag &^= OPOST
|
||||
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
|
||||
newState.Cflag &^= (CSIZE | PARENB)
|
||||
newState.Cflag |= CS8
|
||||
newState.Cc[syscall.VMIN] = 1
|
||||
newState.Cc[syscall.VTIME] = 0
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &oldState, nil
|
||||
}

47 vendor/github.com/docker/docker/pkg/term/termios_linux.go (generated, vendored)
@@ -1,47 +0,0 @@
// +build !cgo
|
||||
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
getTermios = syscall.TCGETS
|
||||
setTermios = syscall.TCSETS
|
||||
)
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
type Termios struct {
|
||||
Iflag uint32
|
||||
Oflag uint32
|
||||
Cflag uint32
|
||||
Lflag uint32
|
||||
Cc [20]byte
|
||||
Ispeed uint32
|
||||
Ospeed uint32
|
||||
}
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
|
||||
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
|
||||
newState.Oflag &^= syscall.OPOST
|
||||
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
|
||||
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
|
||||
newState.Cflag |= syscall.CS8
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
return &oldState, nil
|
||||
}

69 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go (generated, vendored)
@@ -1,69 +0,0 @@
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
getTermios = syscall.TIOCGETA
|
||||
setTermios = syscall.TIOCSETA
|
||||
)
|
||||
|
||||
// Termios magic numbers, passthrough to the ones defined in syscall.
|
||||
const (
|
||||
IGNBRK = syscall.IGNBRK
|
||||
PARMRK = syscall.PARMRK
|
||||
INLCR = syscall.INLCR
|
||||
IGNCR = syscall.IGNCR
|
||||
ECHONL = syscall.ECHONL
|
||||
CSIZE = syscall.CSIZE
|
||||
ICRNL = syscall.ICRNL
|
||||
ISTRIP = syscall.ISTRIP
|
||||
PARENB = syscall.PARENB
|
||||
ECHO = syscall.ECHO
|
||||
ICANON = syscall.ICANON
|
||||
ISIG = syscall.ISIG
|
||||
IXON = syscall.IXON
|
||||
BRKINT = syscall.BRKINT
|
||||
INPCK = syscall.INPCK
|
||||
OPOST = syscall.OPOST
|
||||
CS8 = syscall.CS8
|
||||
IEXTEN = syscall.IEXTEN
|
||||
)
|
||||
|
||||
// Termios is the Unix API for terminal I/O.
|
||||
type Termios struct {
|
||||
Iflag uint32
|
||||
Oflag uint32
|
||||
Cflag uint32
|
||||
Lflag uint32
|
||||
Cc [20]byte
|
||||
Ispeed uint32
|
||||
Ospeed uint32
|
||||
}
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd uintptr) (*State, error) {
|
||||
var oldState State
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newState := oldState.termios
|
||||
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
|
||||
newState.Oflag &^= OPOST
|
||||
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
|
||||
newState.Cflag &^= (CSIZE | PARENB)
|
||||
newState.Cflag |= CS8
|
||||
newState.Cc[syscall.VMIN] = 1
|
||||
newState.Cc[syscall.VTIME] = 0
|
||||
|
||||
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &oldState, nil
|
||||
}

263 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go (generated, vendored)
@@ -1,263 +0,0 @@
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
ansiterm "github.com/Azure/go-ansiterm"
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
)
|
||||
|
||||
const (
|
||||
escapeSequence = ansiterm.KEY_ESC_CSI
|
||||
)
|
||||
|
||||
// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
|
||||
type ansiReader struct {
|
||||
file *os.File
|
||||
fd uintptr
|
||||
buffer []byte
|
||||
cbBuffer int
|
||||
command []byte
|
||||
}
|
||||
|
||||
// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
|
||||
// Windows console input handle.
|
||||
func NewAnsiReader(nFile int) io.ReadCloser {
|
||||
initLogger()
|
||||
file, fd := winterm.GetStdFile(nFile)
|
||||
return &ansiReader{
|
||||
file: file,
|
||||
fd: fd,
|
||||
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
|
||||
buffer: make([]byte, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the wrapped file.
|
||||
func (ar *ansiReader) Close() (err error) {
|
||||
return ar.file.Close()
|
||||
}
|
||||
|
||||
// Fd returns the file descriptor of the wrapped file.
|
||||
func (ar *ansiReader) Fd() uintptr {
|
||||
return ar.fd
|
||||
}
|
||||
|
||||
// Read reads up to len(p) bytes of translated input events into p.
|
||||
func (ar *ansiReader) Read(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Previously read bytes exist, read as much as we can and return
|
||||
if len(ar.buffer) > 0 {
|
||||
logger.Debugf("Reading previously cached bytes")
|
||||
|
||||
originalLength := len(ar.buffer)
|
||||
copiedLength := copy(p, ar.buffer)
|
||||
|
||||
if copiedLength == originalLength {
|
||||
ar.buffer = make([]byte, 0, len(p))
|
||||
} else {
|
||||
ar.buffer = ar.buffer[copiedLength:]
|
||||
}
|
||||
|
||||
logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
|
||||
return copiedLength, nil
|
||||
}
|
||||
|
||||
// Read and translate key events
|
||||
events, err := readInputEvents(ar.fd, len(p))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
} else if len(events) == 0 {
|
||||
logger.Debug("No input events detected")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
keyBytes := translateKeyEvents(events, []byte(escapeSequence))
|
||||
|
||||
// Save excess bytes and right-size keyBytes
|
||||
if len(keyBytes) > len(p) {
|
||||
logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
|
||||
ar.buffer = keyBytes[len(p):]
|
||||
keyBytes = keyBytes[:len(p)]
|
||||
} else if len(keyBytes) == 0 {
|
||||
logger.Debug("No key bytes returned from the translator")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
copiedLength := copy(p, keyBytes)
|
||||
if copiedLength != len(keyBytes) {
|
||||
return 0, errors.New("unexpected copy length encountered")
|
||||
}
|
||||
|
||||
logger.Debugf("Read p[%d]: % x", copiedLength, p)
|
||||
logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
|
||||
return copiedLength, nil
|
||||
}
|
||||
|
||||
// readInputEvents polls until at least one event is available.
|
||||
func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
|
||||
// Determine the maximum number of records to retrieve
|
||||
// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
|
||||
// unsafe.Sizeof requires an expression vs. a type-reference; the casting
|
||||
// tricks the type system into believing it has such an expression.
|
||||
recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
|
||||
countRecords := maxBytes / recordSize
|
||||
if countRecords > ansiterm.MAX_INPUT_EVENTS {
|
||||
countRecords = ansiterm.MAX_INPUT_EVENTS
|
||||
} else if countRecords == 0 {
|
||||
countRecords = 1
|
||||
}
|
||||
logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
|
||||
|
||||
// Wait for and read input events
|
||||
events := make([]winterm.INPUT_RECORD, countRecords)
|
||||
nEvents := uint32(0)
|
||||
eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if eventsExist {
|
||||
err = winterm.ReadConsoleInput(fd, events, &nEvents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Return a slice restricted to the number of returned records
|
||||
logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
|
||||
return events[:nEvents], nil
|
||||
}
|
||||
|
||||
// KeyEvent Translation Helpers
|
||||
|
||||
var arrowKeyMapPrefix = map[uint16]string{
|
||||
winterm.VK_UP: "%s%sA",
|
||||
winterm.VK_DOWN: "%s%sB",
|
||||
winterm.VK_RIGHT: "%s%sC",
|
||||
winterm.VK_LEFT: "%s%sD",
|
||||
}
|
||||
|
||||
var keyMapPrefix = map[uint16]string{
|
||||
winterm.VK_UP: "\x1B[%sA",
|
||||
winterm.VK_DOWN: "\x1B[%sB",
|
||||
winterm.VK_RIGHT: "\x1B[%sC",
|
||||
winterm.VK_LEFT: "\x1B[%sD",
|
||||
winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
|
||||
winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
|
||||
winterm.VK_INSERT: "\x1B[2%s~",
|
||||
winterm.VK_DELETE: "\x1B[3%s~",
|
||||
winterm.VK_PRIOR: "\x1B[5%s~",
|
||||
winterm.VK_NEXT: "\x1B[6%s~",
|
||||
winterm.VK_F1: "",
|
||||
winterm.VK_F2: "",
|
||||
winterm.VK_F3: "\x1B[13%s~",
|
||||
winterm.VK_F4: "\x1B[14%s~",
|
||||
winterm.VK_F5: "\x1B[15%s~",
|
||||
winterm.VK_F6: "\x1B[17%s~",
|
||||
winterm.VK_F7: "\x1B[18%s~",
|
||||
winterm.VK_F8: "\x1B[19%s~",
|
||||
winterm.VK_F9: "\x1B[20%s~",
|
||||
winterm.VK_F10: "\x1B[21%s~",
|
||||
winterm.VK_F11: "\x1B[23%s~",
|
||||
winterm.VK_F12: "\x1B[24%s~",
|
||||
}
|
||||
|
||||
// translateKeyEvents converts the input events into the appropriate ANSI string.
|
||||
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
|
||||
var buffer bytes.Buffer
|
||||
for _, event := range events {
|
||||
if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
|
||||
buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
|
||||
}
|
||||
}
|
||||
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// keyToString maps the given input event record to the corresponding string.
|
||||
func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
|
||||
if keyEvent.UnicodeChar == 0 {
|
||||
return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
|
||||
}
|
||||
|
||||
_, alt, control := getControlKeys(keyEvent.ControlKeyState)
|
||||
if control {
|
||||
// TODO(azlinux): Implement following control sequences
|
||||
// <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
|
||||
// <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
|
||||
// <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
|
||||
// <Ctrl>-S Suspends printing on the screen (does not stop the program).
|
||||
// <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
|
||||
// <Ctrl>-E Quits current command and creates a core
|
||||
|
||||
}
|
||||
|
||||
// <Alt>+Key generates ESC N Key
|
||||
if !control && alt {
|
||||
return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
|
||||
}
|
||||
|
||||
return string(keyEvent.UnicodeChar)
|
||||
}
|
||||
|
||||
// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
|
||||
func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
|
||||
shift, alt, control := getControlKeys(controlState)
|
||||
modifier := getControlKeysModifier(shift, alt, control)
|
||||
|
||||
if format, ok := arrowKeyMapPrefix[key]; ok {
|
||||
return fmt.Sprintf(format, escapeSequence, modifier)
|
||||
}
|
||||
|
||||
if format, ok := keyMapPrefix[key]; ok {
|
||||
return fmt.Sprintf(format, modifier)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// getControlKeys extracts the shift, alt, and ctrl key states.
|
||||
func getControlKeys(controlState uint32) (shift, alt, control bool) {
|
||||
shift = 0 != (controlState & winterm.SHIFT_PRESSED)
|
||||
alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
|
||||
control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
|
||||
return shift, alt, control
|
||||
}
|
||||
|
||||
// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
|
||||
func getControlKeysModifier(shift, alt, control bool) string {
|
||||
if shift && alt && control {
|
||||
return ansiterm.KEY_CONTROL_PARAM_8
|
||||
}
|
||||
if alt && control {
|
||||
return ansiterm.KEY_CONTROL_PARAM_7
|
||||
}
|
||||
if shift && control {
|
||||
return ansiterm.KEY_CONTROL_PARAM_6
|
||||
}
|
||||
if control {
|
||||
return ansiterm.KEY_CONTROL_PARAM_5
|
||||
}
|
||||
if shift && alt {
|
||||
return ansiterm.KEY_CONTROL_PARAM_4
|
||||
}
|
||||
if alt {
|
||||
return ansiterm.KEY_CONTROL_PARAM_3
|
||||
}
|
||||
if shift {
|
||||
return ansiterm.KEY_CONTROL_PARAM_2
|
||||
}
|
||||
return ""
|
||||
}

64 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go (generated, vendored)
@@ -1,64 +0,0 @@
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
ansiterm "github.com/Azure/go-ansiterm"
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
)
|
||||
|
||||
// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
|
||||
type ansiWriter struct {
|
||||
file *os.File
|
||||
fd uintptr
|
||||
infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
|
||||
command []byte
|
||||
escapeSequence []byte
|
||||
inAnsiSequence bool
|
||||
parser *ansiterm.AnsiParser
|
||||
}
|
||||
|
||||
// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
|
||||
// Windows console output handle.
|
||||
func NewAnsiWriter(nFile int) io.Writer {
|
||||
initLogger()
|
||||
file, fd := winterm.GetStdFile(nFile)
|
||||
info, err := winterm.GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
|
||||
logger.Infof("newAnsiWriter: parser %p", parser)
|
||||
|
||||
aw := &ansiWriter{
|
||||
file: file,
|
||||
fd: fd,
|
||||
infoReset: info,
|
||||
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
|
||||
escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
|
||||
parser: parser,
|
||||
}
|
||||
|
||||
logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
|
||||
logger.Infof("newAnsiWriter: %v", aw)
|
||||
return aw
|
||||
}
|
||||
|
||||
func (aw *ansiWriter) Fd() uintptr {
|
||||
return aw.fd
|
||||
}
|
||||
|
||||
// Write writes len(p) bytes from p to the underlying data stream.
|
||||
func (aw *ansiWriter) Write(p []byte) (total int, err error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
logger.Infof("Write: % x", p)
|
||||
logger.Infof("Write: %s", string(p))
|
||||
return aw.parser.Parse(p)
|
||||
}

35 vendor/github.com/docker/docker/pkg/term/windows/console.go (generated, vendored)
@@ -1,35 +0,0 @@
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
)
|
||||
|
||||
// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
|
||||
func GetHandleInfo(in interface{}) (uintptr, bool) {
|
||||
switch t := in.(type) {
|
||||
case *ansiReader:
|
||||
return t.Fd(), true
|
||||
case *ansiWriter:
|
||||
return t.Fd(), true
|
||||
}
|
||||
|
||||
var inFd uintptr
|
||||
var isTerminal bool
|
||||
|
||||
if file, ok := in.(*os.File); ok {
|
||||
inFd = file.Fd()
|
||||
isTerminal = IsConsole(inFd)
|
||||
}
|
||||
return inFd, isTerminal
|
||||
}
|
||||
|
||||
// IsConsole returns true if the given file descriptor is a Windows Console.
|
||||
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
|
||||
func IsConsole(fd uintptr) bool {
|
||||
_, e := winterm.GetConsoleMode(fd)
|
||||
return e == nil
|
||||
}

33 vendor/github.com/docker/docker/pkg/term/windows/windows.go (generated, vendored)
@@ -1,33 +0,0 @@
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
|
||||
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
|
||||
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
ansiterm "github.com/Azure/go-ansiterm"
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
var logger *logrus.Logger
|
||||
var initOnce sync.Once
|
||||
|
||||
func initLogger() {
|
||||
initOnce.Do(func() {
|
||||
logFile := ioutil.Discard
|
||||
|
||||
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
|
||||
logFile, _ = os.Create("ansiReaderWriter.log")
|
||||
}
|
||||
|
||||
logger = &logrus.Logger{
|
||||
Out: logFile,
|
||||
Formatter: new(logrus.TextFormatter),
|
||||
Level: logrus.DebugLevel,
|
||||
}
|
||||
})
|
||||
}

101 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go (generated, vendored, normal file)
@@ -0,0 +1,101 @@
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package sortkeys
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
func Strings(l []string) {
|
||||
sort.Strings(l)
|
||||
}
|
||||
|
||||
func Float64s(l []float64) {
|
||||
sort.Float64s(l)
|
||||
}
|
||||
|
||||
func Float32s(l []float32) {
|
||||
sort.Sort(Float32Slice(l))
|
||||
}
|
||||
|
||||
func Int64s(l []int64) {
|
||||
sort.Sort(Int64Slice(l))
|
||||
}
|
||||
|
||||
func Int32s(l []int32) {
|
||||
sort.Sort(Int32Slice(l))
|
||||
}
|
||||
|
||||
func Uint64s(l []uint64) {
|
||||
sort.Sort(Uint64Slice(l))
|
||||
}
|
||||
|
||||
func Uint32s(l []uint32) {
|
||||
sort.Sort(Uint32Slice(l))
|
||||
}
|
||||
|
||||
func Bools(l []bool) {
|
||||
sort.Sort(BoolSlice(l))
|
||||
}
|
||||
|
||||
type BoolSlice []bool
|
||||
|
||||
func (p BoolSlice) Len() int { return len(p) }
|
||||
func (p BoolSlice) Less(i, j int) bool { return p[j] }
|
||||
func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
type Int64Slice []int64
|
||||
|
||||
func (p Int64Slice) Len() int { return len(p) }
|
||||
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
type Int32Slice []int32
|
||||
|
||||
func (p Int32Slice) Len() int { return len(p) }
|
||||
func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
type Uint64Slice []uint64
|
||||
|
||||
func (p Uint64Slice) Len() int { return len(p) }
|
||||
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
type Uint32Slice []uint32
|
||||
|
||||
func (p Uint32Slice) Len() int { return len(p) }
|
||||
func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
type Float32Slice []float32
|
||||
|
||||
func (p Float32Slice) Len() int { return len(p) }
|
||||
func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
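
sortkeys is a thin set of sort helpers that gogo-generated marshalling code typically calls to emit map keys in a stable order. A minimal usage sketch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/sortkeys"
)

func main() {
	keys := []uint64{42, 7, 19}
	sortkeys.Uint64s(keys) // sorts in place via Uint64Slice
	fmt.Println(keys)      // [7 19 42]
}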

135 vendor/github.com/gogo/protobuf/types/any.go (generated, vendored, normal file)
@@ -0,0 +1,135 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
const googleApis = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||
//
|
||||
// Note that regular type assertions should be done using the Is
|
||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||
// sequence of Any messages based on a set of allowed message type names.
|
||||
func AnyMessageName(any *Any) (string, error) {
|
||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||
if slash < 0 {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return any.TypeUrl[slash+1:], nil
|
||||
}
|
||||
|
||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||
func MarshalAny(pb proto.Message) (*Any, error) {
|
||||
value, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||
// message. The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct {
|
||||
proto.Message
|
||||
}
|
||||
|
||||
// Empty returns a new proto.Message of the type specified in a
|
||||
// google.protobuf.Any message. It returns an error if corresponding message
|
||||
// type isn't linked in.
|
||||
func EmptyAny(any *Any) (proto.Message, error) {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t := proto.MessageType(aname)
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||
}
|
||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||
// message and places the decoded result in pb. It returns an error if type of
|
||||
// contents of Any message does not match type of pb message.
|
||||
//
|
||||
// pb can be a proto.Message, or a *DynamicAny.
|
||||
func UnmarshalAny(any *Any, pb proto.Message) error {
|
||||
if d, ok := pb.(*DynamicAny); ok {
|
||||
if d.Message == nil {
|
||||
var err error
|
||||
d.Message, err = EmptyAny(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return UnmarshalAny(any, d.Message)
|
||||
}
|
||||
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mname := proto.MessageName(pb)
|
||||
if aname != mname {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, pb)
|
||||
}
|
||||
|
||||
// Is returns true if any value contains a given message type.
|
||||
func Is(any *Any, pb proto.Message) bool {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return aname == proto.MessageName(pb)
|
||||
}
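
The helpers above mirror the golang/protobuf ptypes API on gogo's own Any type. A short hedged round-trip sketch follows; Foo stands in for some gogo-generated proto.Message and is a placeholder, not a type from this diff, and the fmt and types imports are assumed:

// Pack a message into an Any, check its type, then unpack it again.
func anyRoundTrip(msg *Foo) error {
	any, err := types.MarshalAny(msg) // TypeUrl becomes "type.googleapis.com/<message name>"
	if err != nil {
		return err
	}
	if !types.Is(any, &Foo{}) {
		return fmt.Errorf("unexpected type %q", any.TypeUrl)
	}
	// types.DynamicAny would work here instead when the concrete type is not known up front.
	var out Foo
	return types.UnmarshalAny(any, &out)
}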

687 vendor/github.com/gogo/protobuf/types/any.pb.go (generated, vendored, normal file)
@@ -0,0 +1,687 @@
// Code generated by protoc-gen-gogo.
|
||||
// source: any.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package types is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
any.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Any
|
||||
*/
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import bytes "bytes"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} }
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func (m *Any) GetTypeUrl() string {
|
||||
if m != nil {
|
||||
return m.TypeUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Any) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
func (this *Any) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Any)
|
||||
if !ok {
|
||||
that2, ok := that.(Any)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.TypeUrl != that1.TypeUrl {
|
||||
if this.TypeUrl < that1.TypeUrl {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if c := bytes.Compare(this.Value, that1.Value); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Any) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
that1, ok := that.(*Any)
|
||||
if !ok {
|
||||
that2, ok := that.(Any)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.TypeUrl != that1.TypeUrl {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.Value, that1.Value) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Any) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Any{")
|
||||
s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
|
||||
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringAny(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringAny(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
func (m *Any) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.TypeUrl) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
|
||||
i += copy(dAtA[i:], m.TypeUrl)
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
|
||||
i += copy(dAtA[i:], m.Value)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Any(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Any(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func NewPopulatedAny(r randyAny, easy bool) *Any {
|
||||
this := &Any{}
|
||||
this.TypeUrl = string(randStringAny(r))
|
||||
v1 := r.Intn(100)
|
||||
this.Value = make([]byte, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
this.Value[i] = byte(r.Intn(256))
|
||||
}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyAny interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneAny(r randyAny) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringAny(r randyAny) string {
|
||||
v2 := r.Intn(100)
|
||||
tmps := make([]rune, v2)
|
||||
for i := 0; i < v2; i++ {
|
||||
tmps[i] = randUTF8RuneAny(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
v3 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v3 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *Any) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.TypeUrl)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovAny(uint64(l))
|
||||
}
|
||||
l = len(m.Value)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovAny(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovAny(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozAny(x uint64) (n int) {
|
||||
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *Any) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Any{`,
|
||||
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
|
||||
`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringAny(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *Any) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Any: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.TypeUrl = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Value == nil {
|
||||
m.Value = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipAny(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipAny(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthAny
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipAny(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("any.proto", fileDescriptorAny) }
|
||||
|
||||
var fileDescriptorAny = []byte{
|
||||
// 208 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4,
|
||||
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92,
|
||||
0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b,
|
||||
0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0,
|
||||
0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||
0x9e, 0x20, 0x08, 0xc7, 0xa9, 0x89, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e,
|
||||
0x3c, 0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c,
|
||||
0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72,
|
||||
0x0c, 0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xec, 0x77,
|
||||
0xe2, 0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x56, 0x16, 0x2f, 0x60,
|
||||
0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x3a, 0x00, 0xaa,
|
||||
0x5a, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, 0x0d,
|
||||
0x6c, 0x8c, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x63, 0x5d, 0x2d, 0x27, 0xe1, 0x00, 0x00, 0x00,
|
||||
}
|
35 vendor/github.com/gogo/protobuf/types/doc.go generated vendored Normal file
@@ -0,0 +1,35 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package types contains code for interacting with well-known types.
|
||||
*/
|
||||
package types
|
100 vendor/github.com/gogo/protobuf/types/duration.go generated vendored Normal file
@@ -0,0 +1,100 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Range of a Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid Duration
|
||||
// may still be too large to fit into a time.Duration (the range of Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290 years).
|
||||
func validateDuration(d *Duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %#v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %#v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
|
||||
// returns an error if the Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func DurationFromProto(p *Duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos)
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a Duration.
|
||||
func DurationProto(d time.Duration) *Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &Duration{
|
||||
Seconds: secs,
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
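A short sketch, not part of the vendored package, showing the round trip between time.Duration and the Duration message using DurationProto and DurationFromProto from the file above.

// Illustrative sketch only; not part of the vendored file.
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// 1.212s splits into Seconds: 1 and Nanos: 212000000.
	p := types.DurationProto(1212 * time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos)

	// Converting back validates the message and checks for overflow.
	d, err := types.DurationFromProto(p)
	fmt.Println(d, err) // 1.212s <nil>
}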
515 vendor/github.com/gogo/protobuf/types/duration.pb.go generated vendored Normal file
@@ -0,0 +1,515 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: duration.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package types is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
duration.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Duration
|
||||
*/
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} }
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
func (this *Duration) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Duration)
|
||||
if !ok {
|
||||
that2, ok := that.(Duration)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
if this.Seconds < that1.Seconds {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
if this.Nanos < that1.Nanos {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Duration) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
that1, ok := that.(*Duration)
|
||||
if !ok {
|
||||
that2, ok := that.(Duration)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
return false
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Duration) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Duration{")
|
||||
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringDuration(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringDuration(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
func (m *Duration) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
dAtA[i] = 0x8
|
||||
i++
|
||||
i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
dAtA[i] = 0x10
|
||||
i++
|
||||
i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Duration(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Duration(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Duration) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
n += 1 + sovDuration(uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
n += 1 + sovDuration(uint64(m.Nanos))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDuration(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozDuration(x uint64) (n int) {
|
||||
return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Duration) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Duration: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||
}
|
||||
m.Seconds = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seconds |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||
}
|
||||
m.Nanos = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Nanos |= (int32(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDuration(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDuration
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDuration(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDuration
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipDuration(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) }
|
||||
|
||||
var fileDescriptorDuration = []byte{
|
||||
// 203 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a,
|
||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24,
|
||||
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
|
||||
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c,
|
||||
0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f,
|
||||
0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe5, 0x18,
|
||||
0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x76, 0xe2, 0x85, 0x59, 0x1c, 0x00, 0x12, 0x09,
|
||||
0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0x5e, 0xc0, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d,
|
||||
0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4b, 0x00, 0x54, 0x8b, 0x5e, 0x78, 0x6a, 0x4e, 0x8e,
|
||||
0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x2c, 0x63, 0x40, 0x00, 0x00,
|
||||
0x00, 0xff, 0xff, 0xba, 0xfb, 0x15, 0xc9, 0xe6, 0x00, 0x00, 0x00,
|
||||
}
|
100 vendor/github.com/gogo/protobuf/types/duration_gogo.go generated vendored Normal file
@@ -0,0 +1,100 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
func NewPopulatedDuration(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *Duration {
|
||||
this := &Duration{}
|
||||
maxSecs := time.Hour.Nanoseconds() / 1e9
|
||||
max := 2 * maxSecs
|
||||
s := int64(r.Int63()) % max
|
||||
s -= maxSecs
|
||||
neg := int64(1)
|
||||
if s < 0 {
|
||||
neg = -1
|
||||
}
|
||||
this.Seconds = s
|
||||
this.Nanos = int32(neg * (r.Int63() % 1e9))
|
||||
return this
|
||||
}
|
||||
|
||||
func (d *Duration) String() string {
|
||||
td, err := DurationFromProto(d)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return td.String()
|
||||
}
|
||||
|
||||
func NewPopulatedStdDuration(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *time.Duration {
|
||||
dur := NewPopulatedDuration(r, easy)
|
||||
d, err := DurationFromProto(dur)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &d
|
||||
}
|
||||
|
||||
func SizeOfStdDuration(d time.Duration) int {
|
||||
dur := DurationProto(d)
|
||||
return dur.Size()
|
||||
}
|
||||
|
||||
func StdDurationMarshal(d time.Duration) ([]byte, error) {
|
||||
size := SizeOfStdDuration(d)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdDurationMarshalTo(d, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) {
|
||||
dur := DurationProto(d)
|
||||
return dur.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdDurationUnmarshal(d *time.Duration, data []byte) error {
|
||||
dur := &Duration{}
|
||||
if err := dur.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
dd, err := DurationFromProto(dur)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = dd
|
||||
return nil
|
||||
}
|
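A brief sketch, not part of the vendored package, of the StdDuration helpers above, which marshal and unmarshal a plain time.Duration using the Duration message's wire format.

// Illustrative sketch only; not part of the vendored file.
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Encode a time.Duration directly to Duration wire bytes.
	buf, err := types.StdDurationMarshal(90 * time.Second)
	if err != nil {
		panic(err)
	}

	// Decode the bytes back into a time.Duration.
	var out time.Duration
	if err := types.StdDurationUnmarshal(&out, buf); err != nil {
		panic(err)
	}
	fmt.Println(out) // 1m30s
}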
478 vendor/github.com/gogo/protobuf/types/empty.pb.go generated vendored Normal file
@@ -0,0 +1,478 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: empty.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package types is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
empty.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Empty
|
||||
*/
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
type Empty struct {
|
||||
}
|
||||
|
||||
func (m *Empty) Reset() { *m = Empty{} }
|
||||
func (*Empty) ProtoMessage() {}
|
||||
func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} }
|
||||
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||
}
|
||||
func (this *Empty) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Empty)
|
||||
if !ok {
|
||||
that2, ok := that.(Empty)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Empty) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
that1, ok := that.(*Empty)
|
||||
if !ok {
|
||||
that2, ok := that.(Empty)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Empty) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 4)
|
||||
s = append(s, "&types.Empty{")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringEmpty(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringEmpty(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
func (m *Empty) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Empty(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Empty(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
|
||||
this := &Empty{}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyEmpty interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneEmpty(r randyEmpty) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringEmpty(r randyEmpty) string {
|
||||
v1 := r.Intn(100)
|
||||
tmps := make([]rune, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
tmps[i] = randUTF8RuneEmpty(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
v2 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v2 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *Empty) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func sovEmpty(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozEmpty(x uint64) (n int) {
|
||||
return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *Empty) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Empty{`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringEmpty(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *Empty) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Empty: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipEmpty(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEmpty
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipEmpty(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthEmpty
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipEmpty(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("empty.proto", fileDescriptorEmpty) }
|
||||
|
||||
var fileDescriptorEmpty = []byte{
|
||||
// 172 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2d, 0x28,
|
||||
0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85,
|
||||
0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xd8, 0xb9, 0x58, 0x5d, 0x41, 0xf2, 0x4e, 0xed, 0x8c, 0x17, 0x1e,
|
||||
0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, 0x8c, 0x0d,
|
||||
0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6,
|
||||
0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, 0xc8, 0x25,
|
||||
0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa0, 0x13, 0x17, 0xd8, 0xb8, 0x00, 0x10, 0x37, 0x80, 0x31,
|
||||
0x8a, 0xb5, 0xa4, 0xb2, 0x20, 0xb5, 0x78, 0x01, 0x23, 0xe3, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc,
|
||||
0xee, 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x5a, 0x02, 0xa0, 0x5a, 0xf4, 0xc2, 0x53, 0x73,
|
||||
0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x8a, 0x93, 0xd8, 0xc0, 0x66, 0x19, 0x03, 0x02,
|
||||
0x00, 0x00, 0xff, 0xff, 0x97, 0x6c, 0x95, 0xdd, 0xb9, 0x00, 0x00, 0x00,
|
||||
}
|
759 vendor/github.com/gogo/protobuf/types/field_mask.pb.go generated vendored Normal file
@@ -0,0 +1,759 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: field_mask.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package types is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
field_mask.proto
|
||||
|
||||
It has these top-level messages:
|
||||
FieldMask
|
||||
*/
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x,y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// field mask.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, the existing
|
||||
// repeated values in the target resource will be overwritten by the new values.
|
||||
// Note that a repeated field is only allowed in the last position of a field
|
||||
// mask.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then the existing sub-message in the target resource is
|
||||
// overwritten. Given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: "f.b"
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// However, if the update mask was:
|
||||
//
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// then the result would be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d : 10
|
||||
// x : 2
|
||||
// }
|
||||
// c : 1
|
||||
// }
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Field names in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
type FieldMask struct {
|
||||
// The set of field mask paths.
|
||||
Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
|
||||
}
|
||||
|
||||
func (m *FieldMask) Reset() { *m = FieldMask{} }
|
||||
func (*FieldMask) ProtoMessage() {}
|
||||
func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} }
|
||||
|
||||
func (m *FieldMask) GetPaths() []string {
|
||||
if m != nil {
|
||||
return m.Paths
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
|
||||
}
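For reference, a minimal usage sketch for the generated FieldMask type above (not part of the vendored file; it assumes this vendored package is imported by its upstream path as `types`):

package main

import (
	"fmt"
	"strings"

	"github.com/gogo/protobuf/types" // assumed import path of this vendored package
)

func main() {
	// Build the mask from the Profile example in the comment above.
	mask := &types.FieldMask{Paths: []string{"user.display_name", "photo"}}

	// Proto text lists each path separately; the JSON encoding joins them
	// into one comma-separated string (the lower-camel conversion is done by
	// a JSON marshaler aware of well-known types and is omitted here).
	fmt.Println(strings.Join(mask.GetPaths(), ","))
}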
|
||||
func (this *FieldMask) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*FieldMask)
|
||||
if !ok {
|
||||
that2, ok := that.(FieldMask)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if len(this.Paths) != len(that1.Paths) {
|
||||
if len(this.Paths) < len(that1.Paths) {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
for i := range this.Paths {
|
||||
if this.Paths[i] != that1.Paths[i] {
|
||||
if this.Paths[i] < that1.Paths[i] {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *FieldMask) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
that1, ok := that.(*FieldMask)
|
||||
if !ok {
|
||||
that2, ok := that.(FieldMask)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if len(this.Paths) != len(that1.Paths) {
|
||||
return false
|
||||
}
|
||||
for i := range this.Paths {
|
||||
if this.Paths[i] != that1.Paths[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *FieldMask) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&types.FieldMask{")
|
||||
s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringFieldMask(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringFieldMask(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Paths) > 0 {
|
||||
for _, s := range m.Paths {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64FieldMask(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32FieldMask(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
|
||||
this := &FieldMask{}
|
||||
v1 := r.Intn(10)
|
||||
this.Paths = make([]string, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
this.Paths[i] = string(randStringFieldMask(r))
|
||||
}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyFieldMask interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneFieldMask(r randyFieldMask) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringFieldMask(r randyFieldMask) string {
|
||||
v2 := r.Intn(100)
|
||||
tmps := make([]rune, v2)
|
||||
for i := 0; i < v2; i++ {
|
||||
tmps[i] = randUTF8RuneFieldMask(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
v3 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v3 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *FieldMask) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Paths) > 0 {
|
||||
for _, s := range m.Paths {
|
||||
l = len(s)
|
||||
n += 1 + l + sovFieldMask(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovFieldMask(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozFieldMask(x uint64) (n int) {
|
||||
return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *FieldMask) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&FieldMask{`,
|
||||
`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringFieldMask(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *FieldMask) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: FieldMask: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipFieldMask(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipFieldMask(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthFieldMask
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipFieldMask(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("field_mask.proto", fileDescriptorFieldMask) }
|
||||
|
||||
var fileDescriptorFieldMask = []byte{
|
||||
// 196 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcb, 0x4c, 0xcd,
|
||||
0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf,
|
||||
0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x14, 0xb9, 0x38, 0xdd, 0x40, 0x8a,
|
||||
0x7c, 0x13, 0x8b, 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x0b, 0x12, 0x4b, 0x32, 0x8a, 0x25, 0x18, 0x15,
|
||||
0x98, 0x35, 0x38, 0x83, 0x20, 0x1c, 0xa7, 0x0e, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94,
|
||||
0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e,
|
||||
0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f,
|
||||
0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, 0x43,
|
||||
0xb3, 0xca, 0x89, 0x0f, 0x6e, 0x51, 0x00, 0x48, 0x28, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20,
|
||||
0xb5, 0x78, 0x01, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
|
||||
0x3d, 0x01, 0x50, 0x3d, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20,
|
||||
0x95, 0x49, 0x6c, 0x60, 0xc3, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0xb1, 0x3a, 0xd5,
|
||||
0xd9, 0x00, 0x00, 0x00,
|
||||
}
|
1908 vendor/github.com/gogo/protobuf/types/struct.pb.go generated vendored Normal file
File diff suppressed because it is too large
123 vendor/github.com/gogo/protobuf/types/timestamp.go generated vendored Normal file
@@ -0,0 +1,123 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
// This file implements operations on google.protobuf.Timestamp.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range
|
||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||
// in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes
|
||||
// the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||
func validateTimestamp(ts *Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if TimestampFromProto returns an error, the first return value
|
||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// location. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func TimestampFromProto(ts *Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because it corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*Timestamp, error) {
|
||||
seconds := t.Unix()
|
||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||
ts := &Timestamp{
|
||||
Seconds: seconds,
|
||||
Nanos: nanos,
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||
// Timestamps, it returns an error message in parentheses.
|
||||
func TimestampString(ts *Timestamp) string {
|
||||
t, err := TimestampFromProto(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
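A minimal usage sketch for the conversion helpers above (not part of the vendored file; it assumes the package is imported as `types`):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types" // assumed import path of this vendored package
)

func main() {
	// time.Time -> google.protobuf.Timestamp.
	ts, err := types.TimestampProto(time.Now().UTC())
	if err != nil {
		panic(err)
	}

	// Timestamp -> time.Time (always returned in UTC), plus the RFC 3339 form.
	t, err := types.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t, types.TimestampString(ts))
}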
|
527 vendor/github.com/gogo/protobuf/types/timestamp.pb.go generated vendored Normal file
@@ -0,0 +1,527 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: timestamp.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package types is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
timestamp.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Timestamp
|
||||
*/
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} }
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
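As a Go counterpart to the examples in the comment above, a sketch only: the helper name is hypothetical and the `time` package would need to be imported here, but the arithmetic mirrors Example 5 and the populate helpers elsewhere in this package.

// timestampFromNow is a hypothetical illustration, not generated code.
func timestampFromNow() *Timestamp {
	ns := time.Now().UnixNano()
	return &Timestamp{
		Seconds: ns / 1e9,        // whole seconds since the Unix epoch
		Nanos:   int32(ns % 1e9), // remaining nanoseconds
	}
}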
|
||||
func (this *Timestamp) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Timestamp)
|
||||
if !ok {
|
||||
that2, ok := that.(Timestamp)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
if this.Seconds < that1.Seconds {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
if this.Nanos < that1.Nanos {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Timestamp) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
that1, ok := that.(*Timestamp)
|
||||
if !ok {
|
||||
that2, ok := that.(Timestamp)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
return false
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Timestamp) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Timestamp{")
|
||||
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringTimestamp(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringTimestamp(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
||||
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
dAtA[i] = 0x8
|
||||
i++
|
||||
i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
dAtA[i] = 0x10
|
||||
i++
|
||||
i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Timestamp(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Timestamp(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Timestamp) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
n += 1 + sovTimestamp(uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
n += 1 + sovTimestamp(uint64(m.Nanos))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTimestamp(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozTimestamp(x uint64) (n int) {
|
||||
return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Timestamp) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||
}
|
||||
m.Seconds = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seconds |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||
}
|
||||
m.Nanos = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Nanos |= (int32(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTimestamp(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTimestamp
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTimestamp(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTimestamp
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipTimestamp(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) }
|
||||
|
||||
var fileDescriptorTimestamp = []byte{
|
||||
// 208 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf,
|
||||
0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84,
|
||||
0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98,
|
||||
0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46,
|
||||
0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x99, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18,
|
||||
0x3e, 0x3c, 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4,
|
||||
0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f,
|
||||
0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0x2c, 0x77, 0xe2, 0x83, 0x5b, 0x1d, 0x00,
|
||||
0x12, 0x0a, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0x5e, 0xc0, 0xc8, 0xf8, 0x83, 0x91,
|
||||
0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb6, 0x00, 0xa8, 0x36,
|
||||
0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0xe2, 0x24, 0x36, 0xb0,
|
||||
0x79, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x4d, 0xbd, 0x9c, 0xed, 0x00, 0x00, 0x00,
|
||||
}
|
94 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go generated vendored Normal file
@@ -0,0 +1,94 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func NewPopulatedTimestamp(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *Timestamp {
|
||||
this := &Timestamp{}
|
||||
ns := int64(r.Int63())
|
||||
this.Seconds = ns / 1e9
|
||||
this.Nanos = int32(ns % 1e9)
|
||||
return this
|
||||
}
|
||||
|
||||
func (ts *Timestamp) String() string {
|
||||
return TimestampString(ts)
|
||||
}
|
||||
|
||||
func NewPopulatedStdTime(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *time.Time {
|
||||
timestamp := NewPopulatedTimestamp(r, easy)
|
||||
t, err := TimestampFromProto(timestamp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &t
|
||||
}
|
||||
|
||||
func SizeOfStdTime(t time.Time) int {
|
||||
ts, err := TimestampProto(t)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return ts.Size()
|
||||
}
|
||||
|
||||
func StdTimeMarshal(t time.Time) ([]byte, error) {
|
||||
size := SizeOfStdTime(t)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdTimeMarshalTo(t, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
|
||||
ts, err := TimestampProto(t)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return ts.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdTimeUnmarshal(t *time.Time, data []byte) error {
|
||||
ts := &Timestamp{}
|
||||
if err := ts.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tt, err := TimestampFromProto(ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = tt
|
||||
return nil
|
||||
}
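A usage sketch for the StdTime helpers above (illustration only; it assumes the package is imported as `types`):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types" // assumed import path of this vendored package
)

func main() {
	// Encode a time.Time using the google.protobuf.Timestamp wire format...
	data, err := types.StdTimeMarshal(time.Now().UTC())
	if err != nil {
		panic(err)
	}

	// ...and decode it back into a time.Time.
	var decoded time.Time
	if err := types.StdTimeUnmarshal(&decoded, data); err != nil {
		panic(err)
	}
	fmt.Println(decoded)
}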
|
2280 vendor/github.com/gogo/protobuf/types/wrappers.pb.go generated vendored Normal file
File diff suppressed because it is too large
20 vendor/github.com/nats-io/gnatsd/LICENSE generated vendored
@@ -1,20 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
45 vendor/github.com/nats-io/gnatsd/auth/multiuser.go generated vendored
@@ -1,45 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// MultiUser is username and password authentication against a list of users
|
||||
type MultiUser struct {
|
||||
users map[string]*server.User
|
||||
}
|
||||
|
||||
// Create a new multi-user
|
||||
func NewMultiUser(users []*server.User) *MultiUser {
|
||||
m := &MultiUser{users: make(map[string]*server.User)}
|
||||
for _, u := range users {
|
||||
m.users[u.Username] = u
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Check authenticates the client using a username and password against a list of multiple users.
|
||||
func (m *MultiUser) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
user, ok := m.users[opts.Username]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
pass := user.Password
|
||||
|
||||
// Check to see if the password is a bcrypt hash
|
||||
if isBcrypt(pass) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(pass), []byte(opts.Password)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if pass != opts.Password {
|
||||
return false
|
||||
}
|
||||
|
||||
c.RegisterUser(user)
|
||||
|
||||
return true
|
||||
}
|
40 vendor/github.com/nats-io/gnatsd/auth/plain.go generated vendored
@@ -1,40 +0,0 @@
|
|||
// Copyright 2014-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
const bcryptPrefix = "$2a$"
|
||||
|
||||
func isBcrypt(password string) bool {
|
||||
return strings.HasPrefix(password, bcryptPrefix)
|
||||
}
|
||||
|
||||
// Plain authentication is a basic username and password
|
||||
type Plain struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// Check authenticates the client using a username and password
|
||||
func (p *Plain) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
if p.Username != opts.Username {
|
||||
return false
|
||||
}
|
||||
// Check to see if the password is a bcrypt hash
|
||||
if isBcrypt(p.Password) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(p.Password), []byte(opts.Password)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if p.Password != opts.Password {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
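For context on the bcrypt branch in Check above, a sketch (not part of the removed file) showing how a stored hash with the "$2a$" prefix is produced and verified with golang.org/x/crypto/bcrypt:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hashes produced here carry the "$2a$" prefix that isBcrypt checks for;
	// anything else is treated as a plaintext password.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// CompareHashAndPassword returns nil when the password matches the hash.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("s3cret")) == nil)
}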
26 vendor/github.com/nats-io/gnatsd/auth/token.go generated vendored
@@ -1,26 +0,0 @@
|
|||
package auth
|
||||
|
||||
import (
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// Token holds a string token used for authentication
|
||||
type Token struct {
|
||||
Token string
|
||||
}
|
||||
|
||||
// Check authenticates a client from a token
|
||||
func (p *Token) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
// Check to see if the token is a bcrypt hash
|
||||
if isBcrypt(p.Token) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(p.Token), []byte(opts.Authorization)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if p.Token != opts.Authorization {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
1087 vendor/github.com/nats-io/gnatsd/conf/lex.go generated vendored
File diff suppressed because it is too large
284 vendor/github.com/nats-io/gnatsd/conf/parse.go generated vendored
@@ -1,284 +0,0 @@
|
|||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package conf supports a configuration file format used by gnatsd. It is
|
||||
// a flexible format that combines the best of traditional
|
||||
// configuration formats and newer styles such as JSON and YAML.
|
||||
package conf
|
||||
|
||||
// The format supported is less restrictive than today's formats.
|
||||
// Supports mixed Arrays [], nested Maps {}, multiple comment types (# and //)
|
||||
// Also supports key/value assignments using '=' or ':' or whitespace
|
||||
// e.g. foo = 2, foo : 2, foo 2
|
||||
// maps can be assigned with no key separator as well
|
||||
// semicolons as value terminators in key/value assignments are optional
|
||||
//
|
||||
// see parse_test.go for more examples.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
lx *lexer
|
||||
|
||||
// The current scoped context, can be array or map
|
||||
ctx interface{}
|
||||
|
||||
// stack of contexts, either map or array/slice stack
|
||||
ctxs []interface{}
|
||||
|
||||
// Keys stack
|
||||
keys []string
|
||||
|
||||
// The config file path, empty by default.
|
||||
fp string
|
||||
}
|
||||
|
||||
// Parse will return a map of keys to interface{}, although concrete types
|
||||
// underly them. The values supported are string, bool, int64, float64, DateTime.
|
||||
// Arrays and nested Maps are also supported.
|
||||
func Parse(data string) (map[string]interface{}, error) {
|
||||
p, err := parse(data, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.mapping, nil
|
||||
}
|
||||
|
||||
// ParseFile is a helper to open file, etc. and parse the contents.
|
||||
func ParseFile(fp string) (map[string]interface{}, error) {
|
||||
data, err := ioutil.ReadFile(fp)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening config file: %v", err)
|
||||
}
|
||||
|
||||
p, err := parse(string(data), filepath.Dir(fp))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.mapping, nil
|
||||
}
|
||||
|
||||
func parse(data, fp string) (p *parser, err error) {
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
lx: lex(data),
|
||||
ctxs: make([]interface{}, 0, 4),
|
||||
keys: make([]string, 0, 4),
|
||||
fp: fp,
|
||||
}
|
||||
p.pushContext(p.mapping)
|
||||
|
||||
for {
|
||||
it := p.next()
|
||||
if it.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
if err := p.processItem(it); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
return p.lx.nextItem()
|
||||
}
|
||||
|
||||
func (p *parser) pushContext(ctx interface{}) {
|
||||
p.ctxs = append(p.ctxs, ctx)
|
||||
p.ctx = ctx
|
||||
}
|
||||
|
||||
func (p *parser) popContext() interface{} {
|
||||
if len(p.ctxs) == 0 {
|
||||
panic("BUG in parser, context stack empty")
|
||||
}
|
||||
li := len(p.ctxs) - 1
|
||||
last := p.ctxs[li]
|
||||
p.ctxs = p.ctxs[0:li]
|
||||
p.ctx = p.ctxs[len(p.ctxs)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (p *parser) pushKey(key string) {
|
||||
p.keys = append(p.keys, key)
|
||||
}
|
||||
|
||||
func (p *parser) popKey() string {
|
||||
if len(p.keys) == 0 {
|
||||
panic("BUG in parser, keys stack empty")
|
||||
}
|
||||
li := len(p.keys) - 1
|
||||
last := p.keys[li]
|
||||
p.keys = p.keys[0:li]
|
||||
return last
|
||||
}
|
||||
|
||||
func (p *parser) processItem(it item) error {
|
||||
switch it.typ {
|
||||
case itemError:
|
||||
return fmt.Errorf("Parse error on line %d: '%s'", it.line, it.val)
|
||||
case itemKey:
|
||||
p.pushKey(it.val)
|
||||
case itemMapStart:
|
||||
newCtx := make(map[string]interface{})
|
||||
p.pushContext(newCtx)
|
||||
case itemMapEnd:
|
||||
p.setValue(p.popContext())
|
||||
case itemString:
|
||||
p.setValue(it.val) // FIXME(dlc) sanitize string?
|
||||
case itemInteger:
|
||||
lastDigit := 0
|
||||
for _, r := range it.val {
|
||||
if !unicode.IsDigit(r) {
|
||||
break
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
numStr := it.val[:lastDigit]
|
||||
num, err := strconv.ParseInt(numStr, 10, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
return fmt.Errorf("Integer '%s' is out of the range.", it.val)
|
||||
}
|
||||
return fmt.Errorf("Expected integer, but got '%s'.", it.val)
|
||||
}
|
||||
// Process a suffix
|
||||
suffix := strings.ToLower(strings.TrimSpace(it.val[lastDigit:]))
|
||||
switch suffix {
|
||||
case "":
|
||||
p.setValue(num)
|
||||
case "k":
|
||||
p.setValue(num * 1000)
|
||||
case "kb":
|
||||
p.setValue(num * 1024)
|
||||
case "m":
|
||||
p.setValue(num * 1000 * 1000)
|
||||
case "mb":
|
||||
p.setValue(num * 1024 * 1024)
|
||||
case "g":
|
||||
p.setValue(num * 1000 * 1000 * 1000)
|
||||
case "gb":
|
||||
p.setValue(num * 1024 * 1024 * 1024)
|
||||
}
|
||||
case itemFloat:
|
||||
num, err := strconv.ParseFloat(it.val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
return fmt.Errorf("Float '%s' is out of the range.", it.val)
|
||||
}
|
||||
return fmt.Errorf("Expected float, but got '%s'.", it.val)
|
||||
}
|
||||
p.setValue(num)
|
||||
case itemBool:
|
||||
switch strings.ToLower(it.val) {
|
||||
case "true", "yes", "on":
|
||||
p.setValue(true)
|
||||
case "false", "no", "off":
|
||||
p.setValue(false)
|
||||
default:
|
||||
return fmt.Errorf("Expected boolean value, but got '%s'.", it.val)
|
||||
}
|
||||
case itemDatetime:
|
||||
dt, err := time.Parse("2006-01-02T15:04:05Z", it.val)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"Expected Zulu formatted DateTime, but got '%s'.", it.val)
|
||||
}
|
||||
p.setValue(dt)
|
||||
case itemArrayStart:
|
||||
var array = make([]interface{}, 0)
|
||||
p.pushContext(array)
|
||||
case itemArrayEnd:
|
||||
array := p.ctx
|
||||
p.popContext()
|
||||
p.setValue(array)
|
||||
case itemVariable:
|
||||
if value, ok := p.lookupVariable(it.val); ok {
|
||||
p.setValue(value)
|
||||
} else {
|
||||
return fmt.Errorf("Variable reference for '%s' on line %d can not be found.",
|
||||
it.val, it.line)
|
||||
}
|
||||
case itemInclude:
|
||||
m, err := ParseFile(filepath.Join(p.fp, it.val))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing include file '%s', %v.", it.val, err)
|
||||
}
|
||||
for k, v := range m {
|
||||
p.pushKey(k)
|
||||
p.setValue(v)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Used to map an environment value into a temporary map to pass to secondary Parse call.
|
||||
const pkey = "pk"
|
||||
|
||||
// We special case raw strings here that are bcrypt'd. This allows us not to force quoting the strings
|
||||
const bcryptPrefix = "2a$"
|
||||
|
||||
// lookupVariable will lookup a variable reference. It will use block scoping on keys
|
||||
// it has seen before, with the top level scoping being the environment variables. We
|
||||
// ignore array contexts and only process the map contexts..
|
||||
//
|
||||
// Returns true for ok if it finds something, similar to map.
|
||||
func (p *parser) lookupVariable(varReference string) (interface{}, bool) {
|
||||
// Do special check to see if it is a raw bcrypt string.
|
||||
if strings.HasPrefix(varReference, bcryptPrefix) {
|
||||
return "$" + varReference, true
|
||||
}
|
||||
|
||||
// Loop through contexts currently on the stack.
|
||||
for i := len(p.ctxs) - 1; i >= 0; i -= 1 {
|
||||
ctx := p.ctxs[i]
|
||||
// Process if it is a map context
|
||||
if m, ok := ctx.(map[string]interface{}); ok {
|
||||
if v, ok := m[varReference]; ok {
|
||||
return v, ok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we are here, we have exhausted our context maps and still not found anything.
|
||||
// Parse from the environment.
|
||||
if vStr, ok := os.LookupEnv(varReference); ok {
|
||||
// Everything we get here will be a string value, so we need to process as a parser would.
|
||||
if vmap, err := Parse(fmt.Sprintf("%s=%s", pkey, vStr)); err == nil {
|
||||
v, ok := vmap[pkey]
|
||||
return v, ok
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (p *parser) setValue(val interface{}) {
|
||||
// Test to see if we are on an array or a map
|
||||
|
||||
// Array processing
|
||||
if ctx, ok := p.ctx.([]interface{}); ok {
|
||||
p.ctx = append(ctx, val)
|
||||
p.ctxs[len(p.ctxs)-1] = p.ctx
|
||||
}
|
||||
|
||||
// Map processing
|
||||
if ctx, ok := p.ctx.(map[string]interface{}); ok {
|
||||
key := p.popKey()
|
||||
// FIXME(dlc), make sure to error if redefining same key?
|
||||
ctx[key] = val
|
||||
}
|
||||
}
|
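The package comment above describes the relaxed syntax this parser accepts (mixed separators, nested maps, integer suffixes). A short hedged example of feeding such a snippet to conf.Parse; the keys and values are made up for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/gnatsd/conf"
)

func main() {
	// '=', ':' or plain whitespace all work as key/value separators,
	// and numeric suffixes such as "4k" are expanded by the parser.
	src := `
listen: 0.0.0.0:4222
max_payload = 4k
authorization {
  user: derek
  pass: s3cret
}
routes [ "nats-route://127.0.0.1:6222" ]
`
	m, err := conf.Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	// 4000 (int64): per the suffix handling above, "k" multiplies by 1000, "kb" by 1024.
	fmt.Println(m["max_payload"])
}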
128
vendor/github.com/nats-io/gnatsd/logger/log.go
generated
vendored
128
vendor/github.com/nats-io/gnatsd/logger/log.go
generated
vendored
|
@@ -1,128 +0,0 @@
|
|||
// Copyright 2012-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package logger provides logging facilities for the NATS server
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is the server logger
|
||||
type Logger struct {
|
||||
logger *log.Logger
|
||||
debug bool
|
||||
trace bool
|
||||
infoLabel string
|
||||
errorLabel string
|
||||
fatalLabel string
|
||||
debugLabel string
|
||||
traceLabel string
|
||||
}
|
||||
|
||||
// NewStdLogger creates a logger with output directed to Stderr
|
||||
func NewStdLogger(time, debug, trace, colors, pid bool) *Logger {
|
||||
flags := 0
|
||||
if time {
|
||||
flags = log.LstdFlags | log.Lmicroseconds
|
||||
}
|
||||
|
||||
pre := ""
|
||||
if pid {
|
||||
pre = pidPrefix()
|
||||
}
|
||||
|
||||
l := &Logger{
|
||||
logger: log.New(os.Stderr, pre, flags),
|
||||
debug: debug,
|
||||
trace: trace,
|
||||
}
|
||||
|
||||
if colors {
|
||||
setColoredLabelFormats(l)
|
||||
} else {
|
||||
setPlainLabelFormats(l)
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// NewFileLogger creates a logger with output directed to a file
|
||||
func NewFileLogger(filename string, time, debug, trace, pid bool) *Logger {
|
||||
fileflags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
|
||||
f, err := os.OpenFile(filename, fileflags, 0660)
|
||||
if err != nil {
|
||||
log.Fatalf("error opening file: %v", err)
|
||||
}
|
||||
|
||||
flags := 0
|
||||
if time {
|
||||
flags = log.LstdFlags | log.Lmicroseconds
|
||||
}
|
||||
|
||||
pre := ""
|
||||
if pid {
|
||||
pre = pidPrefix()
|
||||
}
|
||||
|
||||
l := &Logger{
|
||||
logger: log.New(f, pre, flags),
|
||||
debug: debug,
|
||||
trace: trace,
|
||||
}
|
||||
|
||||
setPlainLabelFormats(l)
|
||||
return l
|
||||
}
|
||||
|
||||
// Generate the pid prefix string
|
||||
func pidPrefix() string {
|
||||
return fmt.Sprintf("[%d] ", os.Getpid())
|
||||
}
|
||||
|
||||
func setPlainLabelFormats(l *Logger) {
|
||||
l.infoLabel = "[INF] "
|
||||
l.debugLabel = "[DBG] "
|
||||
l.errorLabel = "[ERR] "
|
||||
l.fatalLabel = "[FTL] "
|
||||
l.traceLabel = "[TRC] "
|
||||
}
|
||||
|
||||
func setColoredLabelFormats(l *Logger) {
|
||||
colorFormat := "[\x1b[%dm%s\x1b[0m] "
|
||||
l.infoLabel = fmt.Sprintf(colorFormat, 32, "INF")
|
||||
l.debugLabel = fmt.Sprintf(colorFormat, 36, "DBG")
|
||||
l.errorLabel = fmt.Sprintf(colorFormat, 31, "ERR")
|
||||
l.fatalLabel = fmt.Sprintf(colorFormat, 31, "FTL")
|
||||
l.traceLabel = fmt.Sprintf(colorFormat, 33, "TRC")
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func (l *Logger) Noticef(format string, v ...interface{}) {
|
||||
l.logger.Printf(l.infoLabel+format, v...)
|
||||
}
|
||||
|
||||
// Errorf logs an error statement
|
||||
func (l *Logger) Errorf(format string, v ...interface{}) {
|
||||
l.logger.Printf(l.errorLabel+format, v...)
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func (l *Logger) Fatalf(format string, v ...interface{}) {
|
||||
l.logger.Fatalf(l.fatalLabel+format, v...)
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func (l *Logger) Debugf(format string, v ...interface{}) {
|
||||
if l.debug {
|
||||
l.logger.Printf(l.debugLabel+format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func (l *Logger) Tracef(format string, v ...interface{}) {
|
||||
if l.trace {
|
||||
l.logger.Printf(l.traceLabel+format, v...)
|
||||
}
|
||||
}
|
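A small sketch of constructing the stderr logger the removed file provided and exercising its levels; the flag values passed here are illustrative.

package main

import "github.com/nats-io/gnatsd/logger"

func main() {
	// NewStdLogger(time, debug, trace, colors, pid), as defined above.
	l := logger.NewStdLogger(true, true, false, false, true)

	l.Noticef("server starting on %s", "0.0.0.0:4222")
	l.Debugf("debug is %v, so this line is printed", true)
	l.Errorf("simulated error: %v", "example only")
}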
112
vendor/github.com/nats-io/gnatsd/logger/syslog.go
generated
vendored
112
vendor/github.com/nats-io/gnatsd/logger/syslog.go
generated
vendored
|
@@ -1,112 +0,0 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"log/syslog"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SysLogger provides a system logger facility
|
||||
type SysLogger struct {
|
||||
writer *syslog.Writer
|
||||
debug bool
|
||||
trace bool
|
||||
}
|
||||
|
||||
// GetSysLoggerTag generates the tag name for use in syslog statements. If
|
||||
// the executable is linked, the name of the link will be used as the tag,
|
||||
// otherwise, the name of the executable is used. "gnatsd" is the default
|
||||
// for the NATS server.
|
||||
func GetSysLoggerTag() string {
|
||||
procName := os.Args[0]
|
||||
if strings.ContainsRune(procName, os.PathSeparator) {
|
||||
parts := strings.FieldsFunc(procName, func(c rune) bool {
|
||||
return c == os.PathSeparator
|
||||
})
|
||||
procName = parts[len(parts)-1]
|
||||
}
|
||||
return procName
|
||||
}
|
||||
|
||||
// NewSysLogger creates a new system logger
|
||||
func NewSysLogger(debug, trace bool) *SysLogger {
|
||||
w, err := syslog.New(syslog.LOG_DAEMON|syslog.LOG_NOTICE, GetSysLoggerTag())
|
||||
if err != nil {
|
||||
log.Fatalf("error connecting to syslog: %q", err.Error())
|
||||
}
|
||||
|
||||
return &SysLogger{
|
||||
writer: w,
|
||||
debug: debug,
|
||||
trace: trace,
|
||||
}
|
||||
}
|
||||
|
||||
// NewRemoteSysLogger creates a new remote system logger
|
||||
func NewRemoteSysLogger(fqn string, debug, trace bool) *SysLogger {
|
||||
network, addr := getNetworkAndAddr(fqn)
|
||||
w, err := syslog.Dial(network, addr, syslog.LOG_DEBUG, GetSysLoggerTag())
|
||||
if err != nil {
|
||||
log.Fatalf("error connecting to syslog: %q", err.Error())
|
||||
}
|
||||
|
||||
return &SysLogger{
|
||||
writer: w,
|
||||
debug: debug,
|
||||
trace: trace,
|
||||
}
|
||||
}
|
||||
|
||||
func getNetworkAndAddr(fqn string) (network, addr string) {
|
||||
u, err := url.Parse(fqn)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
network = u.Scheme
|
||||
if network == "udp" || network == "tcp" {
|
||||
addr = u.Host
|
||||
} else if network == "unix" {
|
||||
addr = u.Path
|
||||
} else {
|
||||
log.Fatalf("error invalid network type: %q", u.Scheme)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func (l *SysLogger) Noticef(format string, v ...interface{}) {
|
||||
l.writer.Notice(fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func (l *SysLogger) Fatalf(format string, v ...interface{}) {
|
||||
l.writer.Crit(fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
// Errorf logs an error statement
|
||||
func (l *SysLogger) Errorf(format string, v ...interface{}) {
|
||||
l.writer.Err(fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func (l *SysLogger) Debugf(format string, v ...interface{}) {
|
||||
if l.debug {
|
||||
l.writer.Debug(fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func (l *SysLogger) Tracef(format string, v ...interface{}) {
|
||||
if l.trace {
|
||||
l.writer.Notice(fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
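getNetworkAndAddr above splits a remote-syslog URL into the network and address pair that syslog.Dial expects. A standalone sketch of the same split using net/url, with illustrative target URLs; the helper name is invented for the example.

package main

import (
	"fmt"
	"log"
	"net/url"
)

// splitSyslogTarget mirrors the removed getNetworkAndAddr helper:
// udp/tcp targets use the host portion, unix targets use the path.
func splitSyslogTarget(fqn string) (network, addr string, err error) {
	u, err := url.Parse(fqn)
	if err != nil {
		return "", "", err
	}
	switch u.Scheme {
	case "udp", "tcp":
		return u.Scheme, u.Host, nil
	case "unix":
		return u.Scheme, u.Path, nil
	default:
		return "", "", fmt.Errorf("invalid network type: %q", u.Scheme)
	}
}

func main() {
	network, addr, err := splitSyslogTarget("udp://syslog.example.com:514")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(network, addr) // udp syslog.example.com:514
}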
52
vendor/github.com/nats-io/gnatsd/logger/syslog_windows.go
generated
vendored
52
vendor/github.com/nats-io/gnatsd/logger/syslog_windows.go
generated
vendored
|
@@ -1,52 +0,0 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type SysLogger struct {
|
||||
writer *log.Logger
|
||||
debug bool
|
||||
trace bool
|
||||
}
|
||||
|
||||
func NewSysLogger(debug, trace bool) *SysLogger {
|
||||
w := log.New(os.Stdout, "gnatsd", log.LstdFlags)
|
||||
|
||||
return &SysLogger{
|
||||
writer: w,
|
||||
debug: debug,
|
||||
trace: trace,
|
||||
}
|
||||
}
|
||||
|
||||
func NewRemoteSysLogger(fqn string, debug, trace bool) *SysLogger {
|
||||
return NewSysLogger(debug, trace)
|
||||
}
|
||||
|
||||
func (l *SysLogger) Noticef(format string, v ...interface{}) {
|
||||
l.writer.Println("NOTICE", fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (l *SysLogger) Fatalf(format string, v ...interface{}) {
|
||||
l.writer.Println("CRITICAL", fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (l *SysLogger) Errorf(format string, v ...interface{}) {
|
||||
l.writer.Println("ERROR", fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (l *SysLogger) Debugf(format string, v ...interface{}) {
|
||||
if l.debug {
|
||||
l.writer.Println("DEBUG", fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
||||
|
||||
func (l *SysLogger) Tracef(format string, v ...interface{}) {
|
||||
if l.trace {
|
||||
l.writer.Println("NOTICE", fmt.Sprintf(format, v...))
|
||||
}
|
||||
}
|
17
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
17
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
|
@@ -1,17 +0,0 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// Auth is an interface for implementing authentication
|
||||
type Auth interface {
|
||||
// Check if a client is authorized to connect
|
||||
Check(c ClientAuth) bool
|
||||
}
|
||||
|
||||
// ClientAuth is an interface for client authentication
|
||||
type ClientAuth interface {
|
||||
// Get options associated with a client
|
||||
GetOpts() *clientOpts
|
||||
// Optionally map a user after auth.
|
||||
RegisterUser(*User)
|
||||
}
|
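The removed auth.Plain and auth.Token types plug into this interface. A compile-time sketch confirming they satisfy server.Auth, using the import paths shown in the files above:

package main

import (
	"github.com/nats-io/gnatsd/auth"
	"github.com/nats-io/gnatsd/server"
)

// Both removed auth types implement Check(server.ClientAuth) bool, so they
// satisfy server.Auth; these assertions fail to compile if that stops holding.
var (
	_ server.Auth = (*auth.Plain)(nil)
	_ server.Auth = (*auth.Token)(nil)
)

func main() {}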
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
|
@@ -1,33 +0,0 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.4,!go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.4 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
|
@@ -1,38 +0,0 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.5 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
// The SHA384 versions are only in Go1.5
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
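cipherMap above translates the config-file cipher names into crypto/tls constants. A hedged sketch of the same lookup building a tls.Config; only a subset of the suites listed above is included, and the helper name is invented for the example.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

// A small subset of the mapping the removed files maintained.
var cipherMap = map[string]uint16{
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
}

// suitesFromConfig resolves configured names to cipher suite IDs.
func suitesFromConfig(names []string) ([]uint16, error) {
	out := make([]uint16, 0, len(names))
	for _, n := range names {
		id, ok := cipherMap[n]
		if !ok {
			return nil, fmt.Errorf("unknown cipher suite: %q", n)
		}
		out = append(out, id)
	}
	return out, nil
}

func main() {
	suites, err := suitesFromConfig([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{CipherSuites: suites, MinVersion: tls.VersionTLS12}
	fmt.Println(len(cfg.CipherSuites))
}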
1366
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
1366
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
File diff suppressed because it is too large
82
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
82
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
|
@@ -1,82 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// VERSION is the current version for the server.
|
||||
VERSION = "0.9.6"
|
||||
|
||||
// DEFAULT_PORT is the default port for client connections.
|
||||
DEFAULT_PORT = 4222
|
||||
|
||||
// RANDOM_PORT is the value for port that, when supplied, will cause the
|
||||
// server to listen on a randomly-chosen available port. The resolved port
|
||||
// is available via the Addr() method.
|
||||
RANDOM_PORT = -1
|
||||
|
||||
// DEFAULT_HOST defaults to all interfaces.
|
||||
DEFAULT_HOST = "0.0.0.0"
|
||||
|
||||
// MAX_CONTROL_LINE_SIZE is the maximum allowed protocol control line size.
|
||||
// 1k should be plenty since payloads sans connect string are separate
|
||||
MAX_CONTROL_LINE_SIZE = 1024
|
||||
|
||||
// MAX_PAYLOAD_SIZE is the maximum allowed payload size. Should be using
|
||||
// something different if > 1MB payloads are needed.
|
||||
MAX_PAYLOAD_SIZE = (1024 * 1024)
|
||||
|
||||
// DEFAULT_MAX_CONNECTIONS is the default maximum connections allowed.
|
||||
DEFAULT_MAX_CONNECTIONS = (64 * 1024)
|
||||
|
||||
// TLS_TIMEOUT is the TLS wait time.
|
||||
TLS_TIMEOUT = 500 * time.Millisecond
|
||||
|
||||
// AUTH_TIMEOUT is the authorization wait time.
|
||||
AUTH_TIMEOUT = 2 * TLS_TIMEOUT
|
||||
|
||||
// DEFAULT_PING_INTERVAL is how often pings are sent to clients and routes.
|
||||
DEFAULT_PING_INTERVAL = 2 * time.Minute
|
||||
|
||||
// DEFAULT_PING_MAX_OUT is maximum allowed pings outstanding before disconnect.
|
||||
DEFAULT_PING_MAX_OUT = 2
|
||||
|
||||
// CR_LF string
|
||||
CR_LF = "\r\n"
|
||||
|
||||
// LEN_CR_LF hold onto the computed size.
|
||||
LEN_CR_LF = len(CR_LF)
|
||||
|
||||
// DEFAULT_FLUSH_DEADLINE is the write/flush deadlines.
|
||||
DEFAULT_FLUSH_DEADLINE = 2 * time.Second
|
||||
|
||||
// DEFAULT_HTTP_PORT is the default monitoring port.
|
||||
DEFAULT_HTTP_PORT = 8222
|
||||
|
||||
// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
|
||||
ACCEPT_MIN_SLEEP = 10 * time.Millisecond
|
||||
|
||||
// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors
|
||||
ACCEPT_MAX_SLEEP = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_CONNECT Route solicitation intervals.
|
||||
DEFAULT_ROUTE_CONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_RECONNECT Route reconnect intervals.
|
||||
DEFAULT_ROUTE_RECONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_DIAL Route dial timeout.
|
||||
DEFAULT_ROUTE_DIAL = 1 * time.Second
|
||||
|
||||
// PROTO_SNIPPET_SIZE is the default size of proto to print on parse errors.
|
||||
PROTO_SNIPPET_SIZE = 32
|
||||
|
||||
// MAX_MSG_ARGS Maximum possible number of arguments from MSG proto.
|
||||
MAX_MSG_ARGS = 4
|
||||
|
||||
// MAX_PUB_ARGS Maximum possible number of arguments from PUB proto.
|
||||
MAX_PUB_ARGS = 3
|
||||
)
|
32
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
32
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
|
@@ -1,32 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrConnectionClosed represents an error condition on a closed connection.
|
||||
ErrConnectionClosed = errors.New("Connection Closed")
|
||||
|
||||
// ErrAuthorization represents an error condition on failed authorization.
|
||||
ErrAuthorization = errors.New("Authorization Error")
|
||||
|
||||
// ErrAuthTimeout represents an error condition on failed authorization due to timeout.
|
||||
ErrAuthTimeout = errors.New("Authorization Timeout")
|
||||
|
||||
// ErrMaxPayload represents an error condition when the payload is too big.
|
||||
ErrMaxPayload = errors.New("Maximum Payload Exceeded")
|
||||
|
||||
// ErrMaxControlLine represents an error condition when the control line is too big.
|
||||
ErrMaxControlLine = errors.New("Maximum Control Line Exceeded")
|
||||
|
||||
// ErrReservedPublishSubject represents an error condition when sending to a reserved subject, e.g. _SYS.>
|
||||
ErrReservedPublishSubject = errors.New("Reserved Internal Subject")
|
||||
|
||||
// ErrBadClientProtocol signals a client requested an invalid client protocol.
|
||||
ErrBadClientProtocol = errors.New("Invalid Client Protocol")
|
||||
|
||||
// ErrTooManyConnections signals a client that the maximum number of connections supported by the
|
||||
// server has been reached.
|
||||
ErrTooManyConnections = errors.New("Maximum Connections Exceeded")
|
||||
)
|
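These exported sentinels are compared by identity in calling code. A minimal hedged sketch; the mapping to an action is purely illustrative.

package main

import (
	"fmt"

	"github.com/nats-io/gnatsd/server"
)

// describe maps a few of the sentinel errors above to a short action.
func describe(err error) string {
	switch err {
	case server.ErrAuthorization, server.ErrAuthTimeout:
		return "check credentials"
	case server.ErrMaxPayload:
		return "reduce message size"
	default:
		return "unhandled: " + err.Error()
	}
}

func main() {
	fmt.Println(describe(server.ErrMaxPayload))
}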
132
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
132
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
|
@@ -1,132 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/nats-io/gnatsd/logger"
|
||||
)
|
||||
|
||||
// Package globals for performance checks
|
||||
var trace int32
|
||||
var debug int32
|
||||
|
||||
var log = struct {
|
||||
sync.Mutex
|
||||
logger Logger
|
||||
}{}
|
||||
|
||||
// Logger interface of the NATS Server
|
||||
type Logger interface {
|
||||
|
||||
// Log a notice statement
|
||||
Noticef(format string, v ...interface{})
|
||||
|
||||
// Log a fatal error
|
||||
Fatalf(format string, v ...interface{})
|
||||
|
||||
// Log an error
|
||||
Errorf(format string, v ...interface{})
|
||||
|
||||
// Log a debug statement
|
||||
Debugf(format string, v ...interface{})
|
||||
|
||||
// Log a trace statement
|
||||
Tracef(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger sets the logger of the server
|
||||
func (s *Server) SetLogger(logger Logger, debugFlag, traceFlag bool) {
|
||||
if debugFlag {
|
||||
atomic.StoreInt32(&debug, 1)
|
||||
} else {
|
||||
atomic.StoreInt32(&debug, 0)
|
||||
}
|
||||
if traceFlag {
|
||||
atomic.StoreInt32(&trace, 1)
|
||||
} else {
|
||||
atomic.StoreInt32(&trace, 0)
|
||||
}
|
||||
|
||||
log.Lock()
|
||||
log.logger = logger
|
||||
log.Unlock()
|
||||
}
|
||||
|
||||
// If the logger is a file based logger, close and re-open the file.
|
||||
// This allows for file rotation by 'mv'ing the file then signalling
|
||||
// the process to trigger this function.
|
||||
func (s *Server) ReOpenLogFile() {
|
||||
// Check to make sure this is a file logger.
|
||||
log.Lock()
|
||||
ll := log.logger
|
||||
log.Unlock()
|
||||
|
||||
if ll == nil {
|
||||
Noticef("File log re-open ignored, no logger")
|
||||
return
|
||||
}
|
||||
if s.opts.LogFile == "" {
|
||||
Noticef("File log re-open ignored, not a file logger")
|
||||
} else {
|
||||
fileLog := logger.NewFileLogger(s.opts.LogFile,
|
||||
s.opts.Logtime, s.opts.Debug, s.opts.Trace, true)
|
||||
s.SetLogger(fileLog, s.opts.Debug, s.opts.Trace)
|
||||
Noticef("File log re-opened")
|
||||
}
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func Noticef(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Noticef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Errorf logs an error
|
||||
func Errorf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Errorf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Fatalf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&debug) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Debugf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func Tracef(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&trace) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Tracef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
func executeLogCall(f func(logger Logger, format string, v ...interface{}), format string, args ...interface{}) {
|
||||
log.Lock()
|
||||
defer log.Unlock()
|
||||
if log.logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
f(log.logger, format, args...)
|
||||
}
|
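The removed log.go gates Debugf/Tracef on package-level atomic flags so a disabled level costs a single atomic load before returning. A standalone sketch of that pattern, assuming the standard library only; the names are illustrative rather than the server's.

package main

import (
	"log"
	"sync"
	"sync/atomic"
)

var debugEnabled int32

// sink guards the destination logger, mirroring the mutex-protected
// package-level logger above.
var sink = struct {
	sync.Mutex
	l *log.Logger
}{}

// SetDebug flips the atomic flag that debugf checks before doing any work.
func SetDebug(on bool) {
	if on {
		atomic.StoreInt32(&debugEnabled, 1)
	} else {
		atomic.StoreInt32(&debugEnabled, 0)
	}
}

func debugf(format string, v ...interface{}) {
	if atomic.LoadInt32(&debugEnabled) == 0 {
		return // cheap exit when debug logging is off
	}
	sink.Lock()
	defer sink.Unlock()
	if sink.l != nil {
		sink.l.Printf(format, v...)
	}
}

func main() {
	sink.l = log.Default()
	SetDebug(true)
	debugf("connection %d accepted", 42)
}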
526
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
526
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
|
@@ -1,526 +0,0 @@
|
|||
// Copyright 2013-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/server/pse"
|
||||
)
|
||||
|
||||
// Snapshot this
|
||||
var numCores int
|
||||
|
||||
func init() {
|
||||
numCores = runtime.NumCPU()
|
||||
}
|
||||
|
||||
// Connz represents detailed information on current client connections.
|
||||
type Connz struct {
|
||||
Now time.Time `json:"now"`
|
||||
NumConns int `json:"num_connections"`
|
||||
Total int `json:"total"`
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit"`
|
||||
Conns []ConnInfo `json:"connections"`
|
||||
}
|
||||
|
||||
// ConnInfo has detailed information on a per connection basis.
|
||||
type ConnInfo struct {
|
||||
Cid uint64 `json:"cid"`
|
||||
IP string `json:"ip"`
|
||||
Port int `json:"port"`
|
||||
Start time.Time `json:"start"`
|
||||
LastActivity time.Time `json:"last_activity"`
|
||||
Uptime string `json:"uptime"`
|
||||
Idle string `json:"idle"`
|
||||
Pending int `json:"pending_bytes"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
NumSubs uint32 `json:"subscriptions"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Lang string `json:"lang,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
TLSVersion string `json:"tls_version,omitempty"`
|
||||
TLSCipher string `json:"tls_cipher_suite,omitempty"`
|
||||
AuthorizedUser string `json:"authorized_user,omitempty"`
|
||||
Subs []string `json:"subscriptions_list,omitempty"`
|
||||
}
|
||||
|
||||
// DefaultConnListSize is the default size of the connection list.
|
||||
const DefaultConnListSize = 1024
|
||||
|
||||
const defaultStackBufSize = 10000
|
||||
|
||||
// HandleConnz process HTTP requests for connection information.
|
||||
func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) {
|
||||
sortOpt := SortOpt(r.URL.Query().Get("sort"))
|
||||
|
||||
// If no sort option given or sort is by uptime, then sort by cid
|
||||
if sortOpt == "" || sortOpt == byUptime {
|
||||
sortOpt = byCid
|
||||
} else if !sortOpt.IsValid() {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte(fmt.Sprintf("Invalid sorting option: %s", sortOpt)))
|
||||
return
|
||||
}
|
||||
|
||||
c := &Connz{}
|
||||
c.Now = time.Now()
|
||||
|
||||
auth, _ := strconv.Atoi(r.URL.Query().Get("auth"))
|
||||
subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))
|
||||
c.Offset, _ = strconv.Atoi(r.URL.Query().Get("offset"))
|
||||
c.Limit, _ = strconv.Atoi(r.URL.Query().Get("limit"))
|
||||
|
||||
if c.Limit == 0 {
|
||||
c.Limit = DefaultConnListSize
|
||||
}
|
||||
|
||||
// Walk the list
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[ConnzPath]++
|
||||
tlsRequired := s.info.TLSRequired
|
||||
|
||||
// number total of clients. The resulting ConnInfo array
|
||||
// may be smaller if pagination is used.
|
||||
totalClients := len(s.clients)
|
||||
c.Total = totalClients
|
||||
|
||||
i := 0
|
||||
pairs := make(Pairs, totalClients)
|
||||
for _, client := range s.clients {
|
||||
client.mu.Lock()
|
||||
switch sortOpt {
|
||||
case byCid:
|
||||
pairs[i] = Pair{Key: client, Val: int64(client.cid)}
|
||||
case bySubs:
|
||||
pairs[i] = Pair{Key: client, Val: int64(len(client.subs))}
|
||||
case byPending:
|
||||
pairs[i] = Pair{Key: client, Val: int64(client.bw.Buffered())}
|
||||
case byOutMsgs:
|
||||
pairs[i] = Pair{Key: client, Val: client.outMsgs}
|
||||
case byInMsgs:
|
||||
pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inMsgs)}
|
||||
case byOutBytes:
|
||||
pairs[i] = Pair{Key: client, Val: client.outBytes}
|
||||
case byInBytes:
|
||||
pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inBytes)}
|
||||
case byLast:
|
||||
pairs[i] = Pair{Key: client, Val: client.last.UnixNano()}
|
||||
case byIdle:
|
||||
pairs[i] = Pair{Key: client, Val: c.Now.Sub(client.last).Nanoseconds()}
|
||||
}
|
||||
client.mu.Unlock()
|
||||
i++
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if totalClients > 0 {
|
||||
if sortOpt == byCid {
|
||||
// Return in ascending order
|
||||
sort.Sort(pairs)
|
||||
} else {
|
||||
// Return in descending order
|
||||
sort.Sort(sort.Reverse(pairs))
|
||||
}
|
||||
}
|
||||
|
||||
minoff := c.Offset
|
||||
maxoff := c.Offset + c.Limit
|
||||
|
||||
// Make sure these are sane.
|
||||
if minoff > totalClients {
|
||||
minoff = totalClients
|
||||
}
|
||||
if maxoff > totalClients {
|
||||
maxoff = totalClients
|
||||
}
|
||||
pairs = pairs[minoff:maxoff]
|
||||
|
||||
// Now we have the real number of ConnInfo objects, we can set c.NumConns
|
||||
// and allocate the array
|
||||
c.NumConns = len(pairs)
|
||||
c.Conns = make([]ConnInfo, c.NumConns)
|
||||
|
||||
i = 0
|
||||
for _, pair := range pairs {
|
||||
|
||||
client := pair.Key
|
||||
|
||||
client.mu.Lock()
|
||||
|
||||
// First, fill ConnInfo with current client's values. We will
|
||||
// then overwrite the field used for the sort with what was stored
|
||||
// in 'pair'.
|
||||
ci := &c.Conns[i]
|
||||
|
||||
ci.Cid = client.cid
|
||||
ci.Start = client.start
|
||||
ci.LastActivity = client.last
|
||||
ci.Uptime = myUptime(c.Now.Sub(client.start))
|
||||
ci.Idle = myUptime(c.Now.Sub(client.last))
|
||||
ci.OutMsgs = client.outMsgs
|
||||
ci.OutBytes = client.outBytes
|
||||
ci.NumSubs = uint32(len(client.subs))
|
||||
ci.Pending = client.bw.Buffered()
|
||||
ci.Name = client.opts.Name
|
||||
ci.Lang = client.opts.Lang
|
||||
ci.Version = client.opts.Version
|
||||
// inMsgs and inBytes are updated outside of the client's lock, so
|
||||
// we need to use atomic here.
|
||||
ci.InMsgs = atomic.LoadInt64(&client.inMsgs)
|
||||
ci.InBytes = atomic.LoadInt64(&client.inBytes)
|
||||
|
||||
// Now overwrite the field that was used as the sort key, so results
|
||||
// still look sorted even if the value has changed since sort occurred.
|
||||
sortValue := pair.Val
|
||||
switch sortOpt {
|
||||
case bySubs:
|
||||
ci.NumSubs = uint32(sortValue)
|
||||
case byPending:
|
||||
ci.Pending = int(sortValue)
|
||||
case byOutMsgs:
|
||||
ci.OutMsgs = sortValue
|
||||
case byInMsgs:
|
||||
ci.InMsgs = sortValue
|
||||
case byOutBytes:
|
||||
ci.OutBytes = sortValue
|
||||
case byInBytes:
|
||||
ci.InBytes = sortValue
|
||||
case byLast:
|
||||
ci.LastActivity = time.Unix(0, sortValue)
|
||||
case byIdle:
|
||||
ci.Idle = myUptime(time.Duration(sortValue))
|
||||
}
|
||||
|
||||
// If the connection is gone, too bad, we won't set TLSVersion and TLSCipher.
|
||||
if tlsRequired && client.nc != nil {
|
||||
conn := client.nc.(*tls.Conn)
|
||||
cs := conn.ConnectionState()
|
||||
ci.TLSVersion = tlsVersion(cs.Version)
|
||||
ci.TLSCipher = tlsCipher(cs.CipherSuite)
|
||||
}
|
||||
|
||||
switch conn := client.nc.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
ci.Port = addr.Port
|
||||
ci.IP = addr.IP.String()
|
||||
}
|
||||
|
||||
// Fill in subscription data if requested.
|
||||
if subs == 1 {
|
||||
sublist := make([]*subscription, 0, len(client.subs))
|
||||
for _, sub := range client.subs {
|
||||
sublist = append(sublist, sub)
|
||||
}
|
||||
ci.Subs = castToSliceString(sublist)
|
||||
}
|
||||
|
||||
// Fill in user if auth requested.
|
||||
if auth == 1 {
|
||||
ci.AuthorizedUser = client.opts.Username
|
||||
}
|
||||
|
||||
client.mu.Unlock()
|
||||
i++
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /connz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
func castToSliceString(input []*subscription) []string {
|
||||
output := make([]string, 0, len(input))
|
||||
for _, line := range input {
|
||||
output = append(output, string(line.subject))
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// Subsz represents detailed information on current connections.
|
||||
type Subsz struct {
|
||||
*SublistStats
|
||||
}
|
||||
|
||||
// Routez represents detailed information on current client connections.
|
||||
type Routez struct {
|
||||
Now time.Time `json:"now"`
|
||||
NumRoutes int `json:"num_routes"`
|
||||
Routes []*RouteInfo `json:"routes"`
|
||||
}
|
||||
|
||||
// RouteInfo has detailed information on a per connection basis.
|
||||
type RouteInfo struct {
|
||||
Rid uint64 `json:"rid"`
|
||||
RemoteID string `json:"remote_id"`
|
||||
DidSolicit bool `json:"did_solicit"`
|
||||
IsConfigured bool `json:"is_configured"`
|
||||
IP string `json:"ip"`
|
||||
Port int `json:"port"`
|
||||
Pending int `json:"pending_size"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
NumSubs uint32 `json:"subscriptions"`
|
||||
Subs []string `json:"subscriptions_list,omitempty"`
|
||||
}
|
||||
|
||||
// HandleRoutez process HTTP requests for route information.
|
||||
func (s *Server) HandleRoutez(w http.ResponseWriter, r *http.Request) {
|
||||
rs := &Routez{Routes: []*RouteInfo{}}
|
||||
rs.Now = time.Now()
|
||||
|
||||
subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))
|
||||
|
||||
// Walk the list
|
||||
s.mu.Lock()
|
||||
|
||||
s.httpReqStats[RoutezPath]++
|
||||
rs.NumRoutes = len(s.routes)
|
||||
|
||||
for _, r := range s.routes {
|
||||
r.mu.Lock()
|
||||
ri := &RouteInfo{
|
||||
Rid: r.cid,
|
||||
RemoteID: r.route.remoteID,
|
||||
DidSolicit: r.route.didSolicit,
|
||||
IsConfigured: r.route.routeType == Explicit,
|
||||
InMsgs: atomic.LoadInt64(&r.inMsgs),
|
||||
OutMsgs: r.outMsgs,
|
||||
InBytes: atomic.LoadInt64(&r.inBytes),
|
||||
OutBytes: r.outBytes,
|
||||
NumSubs: uint32(len(r.subs)),
|
||||
}
|
||||
|
||||
if subs == 1 {
|
||||
sublist := make([]*subscription, 0, len(r.subs))
|
||||
for _, sub := range r.subs {
|
||||
sublist = append(sublist, sub)
|
||||
}
|
||||
ri.Subs = castToSliceString(sublist)
|
||||
}
|
||||
r.mu.Unlock()
|
||||
|
||||
if ip, ok := r.nc.(*net.TCPConn); ok {
|
||||
addr := ip.RemoteAddr().(*net.TCPAddr)
|
||||
ri.Port = addr.Port
|
||||
ri.IP = addr.IP.String()
|
||||
}
|
||||
rs.Routes = append(rs.Routes, ri)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
b, err := json.MarshalIndent(rs, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /routez request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// HandleSubsz processes HTTP requests for subjects stats.
|
||||
func (s *Server) HandleSubsz(w http.ResponseWriter, r *http.Request) {
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[SubszPath]++
|
||||
s.mu.Unlock()
|
||||
|
||||
st := &Subsz{s.sl.Stats()}
|
||||
b, err := json.MarshalIndent(st, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /subscriptionsz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// HandleStacksz processes HTTP requests for getting stacks
|
||||
func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) {
|
||||
// Do not get any lock here that would prevent getting the stacks
|
||||
// if we were to have a deadlock somewhere.
|
||||
var defaultBuf [defaultStackBufSize]byte
|
||||
size := defaultStackBufSize
|
||||
buf := defaultBuf[:size]
|
||||
n := 0
|
||||
for {
|
||||
n = runtime.Stack(buf, true)
|
||||
if n < size {
|
||||
break
|
||||
}
|
||||
size *= 2
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
// Handle response
|
||||
ResponseHandler(w, r, buf[:n])
|
||||
}
|
||||
|
||||
// Varz will output server information on the monitoring port at /varz.
|
||||
type Varz struct {
|
||||
*Info
|
||||
*Options
|
||||
Port int `json:"port"`
|
||||
MaxPayload int `json:"max_payload"`
|
||||
Start time.Time `json:"start"`
|
||||
Now time.Time `json:"now"`
|
||||
Uptime string `json:"uptime"`
|
||||
Mem int64 `json:"mem"`
|
||||
Cores int `json:"cores"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Connections int `json:"connections"`
|
||||
TotalConnections uint64 `json:"total_connections"`
|
||||
Routes int `json:"routes"`
|
||||
Remotes int `json:"remotes"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
SlowConsumers int64 `json:"slow_consumers"`
|
||||
Subscriptions uint32 `json:"subscriptions"`
|
||||
HTTPReqStats map[string]uint64 `json:"http_req_stats"`
|
||||
}
|
||||
|
||||
type usage struct {
|
||||
CPU float32
|
||||
Cores int
|
||||
Mem int64
|
||||
}
|
||||
|
||||
func myUptime(d time.Duration) string {
|
||||
// Just use total seconds for uptime, and display days / years
|
||||
tsecs := d / time.Second
|
||||
tmins := tsecs / 60
|
||||
thrs := tmins / 60
|
||||
tdays := thrs / 24
|
||||
tyrs := tdays / 365
|
||||
|
||||
if tyrs > 0 {
|
||||
return fmt.Sprintf("%dy%dd%dh%dm%ds", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if tdays > 0 {
|
||||
return fmt.Sprintf("%dd%dh%dm%ds", tdays, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if thrs > 0 {
|
||||
return fmt.Sprintf("%dh%dm%ds", thrs, tmins%60, tsecs%60)
|
||||
}
|
||||
if tmins > 0 {
|
||||
return fmt.Sprintf("%dm%ds", tmins, tsecs%60)
|
||||
}
|
||||
return fmt.Sprintf("%ds", tsecs)
|
||||
}
|
||||
|
||||
// HandleRoot will show basic info and links to others handlers.
|
||||
func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) {
|
||||
// This feels dumb to me, but is required: https://code.google.com/p/go/issues/detail?id=4799
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[RootPath]++
|
||||
s.mu.Unlock()
|
||||
fmt.Fprintf(w, `<html lang="en">
|
||||
<head>
|
||||
<link rel="shortcut icon" href="http://nats.io/img/favicon.ico">
|
||||
<style type="text/css">
|
||||
body { font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif; font-size: 22; }
|
||||
a { margin-left: 32px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<img src="http://nats.io/img/logo.png" alt="NATS">
|
||||
<br/>
|
||||
<a href=/varz>varz</a><br/>
|
||||
<a href=/connz>connz</a><br/>
|
||||
<a href=/routez>routez</a><br/>
|
||||
<a href=/subsz>subsz</a><br/>
|
||||
<br/>
|
||||
<a href=http://nats.io/documentation/server/gnatsd-monitoring/>help</a>
|
||||
</body>
|
||||
</html>`)
|
||||
}
|
||||
|
||||
// HandleVarz will process HTTP requests for server information.
|
||||
func (s *Server) HandleVarz(w http.ResponseWriter, r *http.Request) {
|
||||
v := &Varz{Info: &s.info, Options: s.opts, MaxPayload: s.opts.MaxPayload, Start: s.start}
|
||||
v.Now = time.Now()
|
||||
v.Uptime = myUptime(time.Since(s.start))
|
||||
v.Port = v.Info.Port
|
||||
|
||||
updateUsage(v)
|
||||
|
||||
s.mu.Lock()
|
||||
v.Connections = len(s.clients)
|
||||
v.TotalConnections = s.totalClients
|
||||
v.Routes = len(s.routes)
|
||||
v.Remotes = len(s.remotes)
|
||||
v.InMsgs = s.inMsgs
|
||||
v.InBytes = s.inBytes
|
||||
v.OutMsgs = s.outMsgs
|
||||
v.OutBytes = s.outBytes
|
||||
v.SlowConsumers = s.slowConsumers
|
||||
v.Subscriptions = s.sl.Count()
|
||||
s.httpReqStats[VarzPath]++
|
||||
// Need a copy here since s.httpReqStats can change while doing
|
||||
// the marshaling down below.
|
||||
v.HTTPReqStats = make(map[string]uint64, len(s.httpReqStats))
|
||||
for key, val := range s.httpReqStats {
|
||||
v.HTTPReqStats[key] = val
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
b, err := json.MarshalIndent(v, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /varz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// Grab RSS and PCPU
|
||||
func updateUsage(v *Varz) {
|
||||
var rss, vss int64
|
||||
var pcpu float64
|
||||
|
||||
pse.ProcUsage(&pcpu, &rss, &vss)
|
||||
|
||||
v.Mem = rss
|
||||
v.CPU = pcpu
|
||||
v.Cores = numCores
|
||||
}
|
||||
|
||||
// ResponseHandler handles responses for monitoring routes
|
||||
func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) {
|
||||
// Get callback from request
|
||||
callback := r.URL.Query().Get("callback")
|
||||
// If callback is not empty then
|
||||
if callback != "" {
|
||||
// Response for JSONP
|
||||
w.Header().Set("Content-Type", "application/javascript")
|
||||
fmt.Fprintf(w, "%s(%s)", callback, data)
|
||||
} else {
|
||||
// Otherwise JSON
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
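The monitoring handlers above serve JSON at /varz, /connz, /routez and /subsz. A hedged sketch of querying /connz with the sort and subs query parameters and decoding a few of the fields defined above; the host and port are illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"
)

// connz mirrors a subset of the Connz/ConnInfo fields shown above.
type connz struct {
	Now      time.Time `json:"now"`
	NumConns int       `json:"num_connections"`
	Conns    []struct {
		Cid     uint64   `json:"cid"`
		IP      string   `json:"ip"`
		NumSubs uint32   `json:"subscriptions"`
		Subs    []string `json:"subscriptions_list,omitempty"`
	} `json:"connections"`
}

func main() {
	// sort=subs orders by subscription count, subs=1 includes the subject list.
	resp, err := http.Get("http://127.0.0.1:8222/connz?sort=subs&subs=1")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var c connz
	if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d connections at %s\n", c.NumConns, c.Now)
}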
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
|
@@ -1,50 +0,0 @@
|
|||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// SortOpt is a helper type to sort by ConnInfo values
|
||||
type SortOpt string
|
||||
|
||||
const (
|
||||
byCid SortOpt = "cid"
|
||||
bySubs = "subs"
|
||||
byPending = "pending"
|
||||
byOutMsgs = "msgs_to"
|
||||
byInMsgs = "msgs_from"
|
||||
byOutBytes = "bytes_to"
|
||||
byInBytes = "bytes_from"
|
||||
byLast = "last"
|
||||
byIdle = "idle"
|
||||
byUptime = "uptime"
|
||||
)
|
||||
|
||||
// IsValid determines if a sort option is valid
|
||||
func (s SortOpt) IsValid() bool {
|
||||
switch s {
|
||||
case "", byCid, bySubs, byPending, byOutMsgs, byInMsgs, byOutBytes, byInBytes, byLast, byIdle, byUptime:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Pair type is internally used.
|
||||
type Pair struct {
|
||||
Key *client
|
||||
Val int64
|
||||
}
|
||||
|
||||
// Pairs type is internally used.
|
||||
type Pairs []Pair
|
||||
|
||||
func (d Pairs) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d Pairs) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
||||
|
||||
func (d Pairs) Less(i, j int) bool {
|
||||
return d[i].Val < d[j].Val
|
||||
}
|
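Pairs implements sort.Interface so HandleConnz can order clients by the chosen metric. A standalone sketch of the same pattern, with a string key standing in for the unexported *client and made-up data.

package main

import (
	"fmt"
	"sort"
)

// pair/pairs mirror the removed Pair/Pairs helpers.
type pair struct {
	Key string
	Val int64
}

type pairs []pair

func (d pairs) Len() int           { return len(d) }
func (d pairs) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d pairs) Less(i, j int) bool { return d[i].Val < d[j].Val }

func main() {
	p := pairs{{"cid-3", 12}, {"cid-1", 40}, {"cid-2", 7}}
	// Descending order, as HandleConnz does for every sort option except cid.
	sort.Sort(sort.Reverse(p))
	fmt.Println(p) // [{cid-1 40} {cid-3 12} {cid-2 7}]
}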
802
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
802
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
|
@@ -1,802 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/conf"
|
||||
)
|
||||
|
||||
// For multiple accounts/users.
|
||||
type User struct {
|
||||
Username string `json:"user"`
|
||||
Password string `json:"password"`
|
||||
Permissions *Permissions `json:"permissions"`
|
||||
}
|
||||
|
||||
// Authorization are the allowed subjects on a per
|
||||
// publish or subscribe basis.
|
||||
type Permissions struct {
|
||||
Publish []string `json:"publish"`
|
||||
Subscribe []string `json:"subscribe"`
|
||||
}
|
||||
|
||||
// Options for clusters.
|
||||
type ClusterOpts struct {
|
||||
Host string `json:"addr"`
|
||||
Port int `json:"cluster_port"`
|
||||
Username string `json:"-"`
|
||||
Password string `json:"-"`
|
||||
AuthTimeout float64 `json:"auth_timeout"`
|
||||
TLSTimeout float64 `json:"-"`
|
||||
TLSConfig *tls.Config `json:"-"`
|
||||
ListenStr string `json:"-"`
|
||||
NoAdvertise bool `json:"-"`
|
||||
}
|
||||
|
||||
// Options block for gnatsd server.
|
||||
type Options struct {
|
||||
Host string `json:"addr"`
|
||||
Port int `json:"port"`
|
||||
Trace bool `json:"-"`
|
||||
Debug bool `json:"-"`
|
||||
NoLog bool `json:"-"`
|
||||
NoSigs bool `json:"-"`
|
||||
Logtime bool `json:"-"`
|
||||
MaxConn int `json:"max_connections"`
|
||||
Users []*User `json:"-"`
|
||||
Username string `json:"-"`
|
||||
Password string `json:"-"`
|
||||
Authorization string `json:"-"`
|
||||
PingInterval time.Duration `json:"ping_interval"`
|
||||
MaxPingsOut int `json:"ping_max"`
|
||||
HTTPHost string `json:"http_host"`
|
||||
HTTPPort int `json:"http_port"`
|
||||
HTTPSPort int `json:"https_port"`
|
||||
AuthTimeout float64 `json:"auth_timeout"`
|
||||
MaxControlLine int `json:"max_control_line"`
|
||||
MaxPayload int `json:"max_payload"`
|
||||
Cluster ClusterOpts `json:"cluster"`
|
||||
ProfPort int `json:"-"`
|
||||
PidFile string `json:"-"`
|
||||
LogFile string `json:"-"`
|
||||
Syslog bool `json:"-"`
|
||||
RemoteSyslog string `json:"-"`
|
||||
Routes []*url.URL `json:"-"`
|
||||
RoutesStr string `json:"-"`
|
||||
TLSTimeout float64 `json:"tls_timeout"`
|
||||
TLS bool `json:"-"`
|
||||
TLSVerify bool `json:"-"`
|
||||
TLSCert string `json:"-"`
|
||||
TLSKey string `json:"-"`
|
||||
TLSCaCert string `json:"-"`
|
||||
TLSConfig *tls.Config `json:"-"`
|
||||
}
|
||||
|
||||
// Configuration file authorization section.
|
||||
type authorization struct {
|
||||
// Singles
|
||||
user string
|
||||
pass string
|
||||
// Multiple Users
|
||||
users []*User
|
||||
timeout float64
|
||||
defaultPermissions *Permissions
|
||||
}
|
||||
|
||||
// TLSConfigOpts holds the parsed tls config information,
|
||||
// used with flag parsing
|
||||
type TLSConfigOpts struct {
|
||||
CertFile string
|
||||
KeyFile string
|
||||
CaFile string
|
||||
Verify bool
|
||||
Timeout float64
|
||||
Ciphers []uint16
|
||||
}
|
||||
|
||||
var tlsUsage = `
|
||||
TLS configuration is specified in the tls section of a configuration file:
|
||||
|
||||
e.g.
|
||||
|
||||
tls {
|
||||
cert_file: "./certs/server-cert.pem"
|
||||
key_file: "./certs/server-key.pem"
|
||||
ca_file: "./certs/ca.pem"
|
||||
verify: true
|
||||
|
||||
cipher_suites: [
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
|
||||
]
|
||||
}
|
||||
|
||||
Available cipher suites include:
|
||||
`
|
||||
|
||||
// ProcessConfigFile processes a configuration file.
|
||||
// FIXME(dlc): Hacky
|
||||
func ProcessConfigFile(configFile string) (*Options, error) {
|
||||
opts := &Options{}
|
||||
|
||||
if configFile == "" {
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
m, err := conf.ParseFile(configFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
switch strings.ToLower(k) {
|
||||
case "listen":
|
||||
hp, err := parseListen(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts.Host = hp.host
|
||||
opts.Port = hp.port
|
||||
case "port":
|
||||
opts.Port = int(v.(int64))
|
||||
case "host", "net":
|
||||
opts.Host = v.(string)
|
||||
case "debug":
|
||||
opts.Debug = v.(bool)
|
||||
case "trace":
|
||||
opts.Trace = v.(bool)
|
||||
case "logtime":
|
||||
opts.Logtime = v.(bool)
|
||||
case "authorization":
|
||||
am := v.(map[string]interface{})
|
||||
auth, err := parseAuthorization(am)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts.Username = auth.user
|
||||
opts.Password = auth.pass
|
||||
opts.AuthTimeout = auth.timeout
|
||||
// Check for multiple users defined
|
||||
if auth.users != nil {
|
||||
if auth.user != "" {
|
||||
return nil, fmt.Errorf("Can not have a single user/pass and a users array")
|
||||
}
|
||||
opts.Users = auth.users
|
||||
}
|
||||
case "http":
|
||||
hp, err := parseListen(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts.HTTPHost = hp.host
|
||||
opts.HTTPPort = hp.port
|
||||
case "https":
|
||||
hp, err := parseListen(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts.HTTPHost = hp.host
|
||||
opts.HTTPSPort = hp.port
|
||||
case "http_port", "monitor_port":
|
||||
opts.HTTPPort = int(v.(int64))
|
||||
case "https_port":
|
||||
opts.HTTPSPort = int(v.(int64))
|
||||
case "cluster":
|
||||
cm := v.(map[string]interface{})
|
||||
if err := parseCluster(cm, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "logfile", "log_file":
|
||||
opts.LogFile = v.(string)
|
||||
case "syslog":
|
||||
opts.Syslog = v.(bool)
|
||||
case "remote_syslog":
|
||||
opts.RemoteSyslog = v.(string)
|
||||
case "pidfile", "pid_file":
|
||||
opts.PidFile = v.(string)
|
||||
case "prof_port":
|
||||
opts.ProfPort = int(v.(int64))
|
||||
case "max_control_line":
|
||||
opts.MaxControlLine = int(v.(int64))
|
||||
case "max_payload":
|
||||
opts.MaxPayload = int(v.(int64))
|
||||
case "max_connections", "max_conn":
|
||||
opts.MaxConn = int(v.(int64))
|
||||
case "ping_interval":
|
||||
opts.PingInterval = time.Duration(int(v.(int64))) * time.Second
|
||||
case "ping_max":
|
||||
opts.MaxPingsOut = int(v.(int64))
|
||||
case "tls":
|
||||
tlsm := v.(map[string]interface{})
|
||||
tc, err := parseTLS(tlsm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opts.TLSConfig, err = GenTLSConfig(tc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts.TLSTimeout = tc.Timeout
|
||||
}
|
||||
}
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// hostPort is simple struct to hold parsed listen/addr strings.
|
||||
type hostPort struct {
|
||||
host string
|
||||
port int
|
||||
}
|
||||
|
||||
// parseListen will parse listen option which is replacing host/net and port
|
||||
func parseListen(v interface{}) (*hostPort, error) {
|
||||
hp := &hostPort{}
|
||||
switch v.(type) {
|
||||
// Only a port
|
||||
case int64:
|
||||
hp.port = int(v.(int64))
|
||||
case string:
|
||||
host, port, err := net.SplitHostPort(v.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse address string %q", v)
|
||||
}
|
||||
hp.port, err = strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse port %q", port)
|
||||
}
|
||||
hp.host = host
|
||||
}
|
||||
return hp, nil
|
||||
}
|
||||
|
||||
// parseCluster will parse the cluster config.
|
||||
func parseCluster(cm map[string]interface{}, opts *Options) error {
|
||||
for mk, mv := range cm {
|
||||
switch strings.ToLower(mk) {
|
||||
case "listen":
|
||||
hp, err := parseListen(mv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts.Cluster.Host = hp.host
|
||||
opts.Cluster.Port = hp.port
|
||||
case "port":
|
||||
opts.Cluster.Port = int(mv.(int64))
|
||||
case "host", "net":
|
||||
opts.Cluster.Host = mv.(string)
|
||||
case "authorization":
|
||||
am := mv.(map[string]interface{})
|
||||
auth, err := parseAuthorization(am)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if auth.users != nil {
|
||||
return fmt.Errorf("Cluster authorization does not allow multiple users")
|
||||
}
|
||||
opts.Cluster.Username = auth.user
|
||||
opts.Cluster.Password = auth.pass
|
||||
opts.Cluster.AuthTimeout = auth.timeout
|
||||
case "routes":
|
||||
ra := mv.([]interface{})
|
||||
opts.Routes = make([]*url.URL, 0, len(ra))
|
||||
for _, r := range ra {
|
||||
routeURL := r.(string)
|
||||
url, err := url.Parse(routeURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing route url [%q]", routeURL)
|
||||
}
|
||||
opts.Routes = append(opts.Routes, url)
|
||||
}
|
||||
case "tls":
|
||||
tlsm := mv.(map[string]interface{})
|
||||
tc, err := parseTLS(tlsm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.Cluster.TLSConfig, err = GenTLSConfig(tc); err != nil {
|
||||
return err
|
||||
}
|
||||
// For clusters, we will force strict verification. We also act
|
||||
// as both client and server, so will mirror the rootCA to the
|
||||
// clientCA pool.
|
||||
opts.Cluster.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
opts.Cluster.TLSConfig.RootCAs = opts.Cluster.TLSConfig.ClientCAs
|
||||
opts.Cluster.TLSTimeout = tc.Timeout
|
||||
case "no_advertise":
|
||||
opts.Cluster.NoAdvertise = mv.(bool)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper function to parse Authorization configs.
|
||||
func parseAuthorization(am map[string]interface{}) (*authorization, error) {
|
||||
auth := &authorization{}
|
||||
for mk, mv := range am {
|
||||
switch strings.ToLower(mk) {
|
||||
case "user", "username":
|
||||
auth.user = mv.(string)
|
||||
case "pass", "password":
|
||||
auth.pass = mv.(string)
|
||||
case "timeout":
|
||||
at := float64(1)
|
||||
switch mv.(type) {
|
||||
case int64:
|
||||
at = float64(mv.(int64))
|
||||
case float64:
|
||||
at = mv.(float64)
|
||||
}
|
||||
auth.timeout = at
|
||||
case "users":
|
||||
users, err := parseUsers(mv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth.users = users
|
||||
case "default_permission", "default_permissions":
|
||||
pm, ok := mv.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected default permissions to be a map/struct, got %+v", mv)
|
||||
}
|
||||
permissions, err := parseUserPermissions(pm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth.defaultPermissions = permissions
|
||||
}
|
||||
|
||||
// Now check for permission defaults with multiple users, etc.
|
||||
if auth.users != nil && auth.defaultPermissions != nil {
|
||||
for _, user := range auth.users {
|
||||
if user.Permissions == nil {
|
||||
user.Permissions = auth.defaultPermissions
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
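The timeout case above accepts both int64 and float64 because a generic config decoder may produce either for a numeric field. A standalone sketch of that normalization pattern (asSeconds is a made-up name):

package main

import "fmt"

func asSeconds(v interface{}) float64 {
	switch n := v.(type) {
	case int64:
		return float64(n)
	case float64:
		return n
	}
	return 0
}

func main() {
	fmt.Println(asSeconds(int64(2))) // 2
	fmt.Println(asSeconds(1.5))      // 1.5
}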
|
||||
// Helper function to parse multiple users array with optional permissions.
|
||||
func parseUsers(mv interface{}) ([]*User, error) {
|
||||
// Make sure we have an array
|
||||
uv, ok := mv.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected users field to be an array, got %v", mv)
|
||||
}
|
||||
users := []*User{}
|
||||
for _, u := range uv {
|
||||
// Check it's a map/struct
|
||||
um, ok := u.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected user entry to be a map/struct, got %v", u)
|
||||
}
|
||||
user := &User{}
|
||||
for k, v := range um {
|
||||
switch strings.ToLower(k) {
|
||||
case "user", "username":
|
||||
user.Username = v.(string)
|
||||
case "pass", "password":
|
||||
user.Password = v.(string)
|
||||
case "permission", "permissions", "authroization":
|
||||
pm, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected user permissions to be a map/struct, got %+v", v)
|
||||
}
|
||||
permissions, err := parseUserPermissions(pm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user.Permissions = permissions
|
||||
}
|
||||
}
|
||||
// Check to make sure we have at least username and password
|
||||
if user.Username == "" || user.Password == "" {
|
||||
return nil, fmt.Errorf("User entry requires a user and a password")
|
||||
}
|
||||
users = append(users, user)
|
||||
}
|
||||
return users, nil
|
||||
}
|
||||
|
||||
// Helper function to parse user/account permissions
|
||||
func parseUserPermissions(pm map[string]interface{}) (*Permissions, error) {
|
||||
p := &Permissions{}
|
||||
for k, v := range pm {
|
||||
switch strings.ToLower(k) {
|
||||
case "pub", "publish":
|
||||
subjects, err := parseSubjects(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Publish = subjects
|
||||
case "sub", "subscribe":
|
||||
subjects, err := parseSubjects(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Subscribe = subjects
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown field %s parsing permissions", k)
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Helper function to parse subject singletons and/or arrays
|
||||
func parseSubjects(v interface{}) ([]string, error) {
|
||||
var subjects []string
|
||||
switch v.(type) {
|
||||
case string:
|
||||
subjects = append(subjects, v.(string))
|
||||
case []string:
|
||||
subjects = v.([]string)
|
||||
case []interface{}:
|
||||
for _, i := range v.([]interface{}) {
|
||||
subject, ok := i.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Subject in permissions array cannot be cast to string")
|
||||
}
|
||||
subjects = append(subjects, subject)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Expected subject permissions to be a subject, or array of subjects, got %T", v)
|
||||
}
|
||||
return checkSubjectArray(subjects)
|
||||
}
|
||||
|
||||
// Helper function to validate subjects, etc for account permissioning.
|
||||
func checkSubjectArray(sa []string) ([]string, error) {
|
||||
for _, s := range sa {
|
||||
if !IsValidSubject(s) {
|
||||
return nil, fmt.Errorf("Subject %q is not a valid subject", s)
|
||||
}
|
||||
}
|
||||
return sa, nil
|
||||
}
|
||||
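IsValidSubject is defined elsewhere in this package. As a rough, standalone approximation of NATS subject rules (dot-separated non-empty tokens, no whitespace, '>' only as the final token), a simplified validator could look like the sketch below; it is an illustration, not the vendored implementation.

package main

import (
	"fmt"
	"strings"
)

func validSubject(s string) bool {
	if s == "" || strings.ContainsAny(s, " \t\r\n") {
		return false
	}
	tokens := strings.Split(s, ".")
	for i, t := range tokens {
		if t == "" {
			return false
		}
		if t == ">" && i != len(tokens)-1 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validSubject("foo.*.bar")) // true
	fmt.Println(validSubject("foo..bar"))  // false
	fmt.Println(validSubject("foo.>.bar")) // false
}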
|
||||
// PrintTLSHelpAndDie prints TLS usage and exits.
|
||||
func PrintTLSHelpAndDie() {
|
||||
fmt.Printf("%s\n", tlsUsage)
|
||||
for k := range cipherMap {
|
||||
fmt.Printf(" %s\n", k)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func parseCipher(cipherName string) (uint16, error) {
|
||||
|
||||
cipher, exists := cipherMap[cipherName]
|
||||
if !exists {
|
||||
return 0, fmt.Errorf("Unrecognized cipher %s", cipherName)
|
||||
}
|
||||
|
||||
return cipher, nil
|
||||
}
|
||||
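parseCipher simply looks the name up in cipherMap, which is defined elsewhere in the package. A minimal standalone version of that lookup using crypto/tls constants (the two entries here are examples only):

package main

import (
	"crypto/tls"
	"fmt"
)

var cipherNames = map[string]uint16{
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":   tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}

func lookupCipher(name string) (uint16, error) {
	c, ok := cipherNames[name]
	if !ok {
		return 0, fmt.Errorf("unrecognized cipher %s", name)
	}
	return c, nil
}

func main() {
	fmt.Println(lookupCipher("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"))
}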
|
||||
// Helper function to parse TLS configs.
|
||||
func parseTLS(tlsm map[string]interface{}) (*TLSConfigOpts, error) {
|
||||
tc := TLSConfigOpts{}
|
||||
for mk, mv := range tlsm {
|
||||
switch strings.ToLower(mk) {
|
||||
case "cert_file":
|
||||
certFile, ok := mv.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing tls config, expected 'cert_file' to be filename")
|
||||
}
|
||||
tc.CertFile = certFile
|
||||
case "key_file":
|
||||
keyFile, ok := mv.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing tls config, expected 'key_file' to be filename")
|
||||
}
|
||||
tc.KeyFile = keyFile
|
||||
case "ca_file":
|
||||
caFile, ok := mv.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing tls config, expected 'ca_file' to be filename")
|
||||
}
|
||||
tc.CaFile = caFile
|
||||
case "verify":
|
||||
verify, ok := mv.(bool)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing tls config, expected 'verify' to be a boolean")
|
||||
}
|
||||
tc.Verify = verify
|
||||
case "cipher_suites":
|
||||
ra := mv.([]interface{})
|
||||
if len(ra) == 0 {
|
||||
return nil, fmt.Errorf("error parsing tls config, 'cipher_suites' cannot be empty")
|
||||
}
|
||||
tc.Ciphers = make([]uint16, 0, len(ra))
|
||||
for _, r := range ra {
|
||||
cipher, err := parseCipher(r.(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tc.Ciphers = append(tc.Ciphers, cipher)
|
||||
}
|
||||
case "timeout":
|
||||
at := float64(0)
|
||||
switch mv.(type) {
|
||||
case int64:
|
||||
at = float64(mv.(int64))
|
||||
case float64:
|
||||
at = mv.(float64)
|
||||
}
|
||||
tc.Timeout = at
|
||||
default:
|
||||
return nil, fmt.Errorf("error parsing tls config, unknown field [%q]", mk)
|
||||
}
|
||||
}
|
||||
|
||||
// If cipher suites were not specified then use the defaults
|
||||
if tc.Ciphers == nil {
|
||||
tc.Ciphers = defaultCipherSuites()
|
||||
}
|
||||
|
||||
return &tc, nil
|
||||
}
|
||||
|
||||
// GenTLSConfig loads TLS related configuration parameters.
|
||||
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
|
||||
|
||||
// Now load in cert and private key
|
||||
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
|
||||
}
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate: %v", err)
|
||||
}
|
||||
|
||||
// Create TLSConfig
|
||||
// We will determine the cipher suites that we prefer.
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
PreferServerCipherSuites: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: tc.Ciphers,
|
||||
}
|
||||
|
||||
// Require client certificates as needed
|
||||
if tc.Verify {
|
||||
config.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
// Add in CAs if applicable.
|
||||
if tc.CaFile != "" {
|
||||
rootPEM, err := ioutil.ReadFile(tc.CaFile)
|
||||
if err != nil || rootPEM == nil {
|
||||
return nil, err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
ok := pool.AppendCertsFromPEM([]byte(rootPEM))
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to parse root ca certificate")
|
||||
}
|
||||
config.ClientCAs = pool
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
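GenTLSConfig amounts to loading a key pair, pinning TLS 1.2 and the preferred ciphers, and, when verification is requested, requiring client certificates against a CA pool. A standalone sketch of those same steps (the *.pem paths are placeholders):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
)

func buildTLS(certFile, keyFile, caFile string, verify bool) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
	}
	config := &tls.Config{
		Certificates:             []tls.Certificate{cert},
		PreferServerCipherSuites: true,
		MinVersion:               tls.VersionTLS12,
	}
	if verify {
		config.ClientAuth = tls.RequireAndVerifyClientCert
	}
	if caFile != "" {
		rootPEM, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(rootPEM) {
			return nil, fmt.Errorf("failed to parse root ca certificate")
		}
		config.ClientCAs = pool
	}
	return config, nil
}

func main() {
	if _, err := buildTLS("cert.pem", "key.pem", "ca.pem", true); err != nil {
		log.Fatal(err)
	}
}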
|
||||
// MergeOptions will merge two options giving preference to the flagOpts
|
||||
// if the item is present.
|
||||
func MergeOptions(fileOpts, flagOpts *Options) *Options {
|
||||
if fileOpts == nil {
|
||||
return flagOpts
|
||||
}
|
||||
if flagOpts == nil {
|
||||
return fileOpts
|
||||
}
|
||||
// Merge the two, flagOpts override
|
||||
opts := *fileOpts
|
||||
|
||||
if flagOpts.Port != 0 {
|
||||
opts.Port = flagOpts.Port
|
||||
}
|
||||
if flagOpts.Host != "" {
|
||||
opts.Host = flagOpts.Host
|
||||
}
|
||||
if flagOpts.Username != "" {
|
||||
opts.Username = flagOpts.Username
|
||||
}
|
||||
if flagOpts.Password != "" {
|
||||
opts.Password = flagOpts.Password
|
||||
}
|
||||
if flagOpts.Authorization != "" {
|
||||
opts.Authorization = flagOpts.Authorization
|
||||
}
|
||||
if flagOpts.HTTPPort != 0 {
|
||||
opts.HTTPPort = flagOpts.HTTPPort
|
||||
}
|
||||
if flagOpts.Debug {
|
||||
opts.Debug = true
|
||||
}
|
||||
if flagOpts.Trace {
|
||||
opts.Trace = true
|
||||
}
|
||||
if flagOpts.Logtime {
|
||||
opts.Logtime = true
|
||||
}
|
||||
if flagOpts.LogFile != "" {
|
||||
opts.LogFile = flagOpts.LogFile
|
||||
}
|
||||
if flagOpts.PidFile != "" {
|
||||
opts.PidFile = flagOpts.PidFile
|
||||
}
|
||||
if flagOpts.ProfPort != 0 {
|
||||
opts.ProfPort = flagOpts.ProfPort
|
||||
}
|
||||
if flagOpts.Cluster.ListenStr != "" {
|
||||
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
|
||||
}
|
||||
if flagOpts.Cluster.NoAdvertise {
|
||||
opts.Cluster.NoAdvertise = true
|
||||
}
|
||||
if flagOpts.RoutesStr != "" {
|
||||
mergeRoutes(&opts, flagOpts)
|
||||
}
|
||||
return &opts
|
||||
}
|
||||
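The merge rule in MergeOptions is simply "start from the file options, let any non-zero flag value win". The same pattern reduced to a two-field example (the opts type here is made up):

package main

import "fmt"

type opts struct {
	Port int
	Host string
}

func merge(file, flag *opts) *opts {
	if file == nil {
		return flag
	}
	if flag == nil {
		return file
	}
	out := *file // copy the file options
	if flag.Port != 0 {
		out.Port = flag.Port
	}
	if flag.Host != "" {
		out.Host = flag.Host
	}
	return &out
}

func main() {
	fmt.Println(merge(&opts{Port: 4222, Host: "0.0.0.0"}, &opts{Port: 5222})) // &{5222 0.0.0.0}
}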
|
||||
// RoutesFromStr parses route URLs from a string
|
||||
func RoutesFromStr(routesStr string) []*url.URL {
|
||||
routes := strings.Split(routesStr, ",")
|
||||
if len(routes) == 0 {
|
||||
return nil
|
||||
}
|
||||
routeUrls := []*url.URL{}
|
||||
for _, r := range routes {
|
||||
r = strings.TrimSpace(r)
|
||||
u, _ := url.Parse(r)
|
||||
routeUrls = append(routeUrls, u)
|
||||
}
|
||||
return routeUrls
|
||||
}
|
||||
|
||||
// This will merge the flag routes and override anything that was present.
|
||||
func mergeRoutes(opts, flagOpts *Options) {
|
||||
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
|
||||
if routeUrls == nil {
|
||||
return
|
||||
}
|
||||
opts.Routes = routeUrls
|
||||
opts.RoutesStr = flagOpts.RoutesStr
|
||||
}
|
||||
|
||||
// RemoveSelfReference removes this server from an array of routes
|
||||
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
|
||||
var cleanRoutes []*url.URL
|
||||
cport := strconv.Itoa(clusterPort)
|
||||
|
||||
selfIPs := getInterfaceIPs()
|
||||
for _, r := range routes {
|
||||
host, port, err := net.SplitHostPort(r.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cport == port && isIPInList(selfIPs, getURLIP(host)) {
|
||||
Noticef("Self referencing IP found: ", r)
|
||||
continue
|
||||
}
|
||||
cleanRoutes = append(cleanRoutes, r)
|
||||
}
|
||||
|
||||
return cleanRoutes, nil
|
||||
}
|
||||
|
||||
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
|
||||
for _, ip1 := range list1 {
|
||||
for _, ip2 := range list2 {
|
||||
if ip1.Equal(ip2) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getURLIP(ipStr string) []net.IP {
|
||||
ipList := []net.IP{}
|
||||
|
||||
ip := net.ParseIP(ipStr)
|
||||
if ip != nil {
|
||||
ipList = append(ipList, ip)
|
||||
return ipList
|
||||
}
|
||||
|
||||
hostAddr, err := net.LookupHost(ipStr)
|
||||
if err != nil {
|
||||
Errorf("Error looking up host with route hostname: %v", err)
|
||||
return ipList
|
||||
}
|
||||
for _, addr := range hostAddr {
|
||||
ip = net.ParseIP(addr)
|
||||
if ip != nil {
|
||||
ipList = append(ipList, ip)
|
||||
}
|
||||
}
|
||||
return ipList
|
||||
}
|
||||
|
||||
func getInterfaceIPs() []net.IP {
|
||||
var localIPs []net.IP
|
||||
|
||||
interfaceAddr, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
Errorf("Error getting self referencing address: %v", err)
|
||||
return localIPs
|
||||
}
|
||||
|
||||
for i := 0; i < len(interfaceAddr); i++ {
|
||||
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
|
||||
if net.ParseIP(interfaceIP.String()) != nil {
|
||||
localIPs = append(localIPs, interfaceIP)
|
||||
} else {
|
||||
Errorf("Error parsing self referencing address: %v", err)
|
||||
}
|
||||
}
|
||||
return localIPs
|
||||
}
|
||||
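Taken together, getURLIP, getInterfaceIPs and isIPInList answer "does this route host point back at me?". A standalone sketch of that check, leaving out the cluster-port comparison and logging that RemoveSelfReference adds:

package main

import (
	"fmt"
	"net"
)

func resolve(host string) []net.IP {
	if ip := net.ParseIP(host); ip != nil {
		return []net.IP{ip}
	}
	addrs, _ := net.LookupHost(host)
	var ips []net.IP
	for _, a := range addrs {
		if ip := net.ParseIP(a); ip != nil {
			ips = append(ips, ip)
		}
	}
	return ips
}

func isSelf(host string) bool {
	targets := resolve(host)
	ifaceAddrs, _ := net.InterfaceAddrs()
	for _, a := range ifaceAddrs {
		local, _, err := net.ParseCIDR(a.String())
		if err != nil {
			continue
		}
		for _, ip := range targets {
			if local.Equal(ip) {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(isSelf("127.0.0.1")) // true on most machines
}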
|
||||
func processOptions(opts *Options) {
|
||||
// Setup non-standard Go defaults
|
||||
if opts.Host == "" {
|
||||
opts.Host = DEFAULT_HOST
|
||||
}
|
||||
if opts.HTTPHost == "" {
|
||||
// Default to same bind from server if left undefined
|
||||
opts.HTTPHost = opts.Host
|
||||
}
|
||||
if opts.Port == 0 {
|
||||
opts.Port = DEFAULT_PORT
|
||||
} else if opts.Port == RANDOM_PORT {
|
||||
// Choose randomly inside of net.Listen
|
||||
opts.Port = 0
|
||||
}
|
||||
if opts.MaxConn == 0 {
|
||||
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
|
||||
}
|
||||
if opts.PingInterval == 0 {
|
||||
opts.PingInterval = DEFAULT_PING_INTERVAL
|
||||
}
|
||||
if opts.MaxPingsOut == 0 {
|
||||
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
|
||||
}
|
||||
if opts.TLSTimeout == 0 {
|
||||
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
|
||||
}
|
||||
if opts.AuthTimeout == 0 {
|
||||
opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
|
||||
}
|
||||
if opts.Cluster.Host == "" {
|
||||
opts.Cluster.Host = DEFAULT_HOST
|
||||
}
|
||||
if opts.Cluster.TLSTimeout == 0 {
|
||||
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
|
||||
}
|
||||
if opts.Cluster.AuthTimeout == 0 {
|
||||
opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
|
||||
}
|
||||
if opts.MaxControlLine == 0 {
|
||||
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
|
||||
}
|
||||
if opts.MaxPayload == 0 {
|
||||
opts.MaxPayload = MAX_PAYLOAD_SIZE
|
||||
}
|
||||
}
|
738
vendor/github.com/nats-io/gnatsd/server/parser.go
generated
vendored
|
@@ -1,738 +0,0 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type pubArg struct {
|
||||
subject []byte
|
||||
reply []byte
|
||||
sid []byte
|
||||
szb []byte
|
||||
size int
|
||||
}
|
||||
|
||||
type parseState struct {
|
||||
state int
|
||||
as int
|
||||
drop int
|
||||
pa pubArg
|
||||
argBuf []byte
|
||||
msgBuf []byte
|
||||
scratch [MAX_CONTROL_LINE_SIZE]byte
|
||||
}
|
||||
|
||||
// Parser constants
|
||||
const (
|
||||
OP_START = iota
|
||||
OP_PLUS
|
||||
OP_PLUS_O
|
||||
OP_PLUS_OK
|
||||
OP_MINUS
|
||||
OP_MINUS_E
|
||||
OP_MINUS_ER
|
||||
OP_MINUS_ERR
|
||||
OP_MINUS_ERR_SPC
|
||||
MINUS_ERR_ARG
|
||||
OP_C
|
||||
OP_CO
|
||||
OP_CON
|
||||
OP_CONN
|
||||
OP_CONNE
|
||||
OP_CONNEC
|
||||
OP_CONNECT
|
||||
CONNECT_ARG
|
||||
OP_P
|
||||
OP_PU
|
||||
OP_PUB
|
||||
OP_PUB_SPC
|
||||
PUB_ARG
|
||||
OP_PI
|
||||
OP_PIN
|
||||
OP_PING
|
||||
OP_PO
|
||||
OP_PON
|
||||
OP_PONG
|
||||
MSG_PAYLOAD
|
||||
MSG_END
|
||||
OP_S
|
||||
OP_SU
|
||||
OP_SUB
|
||||
OP_SUB_SPC
|
||||
SUB_ARG
|
||||
OP_U
|
||||
OP_UN
|
||||
OP_UNS
|
||||
OP_UNSU
|
||||
OP_UNSUB
|
||||
OP_UNSUB_SPC
|
||||
UNSUB_ARG
|
||||
OP_M
|
||||
OP_MS
|
||||
OP_MSG
|
||||
OP_MSG_SPC
|
||||
MSG_ARG
|
||||
OP_I
|
||||
OP_IN
|
||||
OP_INF
|
||||
OP_INFO
|
||||
INFO_ARG
|
||||
)
|
||||
|
||||
func (c *client) parse(buf []byte) error {
|
||||
var i int
|
||||
var b byte
|
||||
|
||||
mcl := MAX_CONTROL_LINE_SIZE
|
||||
if c.srv != nil && c.srv.opts != nil {
|
||||
mcl = c.srv.opts.MaxControlLine
|
||||
}
|
||||
|
||||
// snapshot this, and reset when we receive a
|
||||
// proper CONNECT if needed.
|
||||
authSet := c.isAuthTimerSet()
|
||||
|
||||
// Use an indexed loop instead of range syntax so we can jump i ahead
|
||||
for i = 0; i < len(buf); i++ {
|
||||
b = buf[i]
|
||||
|
||||
switch c.state {
|
||||
case OP_START:
|
||||
if b != 'C' && b != 'c' && authSet {
|
||||
goto authErr
|
||||
}
|
||||
switch b {
|
||||
case 'P', 'p':
|
||||
c.state = OP_P
|
||||
case 'S', 's':
|
||||
c.state = OP_S
|
||||
case 'U', 'u':
|
||||
c.state = OP_U
|
||||
case 'M', 'm':
|
||||
if c.typ == CLIENT {
|
||||
goto parseErr
|
||||
} else {
|
||||
c.state = OP_M
|
||||
}
|
||||
case 'C', 'c':
|
||||
c.state = OP_C
|
||||
case 'I', 'i':
|
||||
c.state = OP_I
|
||||
case '+':
|
||||
c.state = OP_PLUS
|
||||
case '-':
|
||||
c.state = OP_MINUS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_P:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_PU
|
||||
case 'I', 'i':
|
||||
c.state = OP_PI
|
||||
case 'O', 'o':
|
||||
c.state = OP_PO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_PUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_PUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = PUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case PUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processPub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = OP_START, i+1, MSG_PAYLOAD
|
||||
// If we don't have a saved buffer then jump ahead with
|
||||
// the index. If this overruns what is left we fall out
|
||||
// and process split buffer.
|
||||
if c.msgBuf == nil {
|
||||
i = c.as + c.pa.size - LEN_CR_LF
|
||||
}
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case MSG_PAYLOAD:
|
||||
if c.msgBuf != nil {
|
||||
// copy as much as we can to the buffer and skip ahead.
|
||||
toCopy := c.pa.size - len(c.msgBuf)
|
||||
avail := len(buf) - i
|
||||
if avail < toCopy {
|
||||
toCopy = avail
|
||||
}
|
||||
if toCopy > 0 {
|
||||
start := len(c.msgBuf)
|
||||
// This is needed for copy to work.
|
||||
c.msgBuf = c.msgBuf[:start+toCopy]
|
||||
copy(c.msgBuf[start:], buf[i:i+toCopy])
|
||||
// Update our index
|
||||
i = (i + toCopy) - 1
|
||||
} else {
|
||||
// Fall back to append if needed.
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
if len(c.msgBuf) >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
} else if i-c.as >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
case MSG_END:
|
||||
switch b {
|
||||
case '\n':
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
} else {
|
||||
c.msgBuf = buf[c.as : i+1]
|
||||
}
|
||||
// strict check for proto
|
||||
if len(c.msgBuf) != c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.processMsg(c.msgBuf)
|
||||
c.argBuf, c.msgBuf = nil, nil
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
continue
|
||||
}
|
||||
case OP_S:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_SU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_SUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_SUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = SUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case SUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processSub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_U:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_UN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UN:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_UNS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNS:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_UNSU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_UNSUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_UNSUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = UNSUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case UNSUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processUnsub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PI:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PIN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PIN:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PING
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PING:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPing()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_PO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PON:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PONG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PONG:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPong()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_C:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_CO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CON:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CONN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONN:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_CONNE
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNE:
|
||||
switch b {
|
||||
case 'C', 'c':
|
||||
c.state = OP_CONNEC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNEC:
|
||||
switch b {
|
||||
case 'T', 't':
|
||||
c.state = OP_CONNECT
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNECT:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = CONNECT_ARG
|
||||
c.as = i
|
||||
}
|
||||
case CONNECT_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processConnect(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.state = 0, OP_START
|
||||
// Reset notion on authSet
|
||||
authSet = c.isAuthTimerSet()
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_M:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_MS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MS:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_MSG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MSG_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MSG_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MSG_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processMsgArgs(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
|
||||
|
||||
// jump ahead with the index. If this overruns
|
||||
// what is left we fall out and process split
|
||||
// buffer.
|
||||
i = c.as + c.pa.size - 1
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_I:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_IN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_IN:
|
||||
switch b {
|
||||
case 'F', 'f':
|
||||
c.state = OP_INF
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INF:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_INFO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INFO:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = INFO_ARG
|
||||
c.as = i
|
||||
}
|
||||
case INFO_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processInfo(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PLUS:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_PLUS_O
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_O:
|
||||
switch b {
|
||||
case 'K', 'k':
|
||||
c.state = OP_PLUS_OK
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_OK:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_MINUS:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_MINUS_E
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_E:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ER
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ER:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ERR
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MINUS_ERR_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MINUS_ERR_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MINUS_ERR_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
c.processErr(string(arg))
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
}
|
||||
|
||||
// Check for split buffer scenarios for any ARG state.
|
||||
if c.state == SUB_ARG || c.state == UNSUB_ARG || c.state == PUB_ARG ||
|
||||
c.state == MSG_ARG || c.state == MINUS_ERR_ARG ||
|
||||
c.state == CONNECT_ARG || c.state == INFO_ARG {
|
||||
// Setup a holder buffer to deal with split buffer scenario.
|
||||
if c.argBuf == nil {
|
||||
c.argBuf = c.scratch[:0]
|
||||
c.argBuf = append(c.argBuf, buf[c.as:i-c.drop]...)
|
||||
}
|
||||
// Check for violations of control line length here. Note that this is not
|
||||
// exact at all but the performance hit is too great to be precise, and
|
||||
// catching here should prevent memory exhaustion attacks.
|
||||
if len(c.argBuf) > mcl {
|
||||
c.sendErr("Maximum Control Line Exceeded")
|
||||
c.closeConnection()
|
||||
return ErrMaxControlLine
|
||||
}
|
||||
}
|
||||
|
||||
// Check for split msg
|
||||
if (c.state == MSG_PAYLOAD || c.state == MSG_END) && c.msgBuf == nil {
|
||||
// We need to clone the pubArg if it is still referencing the
|
||||
// read buffer and we are not able to process the msg.
|
||||
if c.argBuf == nil {
|
||||
// Works also for MSG_ARG, when message comes from ROUTE.
|
||||
c.clonePubArg()
|
||||
}
|
||||
|
||||
// If we will overflow the scratch buffer, just create a
|
||||
// new buffer to hold the split message.
|
||||
if c.pa.size > cap(c.scratch)-len(c.argBuf) {
|
||||
lrem := len(buf[c.as:])
|
||||
|
||||
// Consider it a protocol error when the remaining payload
|
||||
// is larger than the reported size for PUB. It can happen
|
||||
// when processing incomplete messages from rogue clients.
|
||||
if lrem > c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.msgBuf = make([]byte, lrem, c.pa.size+LEN_CR_LF)
|
||||
copy(c.msgBuf, buf[c.as:])
|
||||
} else {
|
||||
c.msgBuf = c.scratch[len(c.argBuf):len(c.argBuf)]
|
||||
c.msgBuf = append(c.msgBuf, (buf[c.as:])...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
authErr:
|
||||
c.authViolation()
|
||||
return ErrAuthorization
|
||||
|
||||
parseErr:
|
||||
c.sendErr("Unknown Protocol Operation")
|
||||
snip := protoSnippet(i, buf)
|
||||
err := fmt.Errorf("%s Parser ERROR, state=%d, i=%d: proto='%s...'",
|
||||
c.typeString(), c.state, i, snip)
|
||||
return err
|
||||
}
|
||||
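The parser above is a hand-rolled, byte-at-a-time state machine so that a protocol line split across TCP reads can be resumed exactly where it left off. A toy scanner in the same style, recognizing only PING, shows the shape (it is an illustration, not the vendored parser):

package main

import "fmt"

const (
	opStart = iota
	opP
	opPI
	opPIN
	opPING
)

type scanner struct{ state int }

// feed returns how many complete PINGs were seen in buf; state survives calls.
func (s *scanner) feed(buf []byte) int {
	pings := 0
	for _, b := range buf {
		switch s.state {
		case opStart:
			if b == 'P' || b == 'p' {
				s.state = opP
			}
		case opP:
			if b == 'I' || b == 'i' {
				s.state = opPI
			} else {
				s.state = opStart
			}
		case opPI:
			if b == 'N' || b == 'n' {
				s.state = opPIN
			} else {
				s.state = opStart
			}
		case opPIN:
			if b == 'G' || b == 'g' {
				s.state = opPING
			} else {
				s.state = opStart
			}
		case opPING:
			if b == '\n' {
				pings++
				s.state = opStart
			}
		}
	}
	return pings
}

func main() {
	s := &scanner{}
	// Split across two reads, as a TCP stream might deliver it.
	fmt.Println(s.feed([]byte("PI")), s.feed([]byte("NG\r\n"))) // 0 1
}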
|
||||
func protoSnippet(start int, buf []byte) string {
|
||||
stop := start + PROTO_SNIPPET_SIZE
|
||||
bufSize := len(buf)
|
||||
if start >= bufSize {
|
||||
return `""`
|
||||
}
|
||||
if stop > bufSize {
|
||||
stop = bufSize - 1
|
||||
}
|
||||
return fmt.Sprintf("%q", buf[start:stop])
|
||||
}
|
||||
|
||||
// clonePubArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
|
||||
// we need to hold onto it into the next read.
|
||||
func (c *client) clonePubArg() {
|
||||
c.argBuf = c.scratch[:0]
|
||||
c.argBuf = append(c.argBuf, c.pa.subject...)
|
||||
c.argBuf = append(c.argBuf, c.pa.reply...)
|
||||
c.argBuf = append(c.argBuf, c.pa.sid...)
|
||||
c.argBuf = append(c.argBuf, c.pa.szb...)
|
||||
|
||||
c.pa.subject = c.argBuf[:len(c.pa.subject)]
|
||||
|
||||
if c.pa.reply != nil {
|
||||
c.pa.reply = c.argBuf[len(c.pa.subject) : len(c.pa.subject)+len(c.pa.reply)]
|
||||
}
|
||||
|
||||
if c.pa.sid != nil {
|
||||
c.pa.sid = c.argBuf[len(c.pa.subject)+len(c.pa.reply) : len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid)]
|
||||
}
|
||||
|
||||
c.pa.szb = c.argBuf[len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid):]
|
||||
}
|
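clonePubArg exists because c.pa.subject, reply, sid and szb are sub-slices of the read buffer, and the next network read will overwrite that memory. A tiny standalone demonstration of why such slices must be copied before the buffer is reused:

package main

import "fmt"

func main() {
	readBuf := []byte("PUB foo 2\r\n")
	subject := readBuf[4:7] // aliases the read buffer, like c.pa.subject

	cloned := append([]byte(nil), subject...) // what clonePubArg effectively does

	copy(readBuf, []byte("PUB bar 2\r\n")) // the next read reuses the buffer

	fmt.Println(string(subject)) // "bar" - the alias followed the buffer
	fmt.Println(string(cloned))  // "foo" - the copy is stable
}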
23
vendor/github.com/nats-io/gnatsd/server/pse/pse_darwin.go
generated
vendored
|
@@ -1,23 +0,0 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
pidStr := fmt.Sprintf("%d", os.Getpid())
|
||||
out, err := exec.Command("ps", "o", "pcpu=,rss=,vsz=", "-p", pidStr).Output()
|
||||
if err != nil {
|
||||
*rss, *vss = -1, -1
|
||||
return errors.New(fmt.Sprintf("ps call failed:%v", err))
|
||||
}
|
||||
fmt.Sscanf(string(out), "%f %d %d", pcpu, rss, vss)
|
||||
*rss *= 1024 // 1k blocks, want bytes.
|
||||
*vss *= 1024 // 1k blocks, want bytes.
|
||||
return nil
|
||||
}
|
72
vendor/github.com/nats-io/gnatsd/server/pse/pse_freebsd.go
generated
vendored
|
@@ -1,72 +0,0 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/user.h>
|
||||
#include <stddef.h>
|
||||
#include <unistd.h>
|
||||
|
||||
long pagetok(long size)
|
||||
{
|
||||
int pageshift, pagesize;
|
||||
|
||||
pagesize = getpagesize();
|
||||
pageshift = 0;
|
||||
|
||||
while (pagesize > 1) {
|
||||
pageshift++;
|
||||
pagesize >>= 1;
|
||||
}
|
||||
|
||||
return (size << pageshift);
|
||||
}
|
||||
|
||||
int getusage(double *pcpu, unsigned int *rss, unsigned int *vss)
|
||||
{
|
||||
int mib[4], ret;
|
||||
size_t len;
|
||||
struct kinfo_proc kp;
|
||||
|
||||
len = 4;
|
||||
sysctlnametomib("kern.proc.pid", mib, &len);
|
||||
|
||||
mib[3] = getpid();
|
||||
len = sizeof(kp);
|
||||
|
||||
ret = sysctl(mib, 4, &kp, &len, NULL, 0);
|
||||
if (ret != 0) {
|
||||
return (errno);
|
||||
}
|
||||
|
||||
*rss = pagetok(kp.ki_rssize);
|
||||
*vss = kp.ki_size;
|
||||
*pcpu = kp.ki_pctcpu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ProcUsage returns process CPU and memory statistics via sysctl (cgo).
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
var r, v C.uint
|
||||
var c C.double
|
||||
|
||||
if ret := C.getusage(&c, &r, &v); ret != 0 {
|
||||
return syscall.Errno(ret)
|
||||
}
|
||||
|
||||
*pcpu = float64(c)
|
||||
*rss = int64(r)
|
||||
*vss = int64(v)
|
||||
|
||||
return nil
|
||||
}
|
115
vendor/github.com/nats-io/gnatsd/server/pse/pse_linux.go
generated
vendored
|
@@ -1,115 +0,0 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
procStatFile string
|
||||
ticks int64
|
||||
lastTotal int64
|
||||
lastSeconds int64
|
||||
ipcpu int64
|
||||
)
|
||||
|
||||
const (
|
||||
utimePos = 13
|
||||
stimePos = 14
|
||||
startPos = 21
|
||||
vssPos = 22
|
||||
rssPos = 23
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Hardcoded so the docker image can be generated without CGO
|
||||
ticks = 100 // int64(C.sysconf(C._SC_CLK_TCK))
|
||||
procStatFile = fmt.Sprintf("/proc/%d/stat", os.Getpid())
|
||||
periodic()
|
||||
}
|
||||
|
||||
// Sampling function to keep pcpu relevant.
|
||||
func periodic() {
|
||||
contents, err := ioutil.ReadFile(procStatFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fields := bytes.Fields(contents)
|
||||
|
||||
// PCPU
|
||||
pstart := parseInt64(fields[startPos])
|
||||
utime := parseInt64(fields[utimePos])
|
||||
stime := parseInt64(fields[stimePos])
|
||||
total := utime + stime
|
||||
|
||||
var sysinfo syscall.Sysinfo_t
|
||||
if err := syscall.Sysinfo(&sysinfo); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
seconds := int64(sysinfo.Uptime) - (pstart / ticks)
|
||||
|
||||
// Save off temps
|
||||
lt := lastTotal
|
||||
ls := lastSeconds
|
||||
|
||||
// Update last sample
|
||||
lastTotal = total
|
||||
lastSeconds = seconds
|
||||
|
||||
// Adjust to current time window
|
||||
total -= lt
|
||||
seconds -= ls
|
||||
|
||||
if seconds > 0 {
|
||||
atomic.StoreInt64(&ipcpu, (total*1000/ticks)/seconds)
|
||||
}
|
||||
|
||||
time.AfterFunc(1*time.Second, periodic)
|
||||
}
|
||||
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
contents, err := ioutil.ReadFile(procStatFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fields := bytes.Fields(contents)
|
||||
|
||||
// Memory
|
||||
*rss = (parseInt64(fields[rssPos])) << 12
|
||||
*vss = parseInt64(fields[vssPos])
|
||||
|
||||
// PCPU
|
||||
// We track this with periodic sampling, so just load and go.
|
||||
*pcpu = float64(atomic.LoadInt64(&ipcpu)) / 10.0
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ascii numbers 0-9
|
||||
const (
|
||||
asciiZero = 48
|
||||
asciiNine = 57
|
||||
)
|
||||
|
||||
// parseInt64 expects decimal positive numbers. We
|
||||
// return -1 to signal error
|
||||
func parseInt64(d []byte) (n int64) {
|
||||
if len(d) == 0 {
|
||||
return -1
|
||||
}
|
||||
for _, dec := range d {
|
||||
if dec < asciiZero || dec > asciiNine {
|
||||
return -1
|
||||
}
|
||||
n = n*10 + (int64(dec) - asciiZero)
|
||||
}
|
||||
return n
|
||||
}
|
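periodic() stores pcpu in tenths of a percent, which is why ProcUsage divides by 10. A worked example of that arithmetic, assuming ticks = 100 (the hardcoded USER_HZ) and a one-second window:

package main

import "fmt"

func main() {
	const ticks = 100 // clock ticks per second, as hardcoded in init()

	deltaTicks := int64(25) // utime+stime grew by 25 ticks since the last sample
	deltaSecs := int64(1)   // one second of wall clock elapsed

	ipcpu := (deltaTicks * 1000 / ticks) / deltaSecs // 250, i.e. tenths of a percent
	pcpu := float64(ipcpu) / 10.0                    // 25.0

	fmt.Println(ipcpu, pcpu) // 250 25
}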
13
vendor/github.com/nats-io/gnatsd/server/pse/pse_rumprun.go
generated
vendored
|
@@ -1,13 +0,0 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
// +build rumprun
|
||||
|
||||
package pse
|
||||
|
||||
// This is a placeholder for now.
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
*pcpu = 0.0
|
||||
*rss = 0
|
||||
*vss = 0
|
||||
|
||||
return nil
|
||||
}
|
12
vendor/github.com/nats-io/gnatsd/server/pse/pse_solaris.go
generated
vendored
|
@@ -1,12 +0,0 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
// This is a placeholder for now.
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
*pcpu = 0.0
|
||||
*rss = 0
|
||||
*vss = 0
|
||||
|
||||
return nil
|
||||
}
|
268
vendor/github.com/nats-io/gnatsd/server/pse/pse_windows.go
generated
vendored
|
@@ -1,268 +0,0 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
// +build windows
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
pdh = syscall.NewLazyDLL("pdh.dll")
|
||||
winPdhOpenQuery = pdh.NewProc("PdhOpenQuery")
|
||||
winPdhAddCounter = pdh.NewProc("PdhAddCounterW")
|
||||
winPdhCollectQueryData = pdh.NewProc("PdhCollectQueryData")
|
||||
winPdhGetFormattedCounterValue = pdh.NewProc("PdhGetFormattedCounterValue")
|
||||
winPdhGetFormattedCounterArray = pdh.NewProc("PdhGetFormattedCounterArrayW")
|
||||
)
|
||||
|
||||
// global performance counter query handle and counters
|
||||
var (
|
||||
pcHandle PDH_HQUERY
|
||||
pidCounter, cpuCounter, rssCounter, vssCounter PDH_HCOUNTER
|
||||
prevCPU float64
|
||||
prevRss int64
|
||||
prevVss int64
|
||||
lastSampleTime time.Time
|
||||
processPid int
|
||||
pcQueryLock sync.Mutex
|
||||
initialSample = true
|
||||
)
|
||||
|
||||
// maxQuerySize is the number of values to return from a query.
|
||||
// It represents the maximum number of server instances running on a
|
||||
// machine that can be queried simultaneously.
|
||||
const maxQuerySize = 512
|
||||
|
||||
// Keep static memory around to reuse; this works best for passing
|
||||
// into the pdh API.
|
||||
var counterResults [maxQuerySize]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE
|
||||
|
||||
// PDH Types
|
||||
type (
|
||||
PDH_HQUERY syscall.Handle
|
||||
PDH_HCOUNTER syscall.Handle
|
||||
)
|
||||
|
||||
// PDH constants used here
|
||||
const (
|
||||
PDH_FMT_DOUBLE = 0x00000200
|
||||
PDH_INVALID_DATA = 0xC0000BC6
|
||||
PDH_MORE_DATA = 0x800007D2
|
||||
)
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_DOUBLE - double value
|
||||
type PDH_FMT_COUNTERVALUE_DOUBLE struct {
|
||||
CStatus uint32
|
||||
DoubleValue float64
|
||||
}
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_ITEM_DOUBLE is an array
|
||||
// element of a double value
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
|
||||
}
|
||||
|
||||
func pdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) error {
|
||||
ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
|
||||
r0, _, _ := winPdhAddCounter.Call(
|
||||
uintptr(hQuery),
|
||||
uintptr(unsafe.Pointer(ptxt)),
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phCounter)))
|
||||
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhAddCounter failed. %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func pdhOpenQuery(datasrc *uint16, userdata uint32, query *PDH_HQUERY) error {
|
||||
r0, _, _ := syscall.Syscall(winPdhOpenQuery.Addr(), 3, 0, uintptr(userdata), uintptr(unsafe.Pointer(query)))
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhOpenQuery failed - %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func pdhCollectQueryData(hQuery PDH_HQUERY) error {
|
||||
r0, _, _ := winPdhCollectQueryData.Call(uintptr(hQuery))
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhCollectQueryData failed - %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pdhGetFormattedCounterArrayDouble returns the raw return code
|
||||
// rather than an error, so callers can easily check specific return codes
|
||||
func pdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
|
||||
ret, _, _ := winPdhGetFormattedCounterArray.Call(
|
||||
uintptr(hCounter),
|
||||
uintptr(PDH_FMT_DOUBLE),
|
||||
uintptr(unsafe.Pointer(lpdwBufferSize)),
|
||||
uintptr(unsafe.Pointer(lpdwBufferCount)),
|
||||
uintptr(unsafe.Pointer(itemBuffer)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
func getCounterArrayData(counter PDH_HCOUNTER) ([]float64, error) {
|
||||
var bufSize uint32
|
||||
var bufCount uint32
|
||||
|
||||
// Retrieving array data requires two calls, the first of which
|
||||
// requires an addressable empty buffer, and sets size fields.
|
||||
// The second call returns the data.
|
||||
initialBuf := make([]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, 1)
|
||||
ret := pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &initialBuf[0])
|
||||
if ret == PDH_MORE_DATA {
|
||||
// we'll likely never get here, but be safe.
|
||||
if bufCount > maxQuerySize {
|
||||
bufCount = maxQuerySize
|
||||
}
|
||||
ret = pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &counterResults[0])
|
||||
if ret == 0 {
|
||||
rv := make([]float64, bufCount)
|
||||
for i := 0; i < int(bufCount); i++ {
|
||||
rv[i] = counterResults[i].FmtValue.DoubleValue
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
}
|
||||
if ret != 0 {
|
||||
return nil, fmt.Errorf("getCounterArrayData failed - %d", ret)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// getProcessImageName returns the name of the process image, as expected by
|
||||
// the performance counter API.
|
||||
func getProcessImageName() (name string) {
|
||||
name = filepath.Base(os.Args[0])
|
||||
name = strings.TrimSuffix(name, ".exe")
|
||||
return
|
||||
}
|
||||
|
||||
// initialize our counters
|
||||
func initCounters() (err error) {
|
||||
|
||||
processPid = os.Getpid()
|
||||
// require an addressable nil pointer
|
||||
var source uint16
|
||||
if err := pdhOpenQuery(&source, 0, &pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// setup the performance counters, search for all server instances
|
||||
name := fmt.Sprintf("%s*", getProcessImageName())
|
||||
pidQuery := fmt.Sprintf("\\Process(%s)\\ID Process", name)
|
||||
cpuQuery := fmt.Sprintf("\\Process(%s)\\%% Processor Time", name)
|
||||
rssQuery := fmt.Sprintf("\\Process(%s)\\Working Set - Private", name)
|
||||
vssQuery := fmt.Sprintf("\\Process(%s)\\Virtual Bytes", name)
|
||||
|
||||
if err = pdhAddCounter(pcHandle, pidQuery, 0, &pidCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, cpuQuery, 0, &cpuCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, rssQuery, 0, &rssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, vssQuery, 0, &vssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prime the counters by collecting once, and sleep to get somewhat
|
||||
// useful information on the first request. Counters for the CPU require
|
||||
// at least two collect calls.
|
||||
if err = pdhCollectQueryData(pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(50)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcUsage returns process CPU and memory statistics
|
||||
func ProcUsage(pcpu *float64, rss, vss *int64) error {
|
||||
var err error
|
||||
|
||||
// For simplicity, protect the entire call.
|
||||
// Most simultaneous requests will immediately return
|
||||
// with cached values.
|
||||
pcQueryLock.Lock()
|
||||
defer pcQueryLock.Unlock()
|
||||
|
||||
// First time through, initialize counters.
|
||||
if initialSample {
|
||||
if err = initCounters(); err != nil {
|
||||
return err
|
||||
}
|
||||
initialSample = false
|
||||
} else if time.Since(lastSampleTime) < (2 * time.Second) {
|
||||
// only refresh every two seconds so as to minimize impact
|
||||
// on the server.
|
||||
*pcpu = prevCPU
|
||||
*rss = prevRss
|
||||
*vss = prevVss
|
||||
return nil
|
||||
}
|
||||
|
||||
// always save the sample time, even on errors.
|
||||
defer func() {
|
||||
lastSampleTime = time.Now()
|
||||
}()
|
||||
|
||||
// refresh the performance counter data
|
||||
if err = pdhCollectQueryData(pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// retrieve the data
|
||||
var pidAry, cpuAry, rssAry, vssAry []float64
|
||||
if pidAry, err = getCounterArrayData(pidCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if cpuAry, err = getCounterArrayData(cpuCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if rssAry, err = getCounterArrayData(rssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if vssAry, err = getCounterArrayData(vssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
// find the index of the entry for this process
|
||||
idx := int(-1)
|
||||
for i := range pidAry {
|
||||
if int(pidAry[i]) == processPid {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
// no pid found...
|
||||
if idx < 0 {
|
||||
return fmt.Errorf("could not find pid in performance counter results")
|
||||
}
|
||||
// assign values from the performance counters
|
||||
*pcpu = cpuAry[idx]
|
||||
*rss = int64(rssAry[idx])
|
||||
*vss = int64(vssAry[idx])
|
||||
|
||||
// save off cache values
|
||||
prevCPU = *pcpu
|
||||
prevRss = *rss
|
||||
prevVss = *vss
|
||||
|
||||
return nil
|
||||
}
|
731
vendor/github.com/nats-io/gnatsd/server/route.go
generated
vendored
|
@@ -1,731 +0,0 @@
|
|||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/util"
|
||||
)
|
||||
|
||||
// RouteType designates the router type
|
||||
type RouteType int
|
||||
|
||||
// Type of Route
|
||||
const (
|
||||
// This route we learned from speaking to other routes.
|
||||
Implicit RouteType = iota
|
||||
// This route was explicitly configured.
|
||||
Explicit
|
||||
)
|
||||
|
||||
type route struct {
|
||||
remoteID string
|
||||
didSolicit bool
|
||||
retry bool
|
||||
routeType RouteType
|
||||
url *url.URL
|
||||
authRequired bool
|
||||
tlsRequired bool
|
||||
}
|
||||
|
||||
type connectInfo struct {
|
||||
Verbose bool `json:"verbose"`
|
||||
Pedantic bool `json:"pedantic"`
|
||||
User string `json:"user,omitempty"`
|
||||
Pass string `json:"pass,omitempty"`
|
||||
TLS bool `json:"tls_required"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// Route protocol constants
|
||||
const (
|
||||
ConProto = "CONNECT %s" + _CRLF_
|
||||
InfoProto = "INFO %s" + _CRLF_
|
||||
)
|
||||
|
||||
// Lock should be held entering here.
|
||||
func (c *client) sendConnect(tlsRequired bool) {
|
||||
var user, pass string
|
||||
if userInfo := c.route.url.User; userInfo != nil {
|
||||
user = userInfo.Username()
|
||||
pass, _ = userInfo.Password()
|
||||
}
|
||||
cinfo := connectInfo{
|
||||
Verbose: false,
|
||||
Pedantic: false,
|
||||
User: user,
|
||||
Pass: pass,
|
||||
TLS: tlsRequired,
|
||||
Name: c.srv.info.ID,
|
||||
}
|
||||
b, err := json.Marshal(cinfo)
|
||||
if err != nil {
|
||||
c.Errorf("Error marshalling CONNECT to route: %v\n", err)
|
||||
c.closeConnection()
|
||||
return
|
||||
}
|
||||
c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
|
||||
}
|
||||
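sendConnect marshals connectInfo and wraps it in ConProto ("CONNECT %s" + CRLF). A standalone sketch of the resulting wire line, using a copy of the struct's fields and json tags from above (the sample values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

type connectInfo struct {
	Verbose  bool   `json:"verbose"`
	Pedantic bool   `json:"pedantic"`
	User     string `json:"user,omitempty"`
	Pass     string `json:"pass,omitempty"`
	TLS      bool   `json:"tls_required"`
	Name     string `json:"name"`
}

func main() {
	b, err := json.Marshal(connectInfo{TLS: true, Name: "server-id"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("CONNECT %s\r\n", b)
	// CONNECT {"verbose":false,"pedantic":false,"tls_required":true,"name":"server-id"}
}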
|
||||
// Process the info message if we are a route.
|
||||
func (c *client) processRouteInfo(info *Info) {
|
||||
c.mu.Lock()
|
||||
// Connection can be closed at any time (by auth timeout, etc).
|
||||
// Does not make sense to continue here if connection is gone.
|
||||
if c.route == nil || c.nc == nil {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
s := c.srv
|
||||
remoteID := c.route.remoteID
|
||||
|
||||
// We receive an INFO from a server that informs us about another server,
|
||||
// so the info.ID in the INFO protocol does not match the ID of this route.
|
||||
if remoteID != "" && remoteID != info.ID {
|
||||
c.mu.Unlock()
|
||||
|
||||
// Process this implicit route. We will check that it is not an explicit
|
||||
// route and/or that it has not been connected already.
|
||||
s.processImplicitRoute(info)
|
||||
return
|
||||
}
|
||||
|
||||
// Need to set this for the detection of the route to self to work
|
||||
// in closeConnection().
|
||||
c.route.remoteID = info.ID
|
||||
|
||||
// Detect route to self.
|
||||
if c.route.remoteID == s.info.ID {
|
||||
c.mu.Unlock()
|
||||
c.closeConnection()
|
||||
return
|
||||
}
|
||||
|
||||
// Copy over important information.
|
||||
c.route.authRequired = info.AuthRequired
|
||||
c.route.tlsRequired = info.TLSRequired
|
||||
|
||||
// If we do not know this route's URL, construct one on the fly
|
||||
// from the information provided.
|
||||
if c.route.url == nil {
|
||||
// Add in the URL from host and port
|
||||
hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
|
||||
url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
|
||||
if err != nil {
|
||||
c.Errorf("Error parsing URL from INFO: %v\n", err)
|
||||
c.mu.Unlock()
|
||||
c.closeConnection()
|
||||
return
|
||||
}
|
||||
c.route.url = url
|
||||
}
|
||||
|
||||
// Check to see if we have this remote already registered.
|
||||
// This can happen when both servers have routes to each other.
|
||||
c.mu.Unlock()
|
||||
|
||||
if added, sendInfo := s.addRoute(c, info); added {
|
||||
c.Debugf("Registering remote route %q", info.ID)
|
||||
// Send our local subscriptions to this route.
|
||||
s.sendLocalSubsToRoute(c)
|
||||
if sendInfo {
|
||||
// Need to get the remote IP address.
|
||||
c.mu.Lock()
|
||||
switch conn := c.nc.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(), strconv.Itoa(info.Port)))
|
||||
default:
|
||||
info.IP = fmt.Sprintf("%s", c.route.url)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
// Now let the known servers know about this new route
|
||||
s.forwardNewRouteInfoToKnownServers(info)
|
||||
}
|
||||
// If the server Info did not have these URLs, update and send an INFO
|
||||
// protocol to all clients that support it (unless the feature is disabled).
|
||||
if s.updateServerINFO(info.ClientConnectURLs) {
|
||||
s.sendAsyncInfoToClients()
|
||||
}
|
||||
} else {
|
||||
c.Debugf("Detected duplicate remote route %q", info.ID)
|
||||
c.closeConnection()
|
||||
}
|
||||
}
|
||||
|
||||
// sendAsyncInfoToClients sends an INFO protocol to all
|
||||
// connected clients that accept async INFO updates.
|
||||
func (s *Server) sendAsyncInfoToClients() {
|
||||
s.mu.Lock()
|
||||
// If there are no clients supporting async INFO protocols, we are done.
|
||||
if s.cproto == 0 {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Capture under lock
|
||||
proto := s.infoJSON
|
||||
|
||||
// Make a copy of ALL clients so we can release server lock while
|
||||
// sending the protocol to clients. We could check the conditions
|
||||
// (proto support, first PONG sent) here and so have potentially
|
||||
// a limited number of clients, but that would mean grabbing the
|
||||
// client's lock here, which we don't want since we would still
|
||||
// need it in the second loop.
|
||||
clients := make([]*client, 0, len(s.clients))
|
||||
for _, c := range s.clients {
|
||||
clients = append(clients, c)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
for _, c := range clients {
|
||||
c.mu.Lock()
|
||||
// If server did not yet receive the CONNECT protocol, check later
|
||||
// when sending the first PONG.
|
||||
if !c.flags.isSet(connectReceived) {
|
||||
c.flags.set(infoUpdated)
|
||||
} else if c.opts.Protocol >= ClientProtoInfo {
|
||||
// Send only if first PONG was sent
|
||||
if c.flags.isSet(firstPongSent) {
|
||||
// sendInfo takes care of checking if the connection is still
|
||||
// valid or not, so don't duplicate tests here.
|
||||
c.sendInfo(proto)
|
||||
} else {
|
||||
// Otherwise, notify that INFO has changed and check later.
|
||||
c.flags.set(infoUpdated)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// This will process implicit route information received from another server.
|
||||
// We will check to see if we have configured or are already connected,
|
||||
// and if so we will ignore. Otherwise we will attempt to connect.
|
||||
func (s *Server) processImplicitRoute(info *Info) {
|
||||
remoteID := info.ID
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Don't connect to ourself
|
||||
if remoteID == s.info.ID {
|
||||
return
|
||||
}
|
||||
// Check if this route already exists
|
||||
if _, exists := s.remotes[remoteID]; exists {
|
||||
return
|
||||
}
|
||||
// Check if we have this route as a configured route
|
||||
if s.hasThisRouteConfigured(info) {
|
||||
return
|
||||
}
|
||||
|
||||
// Initiate the connection, using info.IP instead of info.URL here...
|
||||
r, err := url.Parse(info.IP)
|
||||
if err != nil {
|
||||
Debugf("Error parsing URL from INFO: %v\n", err)
|
||||
return
|
||||
}
|
||||
if info.AuthRequired {
|
||||
r.User = url.UserPassword(s.opts.Cluster.Username, s.opts.Cluster.Password)
|
||||
}
|
||||
s.startGoRoutine(func() { s.connectToRoute(r, false) })
|
||||
}
|
||||
|
||||
// hasThisRouteConfigured returns true if info.Host:info.Port is present
|
||||
// in the server's opts.Routes, false otherwise.
|
||||
// Server lock is assumed to be held by caller.
|
||||
func (s *Server) hasThisRouteConfigured(info *Info) bool {
|
||||
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
|
||||
for _, ri := range s.opts.Routes {
|
||||
if strings.ToLower(ri.Host) == urlToCheckExplicit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
|
||||
// to all routes known by this server. In turn, each server will contact this
|
||||
// new route.
|
||||
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
b, _ := json.Marshal(info)
|
||||
infoJSON := []byte(fmt.Sprintf(InfoProto, b))
|
||||
|
||||
for _, r := range s.routes {
|
||||
r.mu.Lock()
|
||||
if r.route.remoteID != info.ID {
|
||||
r.sendInfo(infoJSON)
|
||||
}
|
||||
r.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// This will send local subscription state to a new route connection.
|
||||
// FIXME(dlc) - This could be a DOS or perf issue with many clients
|
||||
// and large subscription space. Plus, buffering in place is not a good idea.
|
||||
func (s *Server) sendLocalSubsToRoute(route *client) {
|
||||
b := bytes.Buffer{}
|
||||
s.mu.Lock()
|
||||
for _, client := range s.clients {
|
||||
client.mu.Lock()
|
||||
subs := make([]*subscription, 0, len(client.subs))
|
||||
for _, sub := range client.subs {
|
||||
subs = append(subs, sub)
|
||||
}
|
||||
client.mu.Unlock()
|
||||
for _, sub := range subs {
|
||||
rsid := routeSid(sub)
|
||||
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
|
||||
b.WriteString(proto)
|
||||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
route.mu.Lock()
|
||||
defer route.mu.Unlock()
|
||||
route.sendProto(b.Bytes(), true)
|
||||
|
||||
route.Debugf("Route sent local subscriptions")
|
||||
}
|
||||
|
||||
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
|
||||
didSolicit := rURL != nil
|
||||
r := &route{didSolicit: didSolicit}
|
||||
for _, route := range s.opts.Routes {
|
||||
if rURL != nil && (strings.ToLower(rURL.Host) == strings.ToLower(route.Host)) {
|
||||
r.routeType = Explicit
|
||||
}
|
||||
}
|
||||
|
||||
c := &client{srv: s, nc: conn, opts: clientOpts{}, typ: ROUTER, route: r}
|
||||
|
||||
// Grab server variables
|
||||
s.mu.Lock()
|
||||
infoJSON := s.routeInfoJSON
|
||||
authRequired := s.routeInfo.AuthRequired
|
||||
tlsRequired := s.routeInfo.TLSRequired
|
||||
s.mu.Unlock()
|
||||
|
||||
// Grab lock
|
||||
c.mu.Lock()
|
||||
|
||||
// Initialize
|
||||
c.initClient()
|
||||
|
||||
c.Debugf("Route connection created")
|
||||
|
||||
if didSolicit {
|
||||
// Do this before the TLS code, otherwise, in case of failure
|
||||
// and if route is explicit, it would try to reconnect to 'nil'...
|
||||
r.url = rURL
|
||||
}
|
||||
|
||||
// Check for TLS
|
||||
if tlsRequired {
|
||||
// Copy off the config to add in ServerName if we need to.
|
||||
tlsConfig := util.CloneTLSConfig(s.opts.Cluster.TLSConfig)
|
||||
|
||||
// If we solicited, we will act like the client, otherwise the server.
|
||||
if didSolicit {
|
||||
c.Debugf("Starting TLS route client handshake")
|
||||
// Specify the ServerName we are expecting.
|
||||
host, _, _ := net.SplitHostPort(rURL.Host)
|
||||
tlsConfig.ServerName = host
|
||||
c.nc = tls.Client(c.nc, tlsConfig)
|
||||
} else {
|
||||
c.Debugf("Starting TLS route server handshake")
|
||||
c.nc = tls.Server(c.nc, tlsConfig)
|
||||
}
|
||||
|
||||
conn := c.nc.(*tls.Conn)
|
||||
|
||||
// Setup the timeout
|
||||
ttl := secondsToDuration(s.opts.Cluster.TLSTimeout)
|
||||
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
|
||||
conn.SetReadDeadline(time.Now().Add(ttl))
|
||||
|
||||
c.mu.Unlock()
|
||||
if err := conn.Handshake(); err != nil {
|
||||
c.Debugf("TLS route handshake error: %v", err)
|
||||
c.sendErr("Secure Connection - TLS Required")
|
||||
c.closeConnection()
|
||||
return nil
|
||||
}
|
||||
// Reset the read deadline
|
||||
conn.SetReadDeadline(time.Time{})
|
||||
|
||||
// Re-Grab lock
|
||||
c.mu.Lock()
|
||||
|
||||
// Verify that the connection did not go away while we released the lock.
|
||||
if c.nc == nil {
|
||||
c.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rewrap bw
|
||||
c.bw = bufio.NewWriterSize(c.nc, startBufSize)
|
||||
}
|
||||
|
||||
// Do final client initialization
|
||||
|
||||
// Set the Ping timer
|
||||
c.setPingTimer()
|
||||
|
||||
// For routes, the "client" is added to s.routes only when processing
|
||||
// the INFO protocol, that is much later.
|
||||
// In the meantime, if the server shuts down, there would be no reference
|
||||
// to the client (connection) to be closed, leaving this readLoop
|
||||
// uninterrupted, causing the Shutdown() to wait indefinitely.
|
||||
// We need to store the client in a special map, under a special lock.
|
||||
s.grMu.Lock()
|
||||
s.grTmpClients[c.cid] = c
|
||||
s.grMu.Unlock()
|
||||
|
||||
// Spin up the read loop.
|
||||
s.startGoRoutine(func() { c.readLoop() })
|
||||
|
||||
if tlsRequired {
|
||||
c.Debugf("TLS handshake complete")
|
||||
cs := c.nc.(*tls.Conn).ConnectionState()
|
||||
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
|
||||
}
|
||||
|
||||
// Queue Connect proto if we solicited the connection.
|
||||
if didSolicit {
|
||||
c.Debugf("Route connect msg sent")
|
||||
c.sendConnect(tlsRequired)
|
||||
}
|
||||
|
||||
// Send our info to the other side.
|
||||
c.sendInfo(infoJSON)
|
||||
|
||||
// Check for Auth required state for incoming connections.
|
||||
if authRequired && !didSolicit {
|
||||
ttl := secondsToDuration(s.opts.Cluster.AuthTimeout)
|
||||
c.setAuthTimer(ttl)
|
||||
}
|
||||
|
||||
c.mu.Unlock()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
const (
|
||||
_CRLF_ = "\r\n"
|
||||
_EMPTY_ = ""
|
||||
_SPC_ = " "
|
||||
)
|
||||
|
||||
const (
|
||||
subProto = "SUB %s %s %s" + _CRLF_
|
||||
unsubProto = "UNSUB %s%s" + _CRLF_
|
||||
)
|
||||
|
||||
// FIXME(dlc) - Make these reserved and reject if they come in as a sid
|
||||
// from a client connection.
|
||||
// Route constants
|
||||
const (
|
||||
RSID = "RSID"
|
||||
QRSID = "QRSID"
|
||||
|
||||
RSID_CID_INDEX = 1
|
||||
RSID_SID_INDEX = 2
|
||||
EXPECTED_MATCHES = 3
|
||||
)
|
||||
|
||||
// FIXME(dlc) - This may be too slow, check at later date.
|
||||
var qrsidRe = regexp.MustCompile(`QRSID:(\d+):([^\s]+)`)
|
||||
|
||||
func (s *Server) routeSidQueueSubscriber(rsid []byte) (*subscription, bool) {
|
||||
if !bytes.HasPrefix(rsid, []byte(QRSID)) {
|
||||
return nil, false
|
||||
}
|
||||
matches := qrsidRe.FindSubmatch(rsid)
|
||||
if matches == nil || len(matches) != EXPECTED_MATCHES {
|
||||
return nil, false
|
||||
}
|
||||
cid := uint64(parseInt64(matches[RSID_CID_INDEX]))
|
||||
|
||||
s.mu.Lock()
|
||||
client := s.clients[cid]
|
||||
s.mu.Unlock()
|
||||
|
||||
if client == nil {
|
||||
return nil, true
|
||||
}
|
||||
sid := matches[RSID_SID_INDEX]
|
||||
|
||||
client.mu.Lock()
|
||||
sub, ok := client.subs[string(sid)]
|
||||
client.mu.Unlock()
|
||||
if ok {
|
||||
return sub, true
|
||||
}
|
||||
return nil, true
|
||||
}
|
||||
|
||||
func routeSid(sub *subscription) string {
|
||||
var qi string
|
||||
if len(sub.queue) > 0 {
|
||||
qi = "Q"
|
||||
}
|
||||
return fmt.Sprintf("%s%s:%d:%s", qi, RSID, sub.client.cid, sub.sid)
|
||||
}
|
||||
|
||||
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
|
||||
id := c.route.remoteID
|
||||
sendInfo := false
|
||||
|
||||
s.mu.Lock()
|
||||
if !s.running {
|
||||
s.mu.Unlock()
|
||||
return false, false
|
||||
}
|
||||
remote, exists := s.remotes[id]
|
||||
if !exists {
|
||||
// Remove from the temporary map
|
||||
s.grMu.Lock()
|
||||
delete(s.grTmpClients, c.cid)
|
||||
s.grMu.Unlock()
|
||||
|
||||
s.routes[c.cid] = c
|
||||
s.remotes[id] = c
|
||||
|
||||
// If this server's ID is (alpha) less than the peer, then we will
|
||||
// make sure that if we are disconnected, we will try to connect once
|
||||
// more. This is to mitigate the issue where both sides add the route
|
||||
// on the opposite connection, and therefore we end-up with both
|
||||
// being dropped.
|
||||
if s.info.ID < id {
|
||||
c.mu.Lock()
|
||||
// Mark this as a retry (otherwise, only explicit routes are retried).
|
||||
c.route.retry = true
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// we don't need to send if the only route is the one we just accepted.
|
||||
sendInfo = len(s.routes) > 1
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if exists && c.route.didSolicit {
|
||||
// upgrade to solicited?
|
||||
remote.mu.Lock()
|
||||
// the existing route (remote) should keep its 'retry' value, and
|
||||
// not be replaced with c.route.retry.
|
||||
retry := remote.route.retry
|
||||
remote.route = c.route
|
||||
remote.route.retry = retry
|
||||
remote.mu.Unlock()
|
||||
}
|
||||
|
||||
return !exists, sendInfo
|
||||
}
|
||||
|
||||
func (s *Server) broadcastInterestToRoutes(proto string) {
|
||||
var arg []byte
|
||||
if atomic.LoadInt32(&trace) == 1 {
|
||||
arg = []byte(proto[:len(proto)-LEN_CR_LF])
|
||||
}
|
||||
protoAsBytes := []byte(proto)
|
||||
s.mu.Lock()
|
||||
for _, route := range s.routes {
|
||||
// FIXME(dlc) - Make same logic as deliverMsg
|
||||
route.mu.Lock()
|
||||
route.sendProto(protoAsBytes, true)
|
||||
route.mu.Unlock()
|
||||
route.traceOutOp("", arg)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// broadcastSubscribe will forward a client subscription
|
||||
// to all active routes.
|
||||
func (s *Server) broadcastSubscribe(sub *subscription) {
|
||||
if s.numRoutes() == 0 {
|
||||
return
|
||||
}
|
||||
rsid := routeSid(sub)
|
||||
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
|
||||
s.broadcastInterestToRoutes(proto)
|
||||
}
|
||||
|
||||
// broadcastUnSubscribe will forward a client unsubscribe
|
||||
// action to all active routes.
|
||||
func (s *Server) broadcastUnSubscribe(sub *subscription) {
|
||||
if s.numRoutes() == 0 {
|
||||
return
|
||||
}
|
||||
rsid := routeSid(sub)
|
||||
maxStr := _EMPTY_
|
||||
sub.client.mu.Lock()
|
||||
// Set max if we have it set and have not tripped auto-unsubscribe
|
||||
if sub.max > 0 && sub.nm < sub.max {
|
||||
maxStr = fmt.Sprintf(" %d", sub.max)
|
||||
}
|
||||
sub.client.mu.Unlock()
|
||||
proto := fmt.Sprintf(unsubProto, rsid, maxStr)
|
||||
s.broadcastInterestToRoutes(proto)
|
||||
}
|
||||
|
||||
func (s *Server) routeAcceptLoop(ch chan struct{}) {
|
||||
hp := net.JoinHostPort(s.opts.Cluster.Host, strconv.Itoa(s.opts.Cluster.Port))
|
||||
Noticef("Listening for route connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
// We need to close this channel to avoid a deadlock
|
||||
close(ch)
|
||||
Fatalf("Error listening on router port: %d - %v", s.opts.Cluster.Port, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.routeListener = l
|
||||
s.mu.Unlock()
|
||||
|
||||
// Let them know we are up
|
||||
close(ch)
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Route Accept Errorf(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createRoute(conn, nil)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Debugf("Router accept loop exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartRouting will start the accept loop on the cluster host:port
|
||||
// and will actively try to connect to listed routes.
|
||||
func (s *Server) StartRouting(clientListenReady chan struct{}) {
|
||||
defer s.grWG.Done()
|
||||
|
||||
// Wait for the client listen port to be opened, and
|
||||
// the possible ephemeral port to be selected.
|
||||
<-clientListenReady
|
||||
|
||||
// Get all possible URLs (when server listens to 0.0.0.0).
|
||||
// This is going to be sent to other Servers, so that they can let their
|
||||
// clients know about us.
|
||||
clientConnectURLs := s.getClientConnectURLs()
|
||||
|
||||
// Check for TLSConfig
|
||||
tlsReq := s.opts.Cluster.TLSConfig != nil
|
||||
info := Info{
|
||||
ID: s.info.ID,
|
||||
Version: s.info.Version,
|
||||
Host: s.opts.Cluster.Host,
|
||||
Port: s.opts.Cluster.Port,
|
||||
AuthRequired: false,
|
||||
TLSRequired: tlsReq,
|
||||
SSLRequired: tlsReq,
|
||||
TLSVerify: tlsReq,
|
||||
MaxPayload: s.info.MaxPayload,
|
||||
ClientConnectURLs: clientConnectURLs,
|
||||
}
|
||||
// Check for Auth items
|
||||
if s.opts.Cluster.Username != "" {
|
||||
info.AuthRequired = true
|
||||
}
|
||||
s.routeInfo = info
|
||||
b, _ := json.Marshal(info)
|
||||
s.routeInfoJSON = []byte(fmt.Sprintf(InfoProto, b))
|
||||
|
||||
// Spin up the accept loop
|
||||
ch := make(chan struct{})
|
||||
go s.routeAcceptLoop(ch)
|
||||
<-ch
|
||||
|
||||
// Solicit Routes if needed.
|
||||
s.solicitRoutes()
|
||||
}
|
||||
|
||||
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
|
||||
tryForEver := rtype == Explicit
|
||||
if tryForEver {
|
||||
time.Sleep(DEFAULT_ROUTE_RECONNECT)
|
||||
}
|
||||
s.connectToRoute(rURL, tryForEver)
|
||||
}
|
||||
|
||||
func (s *Server) connectToRoute(rURL *url.URL, tryForEver bool) {
|
||||
defer s.grWG.Done()
|
||||
for s.isRunning() && rURL != nil {
|
||||
Debugf("Trying to connect to route on %s", rURL.Host)
|
||||
conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
|
||||
if err != nil {
|
||||
Debugf("Error trying to connect to route: %v", err)
|
||||
select {
|
||||
case <-s.rcQuit:
|
||||
return
|
||||
case <-time.After(DEFAULT_ROUTE_CONNECT):
|
||||
if !tryForEver {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// We have a route connection here.
|
||||
// Go ahead and create it and exit this func.
|
||||
s.createRoute(conn, rURL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) isSolicitedRoute() bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.typ == ROUTER && c.route != nil && c.route.didSolicit
|
||||
}
|
||||
|
||||
func (s *Server) solicitRoutes() {
|
||||
for _, r := range s.opts.Routes {
|
||||
route := r
|
||||
s.startGoRoutine(func() { s.connectToRoute(route, true) })
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) numRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
923
vendor/github.com/nats-io/gnatsd/server/server.go
generated
vendored
|
@@ -1,923 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Allow dynamic profiling.
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/nats-io/gnatsd/util"
|
||||
)
|
||||
|
||||
// Info is the information sent to clients to help them understand information
|
||||
// about this server.
|
||||
type Info struct {
|
||||
ID string `json:"server_id"`
|
||||
Version string `json:"version"`
|
||||
GoVersion string `json:"go"`
|
||||
Host string `json:"host"`
|
||||
Port int `json:"port"`
|
||||
AuthRequired bool `json:"auth_required"`
|
||||
SSLRequired bool `json:"ssl_required"` // DEPRECATED: ssl json used for older clients
|
||||
TLSRequired bool `json:"tls_required"`
|
||||
TLSVerify bool `json:"tls_verify"`
|
||||
MaxPayload int `json:"max_payload"`
|
||||
IP string `json:"ip,omitempty"`
|
||||
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
|
||||
|
||||
// Used internally for quick look-ups.
|
||||
clientConnectURLs map[string]struct{}
|
||||
}
|
||||
|
||||
// Server is our main struct.
|
||||
type Server struct {
|
||||
gcid uint64
|
||||
grid uint64
|
||||
stats
|
||||
mu sync.Mutex
|
||||
info Info
|
||||
infoJSON []byte
|
||||
sl *Sublist
|
||||
opts *Options
|
||||
cAuth Auth
|
||||
rAuth Auth
|
||||
trace bool
|
||||
debug bool
|
||||
running bool
|
||||
listener net.Listener
|
||||
clients map[uint64]*client
|
||||
routes map[uint64]*client
|
||||
remotes map[string]*client
|
||||
totalClients uint64
|
||||
done chan bool
|
||||
start time.Time
|
||||
http net.Listener
|
||||
httpReqStats map[string]uint64
|
||||
routeListener net.Listener
|
||||
routeInfo Info
|
||||
routeInfoJSON []byte
|
||||
rcQuit chan bool
|
||||
grMu sync.Mutex
|
||||
grTmpClients map[uint64]*client
|
||||
grRunning bool
|
||||
grWG sync.WaitGroup // to wait on various go routines
|
||||
cproto int64 // number of clients supporting async INFO
|
||||
}
|
||||
|
||||
// Make sure all are 64bits for atomic use
|
||||
type stats struct {
|
||||
inMsgs int64
|
||||
outMsgs int64
|
||||
inBytes int64
|
||||
outBytes int64
|
||||
slowConsumers int64
|
||||
}
|
||||
|
||||
// New will setup a new server struct after parsing the options.
|
||||
func New(opts *Options) *Server {
|
||||
processOptions(opts)
|
||||
|
||||
// Process TLS options, including whether we require client certificates.
|
||||
tlsReq := opts.TLSConfig != nil
|
||||
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
|
||||
|
||||
info := Info{
|
||||
ID: genID(),
|
||||
Version: VERSION,
|
||||
GoVersion: runtime.Version(),
|
||||
Host: opts.Host,
|
||||
Port: opts.Port,
|
||||
AuthRequired: false,
|
||||
TLSRequired: tlsReq,
|
||||
SSLRequired: tlsReq,
|
||||
TLSVerify: verify,
|
||||
MaxPayload: opts.MaxPayload,
|
||||
clientConnectURLs: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
info: info,
|
||||
sl: NewSublist(),
|
||||
opts: opts,
|
||||
debug: opts.Debug,
|
||||
trace: opts.Trace,
|
||||
done: make(chan bool, 1),
|
||||
start: time.Now(),
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// For tracking clients
|
||||
s.clients = make(map[uint64]*client)
|
||||
|
||||
// For tracking connections that are not yet registered
|
||||
// in s.routes, but for which readLoop has started.
|
||||
s.grTmpClients = make(map[uint64]*client)
|
||||
|
||||
// For tracking routes and their remote ids
|
||||
s.routes = make(map[uint64]*client)
|
||||
s.remotes = make(map[string]*client)
|
||||
|
||||
// Used to kick out all of the route
|
||||
// connect Go routines.
|
||||
s.rcQuit = make(chan bool)
|
||||
s.generateServerInfoJSON()
|
||||
s.handleSignals()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// SetClientAuthMethod sets the authentication method for clients.
|
||||
func (s *Server) SetClientAuthMethod(authMethod Auth) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.info.AuthRequired = true
|
||||
s.cAuth = authMethod
|
||||
|
||||
s.generateServerInfoJSON()
|
||||
}
|
||||
|
||||
// SetRouteAuthMethod sets the authentication method for routes.
|
||||
func (s *Server) SetRouteAuthMethod(authMethod Auth) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.rAuth = authMethod
|
||||
}
|
||||
|
||||
func (s *Server) generateServerInfoJSON() {
|
||||
// Generate the info json
|
||||
b, err := json.Marshal(s.info)
|
||||
if err != nil {
|
||||
Fatalf("Error marshalling INFO JSON: %+v\n", err)
|
||||
return
|
||||
}
|
||||
s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
|
||||
}
|
||||
|
||||
// PrintAndDie is exported for access in other packages.
|
||||
func PrintAndDie(msg string) {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", msg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// PrintServerAndExit will print our version and exit.
|
||||
func PrintServerAndExit() {
|
||||
fmt.Printf("nats-server version %s\n", VERSION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// ProcessCommandLineArgs takes the command line arguments
|
||||
// validating and setting flags for handling in case any
|
||||
// sub command was present.
|
||||
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
|
||||
if len(cmd.Args()) > 0 {
|
||||
arg := cmd.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "version":
|
||||
return true, false, nil
|
||||
case "help":
|
||||
return false, true, nil
|
||||
default:
|
||||
return false, false, fmt.Errorf("Unrecognized command: %q\n", arg)
|
||||
}
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// Protected check on running state
|
||||
func (s *Server) isRunning() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.running
|
||||
}
|
||||
|
||||
func (s *Server) logPid() {
|
||||
pidStr := strconv.Itoa(os.Getpid())
|
||||
err := ioutil.WriteFile(s.opts.PidFile, []byte(pidStr), 0660)
|
||||
if err != nil {
|
||||
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Start up the server, this will block.
|
||||
// Start via a Go routine if needed.
|
||||
func (s *Server) Start() {
|
||||
Noticef("Starting nats-server version %s", VERSION)
|
||||
Debugf("Go build version %s", s.info.GoVersion)
|
||||
|
||||
// Avoid RACE between Start() and Shutdown()
|
||||
s.mu.Lock()
|
||||
s.running = true
|
||||
s.mu.Unlock()
|
||||
|
||||
s.grMu.Lock()
|
||||
s.grRunning = true
|
||||
s.grMu.Unlock()
|
||||
|
||||
// Log the pid to a file
|
||||
if s.opts.PidFile != _EMPTY_ {
|
||||
s.logPid()
|
||||
}
|
||||
|
||||
// Start up the http server if needed.
|
||||
if s.opts.HTTPPort != 0 {
|
||||
s.StartHTTPMonitoring()
|
||||
}
|
||||
|
||||
// Start up the https server if needed.
|
||||
if s.opts.HTTPSPort != 0 {
|
||||
if s.opts.TLSConfig == nil {
|
||||
Fatalf("TLS cert and key required for HTTPS")
|
||||
return
|
||||
}
|
||||
s.StartHTTPSMonitoring()
|
||||
}
|
||||
|
||||
// The Routing routine needs to wait for the client listen
|
||||
// port to be opened and potential ephemeral port selected.
|
||||
clientListenReady := make(chan struct{})
|
||||
|
||||
// Start up routing as well if needed.
|
||||
if s.opts.Cluster.Port != 0 {
|
||||
s.startGoRoutine(func() {
|
||||
s.StartRouting(clientListenReady)
|
||||
})
|
||||
}
|
||||
|
||||
// Pprof http endpoint for the profiler.
|
||||
if s.opts.ProfPort != 0 {
|
||||
s.StartProfiler()
|
||||
}
|
||||
|
||||
// Wait for clients.
|
||||
s.AcceptLoop(clientListenReady)
|
||||
}
|
||||
|
||||
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
|
||||
// and closing all associated clients.
|
||||
func (s *Server) Shutdown() {
|
||||
s.mu.Lock()
|
||||
|
||||
// Prevent issues with multiple calls.
|
||||
if !s.running {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
s.running = false
|
||||
s.grMu.Lock()
|
||||
s.grRunning = false
|
||||
s.grMu.Unlock()
|
||||
|
||||
conns := make(map[uint64]*client)
|
||||
|
||||
// Copy off the clients
|
||||
for i, c := range s.clients {
|
||||
conns[i] = c
|
||||
}
|
||||
// Copy off the connections that are not yet registered
|
||||
// in s.routes, but for which the readLoop has started
|
||||
s.grMu.Lock()
|
||||
for i, c := range s.grTmpClients {
|
||||
conns[i] = c
|
||||
}
|
||||
s.grMu.Unlock()
|
||||
// Copy off the routes
|
||||
for i, r := range s.routes {
|
||||
conns[i] = r
|
||||
}
|
||||
|
||||
// Number of done channel responses we expect.
|
||||
doneExpected := 0
|
||||
|
||||
// Kick client AcceptLoop()
|
||||
if s.listener != nil {
|
||||
doneExpected++
|
||||
s.listener.Close()
|
||||
s.listener = nil
|
||||
}
|
||||
|
||||
// Kick route AcceptLoop()
|
||||
if s.routeListener != nil {
|
||||
doneExpected++
|
||||
s.routeListener.Close()
|
||||
s.routeListener = nil
|
||||
}
|
||||
|
||||
// Kick HTTP monitoring if its running
|
||||
if s.http != nil {
|
||||
doneExpected++
|
||||
s.http.Close()
|
||||
s.http = nil
|
||||
}
|
||||
|
||||
// Release the solicited routes connect go routines.
|
||||
close(s.rcQuit)
|
||||
|
||||
s.mu.Unlock()
|
||||
|
||||
// Close client and route connections
|
||||
for _, c := range conns {
|
||||
c.closeConnection()
|
||||
}
|
||||
|
||||
// Block until the accept loops exit
|
||||
for doneExpected > 0 {
|
||||
<-s.done
|
||||
doneExpected--
|
||||
}
|
||||
|
||||
// Wait for go routines to be done.
|
||||
s.grWG.Wait()
|
||||
}
|
||||
|
||||
// AcceptLoop is exported for easier testing.
|
||||
func (s *Server) AcceptLoop(clr chan struct{}) {
|
||||
// If we were to exit before the listener is setup properly,
|
||||
// make sure we close the channel.
|
||||
defer func() {
|
||||
if clr != nil {
|
||||
close(clr)
|
||||
}
|
||||
}()
|
||||
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.Port))
|
||||
Noticef("Listening for client connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
Fatalf("Error listening on port: %s, %q", hp, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Alert of TLS enabled.
|
||||
if s.opts.TLSConfig != nil {
|
||||
Noticef("TLS required for client connections")
|
||||
}
|
||||
|
||||
Debugf("Server id is %s", s.info.ID)
|
||||
Noticef("Server is ready")
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.listener = l
|
||||
|
||||
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
|
||||
// to 0 at the beginning of this function. So we need to get the actual port
|
||||
if s.opts.Port == 0 {
|
||||
// Write resolved port back to options.
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
portNum, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.opts.Port = portNum
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
// Let the caller know that we are ready
|
||||
close(clr)
|
||||
clr = nil
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Client Accept Error(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createClient(conn)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Noticef("Server Exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartProfiler is called to enable dynamic profiling.
|
||||
func (s *Server) StartProfiler() {
|
||||
Noticef("Starting profiling on http port %d", s.opts.ProfPort)
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.ProfPort))
|
||||
go func() {
|
||||
err := http.ListenAndServe(hp, nil)
|
||||
if err != nil {
|
||||
Fatalf("error starting monitor server: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// StartHTTPMonitoring will enable the HTTP monitoring port.
|
||||
func (s *Server) StartHTTPMonitoring() {
|
||||
s.startMonitoring(false)
|
||||
}
|
||||
|
||||
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
|
||||
func (s *Server) StartHTTPSMonitoring() {
|
||||
s.startMonitoring(true)
|
||||
}
|
||||
|
||||
// HTTP endpoints
|
||||
const (
|
||||
RootPath = "/"
|
||||
VarzPath = "/varz"
|
||||
ConnzPath = "/connz"
|
||||
RoutezPath = "/routez"
|
||||
SubszPath = "/subsz"
|
||||
StackszPath = "/stacksz"
|
||||
)
|
||||
|
||||
// Start the monitoring server
|
||||
func (s *Server) startMonitoring(secure bool) {
|
||||
|
||||
// Used to track HTTP requests
|
||||
s.httpReqStats = map[string]uint64{
|
||||
RootPath: 0,
|
||||
VarzPath: 0,
|
||||
ConnzPath: 0,
|
||||
RoutezPath: 0,
|
||||
SubszPath: 0,
|
||||
}
|
||||
|
||||
var hp string
|
||||
var err error
|
||||
|
||||
if secure {
|
||||
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPSPort))
|
||||
Noticef("Starting https monitor on %s", hp)
|
||||
config := util.CloneTLSConfig(s.opts.TLSConfig)
|
||||
config.ClientAuth = tls.NoClientCert
|
||||
s.http, err = tls.Listen("tcp", hp, config)
|
||||
|
||||
} else {
|
||||
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPPort))
|
||||
Noticef("Starting http monitor on %s", hp)
|
||||
s.http, err = net.Listen("tcp", hp)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
Fatalf("Can't listen to the monitor port: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Root
|
||||
mux.HandleFunc(RootPath, s.HandleRoot)
|
||||
// Varz
|
||||
mux.HandleFunc(VarzPath, s.HandleVarz)
|
||||
// Connz
|
||||
mux.HandleFunc(ConnzPath, s.HandleConnz)
|
||||
// Routez
|
||||
mux.HandleFunc(RoutezPath, s.HandleRoutez)
|
||||
// Subz
|
||||
mux.HandleFunc(SubszPath, s.HandleSubsz)
|
||||
// Subz alias for backwards compatibility
|
||||
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
|
||||
// Stacksz
|
||||
mux.HandleFunc(StackszPath, s.HandleStacksz)
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: hp,
|
||||
Handler: mux,
|
||||
ReadTimeout: 2 * time.Second,
|
||||
WriteTimeout: 2 * time.Second,
|
||||
MaxHeaderBytes: 1 << 20,
|
||||
}
|
||||
|
||||
go func() {
|
||||
srv.Serve(s.http)
|
||||
srv.Handler = nil
|
||||
s.done <- true
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Server) createClient(conn net.Conn) *client {
|
||||
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: s.info.MaxPayload, start: time.Now()}
|
||||
|
||||
// Grab JSON info string
|
||||
s.mu.Lock()
|
||||
info := s.infoJSON
|
||||
authRequired := s.info.AuthRequired
|
||||
tlsRequired := s.info.TLSRequired
|
||||
s.totalClients++
|
||||
s.mu.Unlock()
|
||||
|
||||
// Grab lock
|
||||
c.mu.Lock()
|
||||
|
||||
// Initialize
|
||||
c.initClient()
|
||||
|
||||
c.Debugf("Client connection created")
|
||||
|
||||
// Check for Auth
|
||||
if authRequired {
|
||||
c.setAuthTimer(secondsToDuration(s.opts.AuthTimeout))
|
||||
}
|
||||
|
||||
// Send our information.
|
||||
c.sendInfo(info)
|
||||
|
||||
// Unlock to register
|
||||
c.mu.Unlock()
|
||||
|
||||
// Register with the server.
|
||||
s.mu.Lock()
|
||||
// If server is not running, Shutdown() may have already gathered the
|
||||
// list of connections to close. It won't contain this one, so we need
|
||||
// to bail out now otherwise the readLoop started down there would not
|
||||
// be interrupted.
|
||||
if !s.running {
|
||||
s.mu.Unlock()
|
||||
return c
|
||||
}
|
||||
// If there is a max connections specified, check that adding
|
||||
// this new client would not push us over the max
|
||||
if s.opts.MaxConn > 0 && len(s.clients) >= s.opts.MaxConn {
|
||||
s.mu.Unlock()
|
||||
c.maxConnExceeded()
|
||||
return nil
|
||||
}
|
||||
s.clients[c.cid] = c
|
||||
s.mu.Unlock()
|
||||
|
||||
// Re-Grab lock
|
||||
c.mu.Lock()
|
||||
|
||||
// Check for TLS
|
||||
if tlsRequired {
|
||||
c.Debugf("Starting TLS client connection handshake")
|
||||
c.nc = tls.Server(c.nc, s.opts.TLSConfig)
|
||||
conn := c.nc.(*tls.Conn)
|
||||
|
||||
// Setup the timeout
|
||||
ttl := secondsToDuration(s.opts.TLSTimeout)
|
||||
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
|
||||
conn.SetReadDeadline(time.Now().Add(ttl))
|
||||
|
||||
// Force handshake
|
||||
c.mu.Unlock()
|
||||
if err := conn.Handshake(); err != nil {
|
||||
c.Debugf("TLS handshake error: %v", err)
|
||||
c.sendErr("Secure Connection - TLS Required")
|
||||
c.closeConnection()
|
||||
return nil
|
||||
}
|
||||
// Reset the read deadline
|
||||
conn.SetReadDeadline(time.Time{})
|
||||
|
||||
// Re-Grab lock
|
||||
c.mu.Lock()
|
||||
}
|
||||
|
||||
// The connection may have been closed
|
||||
if c.nc == nil {
|
||||
c.mu.Unlock()
|
||||
return c
|
||||
}
|
||||
|
||||
if tlsRequired {
|
||||
// Rewrap bw
|
||||
c.bw = bufio.NewWriterSize(c.nc, startBufSize)
|
||||
}
|
||||
|
||||
// Do final client initialization
|
||||
|
||||
// Set the Ping timer
|
||||
c.setPingTimer()
|
||||
|
||||
// Spin up the read loop.
|
||||
s.startGoRoutine(func() { c.readLoop() })
|
||||
|
||||
if tlsRequired {
|
||||
c.Debugf("TLS handshake complete")
|
||||
cs := c.nc.(*tls.Conn).ConnectionState()
|
||||
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
|
||||
}
|
||||
|
||||
c.mu.Unlock()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// updateServerINFO updates the server's Info object with the given
|
||||
// array of URLs and re-generates the infoJSON byte array, only if the
|
||||
// given URLs were not already recorded and if the feature is not
|
||||
// disabled.
|
||||
// Returns a boolean indicating if server's Info was updated.
|
||||
func (s *Server) updateServerINFO(urls []string) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Feature disabled, do not update.
|
||||
if s.opts.Cluster.NoAdvertise {
|
||||
return false
|
||||
}
|
||||
|
||||
// Will be set to true if we alter the server's Info object.
|
||||
wasUpdated := false
|
||||
for _, url := range urls {
|
||||
if _, present := s.info.clientConnectURLs[url]; !present {
|
||||
|
||||
s.info.clientConnectURLs[url] = struct{}{}
|
||||
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
|
||||
wasUpdated = true
|
||||
}
|
||||
}
|
||||
if wasUpdated {
|
||||
s.generateServerInfoJSON()
|
||||
}
|
||||
return wasUpdated
|
||||
}
|
||||
|
||||
// Handle closing down a connection when the handshake has timed out.
|
||||
func tlsTimeout(c *client, conn *tls.Conn) {
|
||||
c.mu.Lock()
|
||||
nc := c.nc
|
||||
c.mu.Unlock()
|
||||
// Check if already closed
|
||||
if nc == nil {
|
||||
return
|
||||
}
|
||||
cs := conn.ConnectionState()
|
||||
if !cs.HandshakeComplete {
|
||||
c.Debugf("TLS handshake timeout")
|
||||
c.sendErr("Secure Connection - TLS Required")
|
||||
c.closeConnection()
|
||||
}
|
||||
}
|
||||
|
||||
// Seems silly we have to write these
|
||||
func tlsVersion(ver uint16) string {
|
||||
switch ver {
|
||||
case tls.VersionTLS10:
|
||||
return "1.0"
|
||||
case tls.VersionTLS11:
|
||||
return "1.1"
|
||||
case tls.VersionTLS12:
|
||||
return "1.2"
|
||||
}
|
||||
return fmt.Sprintf("Unknown [%x]", ver)
|
||||
}
|
||||
|
||||
// We use hex here so we don't need multiple versions
|
||||
func tlsCipher(cs uint16) string {
|
||||
switch cs {
|
||||
case 0x0005:
|
||||
return "TLS_RSA_WITH_RC4_128_SHA"
|
||||
case 0x000a:
|
||||
return "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
|
||||
case 0x002f:
|
||||
return "TLS_RSA_WITH_AES_128_CBC_SHA"
|
||||
case 0x0035:
|
||||
return "TLS_RSA_WITH_AES_256_CBC_SHA"
|
||||
case 0xc007:
|
||||
return "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA"
|
||||
case 0xc009:
|
||||
return "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
|
||||
case 0xc00a:
|
||||
return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
|
||||
case 0xc011:
|
||||
return "TLS_ECDHE_RSA_WITH_RC4_128_SHA"
|
||||
case 0xc012:
|
||||
return "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA"
|
||||
case 0xc013:
|
||||
return "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
|
||||
case 0xc014:
|
||||
return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
|
||||
case 0xc02f:
|
||||
return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
|
||||
case 0xc02b:
|
||||
return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
|
||||
case 0xc030:
|
||||
return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
|
||||
case 0xc02c:
|
||||
return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
|
||||
}
|
||||
return fmt.Sprintf("Unknown [%x]", cs)
|
||||
}
|
||||
|
||||
func (s *Server) checkClientAuth(c *client) bool {
|
||||
if s.cAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.cAuth.Check(c)
|
||||
}
|
||||
|
||||
func (s *Server) checkRouterAuth(c *client) bool {
|
||||
if s.rAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.rAuth.Check(c)
|
||||
}
|
||||
|
||||
// Check auth and return boolean indicating if client is ok
|
||||
func (s *Server) checkAuth(c *client) bool {
|
||||
switch c.typ {
|
||||
case CLIENT:
|
||||
return s.checkClientAuth(c)
|
||||
case ROUTER:
|
||||
return s.checkRouterAuth(c)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Remove a client or route from our internal accounting.
|
||||
func (s *Server) removeClient(c *client) {
|
||||
var rID string
|
||||
c.mu.Lock()
|
||||
cid := c.cid
|
||||
typ := c.typ
|
||||
r := c.route
|
||||
if r != nil {
|
||||
rID = r.remoteID
|
||||
}
|
||||
updateProtoInfoCount := false
|
||||
if typ == CLIENT && c.opts.Protocol >= ClientProtoInfo {
|
||||
updateProtoInfoCount = true
|
||||
}
|
||||
c.mu.Unlock()
|
||||
|
||||
s.mu.Lock()
|
||||
switch typ {
|
||||
case CLIENT:
|
||||
delete(s.clients, cid)
|
||||
if updateProtoInfoCount {
|
||||
s.cproto--
|
||||
}
|
||||
case ROUTER:
|
||||
delete(s.routes, cid)
|
||||
if r != nil {
|
||||
rc, ok := s.remotes[rID]
|
||||
// Only delete it if it is us..
|
||||
if ok && c == rc {
|
||||
delete(s.remotes, rID)
|
||||
}
|
||||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// These are some helpers for accounting in functional tests.
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
// NumRoutes will report the number of registered routes.
|
||||
func (s *Server) NumRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
||||
|
||||
// NumRemotes will report number of registered remotes.
|
||||
func (s *Server) NumRemotes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.remotes)
|
||||
}
|
||||
|
||||
// NumClients will report the number of registered clients.
|
||||
func (s *Server) NumClients() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.clients)
|
||||
}
|
||||
|
||||
// NumSubscriptions will report how many subscriptions are active.
|
||||
func (s *Server) NumSubscriptions() uint32 {
|
||||
s.mu.Lock()
|
||||
subs := s.sl.Count()
|
||||
s.mu.Unlock()
|
||||
return subs
|
||||
}
|
||||
|
||||
// Addr will return the net.Addr object for the current listener.
|
||||
func (s *Server) Addr() net.Addr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.listener == nil {
|
||||
return nil
|
||||
}
|
||||
return s.listener.Addr()
|
||||
}
|
||||
|
||||
// ReadyForConnections returns `true` if the server is ready to accept client
|
||||
// and, if routing is enabled, route connections. If after the duration
|
||||
// `dur` the server is still not ready, returns `false`.
|
||||
func (s *Server) ReadyForConnections(dur time.Duration) bool {
|
||||
end := time.Now().Add(dur)
|
||||
for time.Now().Before(end) {
|
||||
s.mu.Lock()
|
||||
ok := s.listener != nil && (s.opts.Cluster.Port == 0 || s.routeListener != nil)
|
||||
s.mu.Unlock()
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ID returns the server's ID
|
||||
func (s *Server) ID() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.info.ID
|
||||
}
|
||||
|
||||
func (s *Server) startGoRoutine(f func()) {
|
||||
s.grMu.Lock()
|
||||
if s.grRunning {
|
||||
s.grWG.Add(1)
|
||||
go f()
|
||||
}
|
||||
s.grMu.Unlock()
|
||||
}
|
||||
|
||||
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
|
||||
// port based on the server options' Host and Port. If the Host corresponds to
|
||||
// "any" interfaces, this call returns the list of resolved IP addresses.
|
||||
func (s *Server) getClientConnectURLs() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
sPort := strconv.Itoa(s.opts.Port)
|
||||
urls := make([]string, 0, 1)
|
||||
|
||||
ipAddr, err := net.ResolveIPAddr("ip", s.opts.Host)
|
||||
// If the host is "any" (0.0.0.0 or ::), get specific IPs from available
|
||||
// interfaces.
|
||||
if err == nil && ipAddr.IP.IsUnspecified() {
|
||||
var ip net.IP
|
||||
ifaces, _ := net.Interfaces()
|
||||
for _, i := range ifaces {
|
||||
addrs, _ := i.Addrs()
|
||||
for _, addr := range addrs {
|
||||
switch v := addr.(type) {
|
||||
case *net.IPNet:
|
||||
ip = v.IP
|
||||
case *net.IPAddr:
|
||||
ip = v.IP
|
||||
}
|
||||
// Skip non global unicast addresses
|
||||
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
|
||||
ip = nil
|
||||
continue
|
||||
}
|
||||
urls = append(urls, net.JoinHostPort(ip.String(), sPort))
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil || len(urls) == 0 {
|
||||
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
|
||||
// reason we could not add any URL in the loop above.
|
||||
// We had a case where a Windows VM was hosed and would have err == nil
|
||||
// and not add any address in the array in the loop above, and we
|
||||
// ended-up returning 0.0.0.0, which is problematic for Windows clients.
|
||||
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
|
||||
if s.opts.Host == "0.0.0.0" || s.opts.Host == "::" {
|
||||
Errorf("Address %q can not be resolved properly", s.opts.Host)
|
||||
} else {
|
||||
urls = append(urls, net.JoinHostPort(s.opts.Host, sPort))
|
||||
}
|
||||
}
|
||||
return urls
|
||||
}
|
34
vendor/github.com/nats-io/gnatsd/server/signal.go
generated
vendored
|
@@ -1,34 +0,0 @@
|
|||
// +build !windows
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Signal Handling
|
||||
func (s *Server) handleSignals() {
|
||||
if s.opts.NoSigs {
|
||||
return
|
||||
}
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
signal.Notify(c, syscall.SIGINT, syscall.SIGUSR1)
|
||||
|
||||
go func() {
|
||||
for sig := range c {
|
||||
Debugf("Trapped %q signal", sig)
|
||||
switch sig {
|
||||
case syscall.SIGINT:
|
||||
Noticef("Server Exiting..")
|
||||
os.Exit(0)
|
||||
case syscall.SIGUSR1:
|
||||
// File log re-open for rotating file logs.
|
||||
s.ReOpenLogFile()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
26
vendor/github.com/nats-io/gnatsd/server/signal_windows.go
generated
vendored
|
@@ -1,26 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
)
|
||||
|
||||
// Signal Handling
|
||||
func (s *Server) handleSignals() {
|
||||
if s.opts.NoSigs {
|
||||
return
|
||||
}
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
signal.Notify(c, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
for sig := range c {
|
||||
Debugf("Trapped %q signal", sig)
|
||||
Noticef("Server Exiting..")
|
||||
os.Exit(0)
|
||||
}
|
||||
}()
|
||||
}
|
643
vendor/github.com/nats-io/gnatsd/server/sublist.go
generated
vendored
|
@@ -1,643 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package sublist is a routing mechanism to handle subject distribution
|
||||
// and provides a facility to match subjects from published messages to
|
||||
// interested subscribers. Subscribers can have wildcard subjects to match
|
||||
// multiple published subjects.
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Common byte variables for wildcards and token separator.
|
||||
const (
|
||||
pwc = '*'
|
||||
fwc = '>'
|
||||
tsep = "."
|
||||
btsep = '.'
|
||||
)
|
||||
|
||||
// Sublist related errors
|
||||
var (
|
||||
ErrInvalidSubject = errors.New("sublist: Invalid Subject")
|
||||
ErrNotFound = errors.New("sublist: No Matches Found")
|
||||
)
|
||||
|
||||
// slCacheMax is used to bound the size of the frontend cache
|
||||
const slCacheMax = 1024
|
||||
|
||||
// A result structure better optimized for queue subs.
|
||||
type SublistResult struct {
|
||||
psubs []*subscription
|
||||
qsubs [][]*subscription // don't make this a map, too expensive to iterate
|
||||
}
|
||||
|
||||
// A Sublist stores and efficiently retrieves subscriptions.
|
||||
type Sublist struct {
|
||||
sync.RWMutex
|
||||
genid uint64
|
||||
matches uint64
|
||||
cacheHits uint64
|
||||
inserts uint64
|
||||
removes uint64
|
||||
cache map[string]*SublistResult
|
||||
root *level
|
||||
count uint32
|
||||
}
|
||||
|
||||
// A node contains subscriptions and a pointer to the next level.
|
||||
type node struct {
|
||||
next *level
|
||||
psubs []*subscription
|
||||
qsubs [][]*subscription
|
||||
}
|
||||
|
||||
// A level represents a group of nodes and special pointers to
|
||||
// wildcard nodes.
|
||||
type level struct {
|
||||
nodes map[string]*node
|
||||
pwc, fwc *node
|
||||
}
|
||||
|
||||
// Create a new default node.
|
||||
func newNode() *node {
|
||||
return &node{psubs: make([]*subscription, 0, 4)}
|
||||
}
|
||||
|
||||
// Create a new default level. We use FNV1A as the hash
|
||||
// algorithm for the tokens, which should be short.
|
||||
func newLevel() *level {
|
||||
return &level{nodes: make(map[string]*node)}
|
||||
}
|
||||
|
||||
// New will create a default sublist
|
||||
func NewSublist() *Sublist {
|
||||
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
|
||||
}
|
||||
|
||||
// Insert adds a subscription into the sublist
|
||||
func (s *Sublist) Insert(sub *subscription) error {
|
||||
// copy the subject since we hold this and this might be part of a large byte slice.
|
||||
subject := string(sub.subject)
|
||||
tsa := [32]string{}
|
||||
tokens := tsa[:0]
|
||||
start := 0
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if subject[i] == btsep {
|
||||
tokens = append(tokens, subject[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
tokens = append(tokens, subject[start:])
|
||||
|
||||
s.Lock()
|
||||
|
||||
sfwc := false
|
||||
l := s.root
|
||||
var n *node
|
||||
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 || sfwc {
|
||||
s.Unlock()
|
||||
return ErrInvalidSubject
|
||||
}
|
||||
|
||||
switch t[0] {
|
||||
case pwc:
|
||||
n = l.pwc
|
||||
case fwc:
|
||||
n = l.fwc
|
||||
sfwc = true
|
||||
default:
|
||||
n = l.nodes[t]
|
||||
}
|
||||
if n == nil {
|
||||
n = newNode()
|
||||
switch t[0] {
|
||||
case pwc:
|
||||
l.pwc = n
|
||||
case fwc:
|
||||
l.fwc = n
|
||||
default:
|
||||
l.nodes[t] = n
|
||||
}
|
||||
}
|
||||
if n.next == nil {
|
||||
n.next = newLevel()
|
||||
}
|
||||
l = n.next
|
||||
}
|
||||
if sub.queue == nil {
|
||||
n.psubs = append(n.psubs, sub)
|
||||
} else {
|
||||
// This is a queue subscription
|
||||
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
|
||||
n.qsubs[i] = append(n.qsubs[i], sub)
|
||||
} else {
|
||||
n.qsubs = append(n.qsubs, []*subscription{sub})
|
||||
}
|
||||
}
|
||||
|
||||
s.count++
|
||||
s.inserts++
|
||||
|
||||
s.addToCache(subject, sub)
|
||||
atomic.AddUint64(&s.genid, 1)
|
||||
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deep copy
|
||||
func copyResult(r *SublistResult) *SublistResult {
|
||||
nr := &SublistResult{}
|
||||
nr.psubs = append([]*subscription(nil), r.psubs...)
|
||||
for _, qr := range r.qsubs {
|
||||
nqr := append([]*subscription(nil), qr...)
|
||||
nr.qsubs = append(nr.qsubs, nqr)
|
||||
}
|
||||
return nr
|
||||
}
|
||||
|
||||
// addToCache will add the new entry to existing cache
|
||||
// entries if needed. Assumes write lock is held.
|
||||
func (s *Sublist) addToCache(subject string, sub *subscription) {
|
||||
for k, r := range s.cache {
|
||||
if matchLiteral(k, subject) {
|
||||
// Copy since others may have a reference.
|
||||
nr := copyResult(r)
|
||||
if sub.queue == nil {
|
||||
nr.psubs = append(nr.psubs, sub)
|
||||
} else {
|
||||
if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
|
||||
nr.qsubs[i] = append(nr.qsubs[i], sub)
|
||||
} else {
|
||||
nr.qsubs = append(nr.qsubs, []*subscription{sub})
|
||||
}
|
||||
}
|
||||
s.cache[k] = nr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeFromCache will remove the sub from any active cache entries.
|
||||
// Assumes write lock is held.
|
||||
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
|
||||
for k := range s.cache {
|
||||
if !matchLiteral(k, subject) {
|
||||
continue
|
||||
}
|
||||
// Since someone else may be referencing it, we can't modify the list
|
||||
// safely, just let it re-populate.
|
||||
delete(s.cache, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Match will match all entries to the literal subject.
|
||||
// It will return a set of results for both normal and queue subscribers.
|
||||
func (s *Sublist) Match(subject string) *SublistResult {
|
||||
s.RLock()
|
||||
atomic.AddUint64(&s.matches, 1)
|
||||
rc, ok := s.cache[subject]
|
||||
s.RUnlock()
|
||||
if ok {
|
||||
atomic.AddUint64(&s.cacheHits, 1)
|
||||
return rc
|
||||
}
|
||||
|
||||
tsa := [32]string{}
|
||||
tokens := tsa[:0]
|
||||
start := 0
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if subject[i] == btsep {
|
||||
tokens = append(tokens, subject[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
tokens = append(tokens, subject[start:])
|
||||
|
||||
// FIXME(dlc) - Make shared pool between sublist and client readLoop?
|
||||
result := &SublistResult{}
|
||||
|
||||
s.Lock()
|
||||
matchLevel(s.root, tokens, result)
|
||||
|
||||
// Add to our cache
|
||||
s.cache[subject] = result
|
||||
// Bound the number of entries to slCacheMax
|
||||
if len(s.cache) > slCacheMax {
|
||||
for k := range s.cache {
|
||||
delete(s.cache, k)
|
||||
break
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// This will add in a node's results to the total results.
|
||||
func addNodeToResults(n *node, results *SublistResult) {
|
||||
results.psubs = append(results.psubs, n.psubs...)
|
||||
for _, qr := range n.qsubs {
|
||||
if len(qr) == 0 {
|
||||
continue
|
||||
}
|
||||
// Need to find matching list in results
|
||||
if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
|
||||
results.qsubs[i] = append(results.qsubs[i], qr...)
|
||||
} else {
|
||||
results.qsubs = append(results.qsubs, qr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We do not use a map here since we want iteration to be fast when
|
||||
// processing publishes in L1 on client. So we need to walk sequentially
|
||||
// for now. Keep an eye on this in case we start getting large number of
|
||||
// different queue subscribers for the same subject.
|
||||
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
|
||||
if sub.queue == nil {
|
||||
return -1
|
||||
}
|
||||
for i, qr := range qsl {
|
||||
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// matchLevel is used to recursively descend into the trie.
|
||||
func matchLevel(l *level, toks []string, results *SublistResult) {
|
||||
var pwc, n *node
|
||||
for i, t := range toks {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
if l.fwc != nil {
|
||||
addNodeToResults(l.fwc, results)
|
||||
}
|
||||
if pwc = l.pwc; pwc != nil {
|
||||
matchLevel(pwc.next, toks[i+1:], results)
|
||||
}
|
||||
n = l.nodes[t]
|
||||
if n != nil {
|
||||
l = n.next
|
||||
} else {
|
||||
l = nil
|
||||
}
|
||||
}
|
||||
if n != nil {
|
||||
addNodeToResults(n, results)
|
||||
}
|
||||
if pwc != nil {
|
||||
addNodeToResults(pwc, results)
|
||||
}
|
||||
}
|
||||
|
||||
// lnt is used to track descent into levels for a removal for pruning.
|
||||
type lnt struct {
|
||||
l *level
|
||||
n *node
|
||||
t string
|
||||
}
|
||||
|
||||
// Remove will remove a subscription.
|
||||
func (s *Sublist) Remove(sub *subscription) error {
|
||||
subject := string(sub.subject)
|
||||
tsa := [32]string{}
|
||||
tokens := tsa[:0]
|
||||
start := 0
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if subject[i] == btsep {
|
||||
tokens = append(tokens, subject[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
tokens = append(tokens, subject[start:])
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
sfwc := false
|
||||
l := s.root
|
||||
var n *node
|
||||
|
||||
// Track levels for pruning
|
||||
var lnts [32]lnt
|
||||
levels := lnts[:0]
|
||||
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 || sfwc {
|
||||
return ErrInvalidSubject
|
||||
}
|
||||
if l == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
switch t[0] {
|
||||
case pwc:
|
||||
n = l.pwc
|
||||
case fwc:
|
||||
n = l.fwc
|
||||
sfwc = true
|
||||
default:
|
||||
n = l.nodes[t]
|
||||
}
|
||||
if n != nil {
|
||||
levels = append(levels, lnt{l, n, t})
|
||||
l = n.next
|
||||
} else {
|
||||
l = nil
|
||||
}
|
||||
}
|
||||
if !s.removeFromNode(n, sub) {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
s.count--
|
||||
s.removes++
|
||||
|
||||
for i := len(levels) - 1; i >= 0; i-- {
|
||||
l, n, t := levels[i].l, levels[i].n, levels[i].t
|
||||
if n.isEmpty() {
|
||||
l.pruneNode(n, t)
|
||||
}
|
||||
}
|
||||
s.removeFromCache(subject, sub)
|
||||
atomic.AddUint64(&s.genid, 1)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pruneNode is used to prune an empty node from the tree.
|
||||
func (l *level) pruneNode(n *node, t string) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
if n == l.fwc {
|
||||
l.fwc = nil
|
||||
} else if n == l.pwc {
|
||||
l.pwc = nil
|
||||
} else {
|
||||
delete(l.nodes, t)
|
||||
}
|
||||
}
|
||||
|
||||
// isEmpty will test if the node has any entries. Used
|
||||
// in pruning.
|
||||
func (n *node) isEmpty() bool {
|
||||
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
|
||||
if n.next == nil || n.next.numNodes() == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Return the number of nodes for the given level.
|
||||
func (l *level) numNodes() int {
|
||||
num := len(l.nodes)
|
||||
if l.pwc != nil {
|
||||
num++
|
||||
}
|
||||
if l.fwc != nil {
|
||||
num++
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
// Removes a sub from a list.
|
||||
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
|
||||
for i := 0; i < len(sl); i++ {
|
||||
if sl[i] == sub {
|
||||
last := len(sl) - 1
|
||||
sl[i] = sl[last]
|
||||
sl[last] = nil
|
||||
sl = sl[:last]
|
||||
return shrinkAsNeeded(sl), true
|
||||
}
|
||||
}
|
||||
return sl, false
|
||||
}
|
||||
|
||||
// Remove the sub for the given node.
|
||||
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
|
||||
if n == nil {
|
||||
return false
|
||||
}
|
||||
if sub.queue == nil {
|
||||
n.psubs, found = removeSubFromList(sub, n.psubs)
|
||||
return found
|
||||
}
|
||||
|
||||
// We have a queue group subscription here
|
||||
if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
|
||||
n.qsubs[i], found = removeSubFromList(sub, n.qsubs[i])
|
||||
if len(n.qsubs[i]) == 0 {
|
||||
last := len(n.qsubs) - 1
|
||||
n.qsubs[i] = n.qsubs[last]
|
||||
n.qsubs[last] = nil
|
||||
n.qsubs = n.qsubs[:last]
|
||||
if len(n.qsubs) == 0 {
|
||||
n.qsubs = nil
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if we need to do a resize. This is for very large growth and then a
|
||||
// subsequent return to a more normal size from unsubscribe.
|
||||
func shrinkAsNeeded(sl []*subscription) []*subscription {
|
||||
lsl := len(sl)
|
||||
csl := cap(sl)
|
||||
// Don't bother if list not too big
|
||||
if csl <= 8 {
|
||||
return sl
|
||||
}
|
||||
pFree := float32(csl-lsl) / float32(csl)
|
||||
if pFree > 0.50 {
|
||||
return append([]*subscription(nil), sl...)
|
||||
}
|
||||
return sl
|
||||
}
|
||||
|
||||
// Count returns the number of subscriptions.
|
||||
func (s *Sublist) Count() uint32 {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// CacheCount returns the number of result sets in the cache.
|
||||
func (s *Sublist) CacheCount() int {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return len(s.cache)
|
||||
}
|
||||
|
||||
// Public stats for the sublist
|
||||
type SublistStats struct {
|
||||
NumSubs uint32 `json:"num_subscriptions"`
|
||||
NumCache uint32 `json:"num_cache"`
|
||||
NumInserts uint64 `json:"num_inserts"`
|
||||
NumRemoves uint64 `json:"num_removes"`
|
||||
NumMatches uint64 `json:"num_matches"`
|
||||
CacheHitRate float64 `json:"cache_hit_rate"`
|
||||
MaxFanout uint32 `json:"max_fanout"`
|
||||
AvgFanout float64 `json:"avg_fanout"`
|
||||
}
|
||||
|
||||
// Stats will return a stats structure for the current state.
|
||||
func (s *Sublist) Stats() *SublistStats {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
st := &SublistStats{}
|
||||
st.NumSubs = s.count
|
||||
st.NumCache = uint32(len(s.cache))
|
||||
st.NumInserts = s.inserts
|
||||
st.NumRemoves = s.removes
|
||||
st.NumMatches = s.matches
|
||||
if s.matches > 0 {
|
||||
st.CacheHitRate = float64(s.cacheHits) / float64(s.matches)
|
||||
}
|
||||
// whip through cache for fanout stats
|
||||
tot, max := 0, 0
|
||||
for _, r := range s.cache {
|
||||
l := len(r.psubs) + len(r.qsubs)
|
||||
tot += l
|
||||
if l > max {
|
||||
max = l
|
||||
}
|
||||
}
|
||||
st.MaxFanout = uint32(max)
|
||||
if tot > 0 {
|
||||
st.AvgFanout = float64(tot) / float64(len(s.cache))
|
||||
}
|
||||
return st
|
||||
}
|
||||
|
||||
// numLevels will return the maximum number of levels
|
||||
// contained in the Sublist tree.
|
||||
func (s *Sublist) numLevels() int {
|
||||
return visitLevel(s.root, 0)
|
||||
}
|
||||
|
||||
// visitLevel is used to descend the Sublist tree structure
|
||||
// recursively.
|
||||
func visitLevel(l *level, depth int) int {
|
||||
if l == nil || l.numNodes() == 0 {
|
||||
return depth
|
||||
}
|
||||
|
||||
depth++
|
||||
maxDepth := depth
|
||||
|
||||
for _, n := range l.nodes {
|
||||
if n == nil {
|
||||
continue
|
||||
}
|
||||
newDepth := visitLevel(n.next, depth)
|
||||
if newDepth > maxDepth {
|
||||
maxDepth = newDepth
|
||||
}
|
||||
}
|
||||
if l.pwc != nil {
|
||||
pwcDepth := visitLevel(l.pwc.next, depth)
|
||||
if pwcDepth > maxDepth {
|
||||
maxDepth = pwcDepth
|
||||
}
|
||||
}
|
||||
if l.fwc != nil {
|
||||
fwcDepth := visitLevel(l.fwc.next, depth)
|
||||
if fwcDepth > maxDepth {
|
||||
maxDepth = fwcDepth
|
||||
}
|
||||
}
|
||||
return maxDepth
|
||||
}
|
||||
|
||||
// IsValidSubject returns true if a subject is valid, false otherwise
|
||||
func IsValidSubject(subject string) bool {
|
||||
if subject == "" {
|
||||
return false
|
||||
}
|
||||
sfwc := false
|
||||
tokens := strings.Split(string(subject), tsep)
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 || sfwc {
|
||||
return false
|
||||
}
|
||||
if len(t) > 1 {
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case fwc:
|
||||
sfwc = true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsValidLiteralSubject returns true if a subject is valid and literal (no wildcards), false otherwise
|
||||
func IsValidLiteralSubject(subject string) bool {
|
||||
tokens := strings.Split(string(subject), tsep)
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(t) > 1 {
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case pwc, fwc:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchLiteral is used to test literal subjects, those that do not have any
|
||||
// wildcards, with a target subject. This is used in the cache layer.
|
||||
func matchLiteral(literal, subject string) bool {
|
||||
li := 0
|
||||
ll := len(literal)
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if li >= ll {
|
||||
return false
|
||||
}
|
||||
b := subject[i]
|
||||
switch b {
|
||||
case pwc:
|
||||
// Skip token in literal
|
||||
ll := len(literal)
|
||||
for {
|
||||
if li >= ll || literal[li] == btsep {
|
||||
li--
|
||||
break
|
||||
}
|
||||
li++
|
||||
}
|
||||
case fwc:
|
||||
return true
|
||||
default:
|
||||
if b != literal[li] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
li++
|
||||
}
|
||||
// Make sure we have processed all of the literal's chars.
|
||||
if li < ll {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
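For reviewers who want a feel for the subject-matching rules implemented by the sublist code removed above, here is a minimal, self-contained sketch (illustrative only, not part of this change). It assumes the usual NATS conventions: tokens separated by '.', '*' matching exactly one token, and '>' matching the remainder of the subject.

// Illustrative sketch only: NATS-style subject matching, not the gnatsd implementation.
package main

import (
	"fmt"
	"strings"
)

// matchSubject reports whether a literal subject matches a pattern that may
// contain the '*' (one token) and '>' (rest of subject) wildcards.
func matchSubject(pattern, subject string) bool {
	pt := strings.Split(pattern, ".")
	st := strings.Split(subject, ".")
	for i, tok := range pt {
		if tok == ">" {
			return true // full wildcard matches everything that follows
		}
		if i >= len(st) {
			return false // pattern is longer than the subject
		}
		if tok != "*" && tok != st[i] {
			return false
		}
	}
	return len(pt) == len(st)
}

func main() {
	fmt.Println(matchSubject("foo.*.baz", "foo.bar.baz")) // true
	fmt.Println(matchSubject("foo.>", "foo.bar.baz"))      // true
	fmt.Println(matchSubject("foo.bar", "foo.bar.baz"))    // false
}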
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
|
@@ -1,56 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nuid"
|
||||
)
|
||||
|
||||
// Use nuid.
|
||||
func genID() string {
|
||||
return nuid.Next()
|
||||
}
|
||||
|
||||
// ASCII digits 0-9
|
||||
const (
|
||||
asciiZero = 48
|
||||
asciiNine = 57
|
||||
)
|
||||
|
||||
// parseSize expects decimal positive numbers. We
|
||||
// return -1 to signal error
|
||||
func parseSize(d []byte) (n int) {
|
||||
if len(d) == 0 {
|
||||
return -1
|
||||
}
|
||||
for _, dec := range d {
|
||||
if dec < asciiZero || dec > asciiNine {
|
||||
return -1
|
||||
}
|
||||
n = n*10 + (int(dec) - asciiZero)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// parseInt64 expects decimal positive numbers. We
|
||||
// return -1 to signal error
|
||||
func parseInt64(d []byte) (n int64) {
|
||||
if len(d) == 0 {
|
||||
return -1
|
||||
}
|
||||
for _, dec := range d {
|
||||
if dec < asciiZero || dec > asciiNine {
|
||||
return -1
|
||||
}
|
||||
n = n*10 + (int64(dec) - asciiZero)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Helper to move from float seconds to time.Duration
|
||||
func secondsToDuration(seconds float64) time.Duration {
|
||||
ttl := seconds * float64(time.Second)
|
||||
return time.Duration(ttl)
|
||||
}
|
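A quick, self-contained illustration of how the removed parseSize helper behaves: ASCII digits only, with -1 signaling any invalid input. This is a sketch for review context, not code from the repository.

// Illustrative sketch only: mirrors the behavior of the removed parseSize helper.
package main

import "fmt"

func parseSize(d []byte) int {
	if len(d) == 0 {
		return -1
	}
	n := 0
	for _, c := range d {
		if c < '0' || c > '9' {
			return -1 // any non-digit byte invalidates the whole input
		}
		n = n*10 + int(c-'0')
	}
	return n
}

func main() {
	fmt.Println(parseSize([]byte("1024"))) // 1024
	fmt.Println(parseSize([]byte("12kb"))) // -1
	fmt.Println(parseSize([]byte("")))     // -1
}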
399
vendor/github.com/nats-io/gnatsd/test/test.go
generated
vendored
399
vendor/github.com/nats-io/gnatsd/test/test.go
generated
vendored
|
@@ -1,399 +0,0 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/auth"
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
)
|
||||
|
||||
const natsServerExe = "../gnatsd"
|
||||
|
||||
type natsServer struct {
|
||||
args []string
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
||||
// So we can pass tests and benchmarks.
|
||||
type tLogger interface {
|
||||
Fatalf(format string, args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
// DefaultTestOptions are default options for the unit tests.
|
||||
var DefaultTestOptions = server.Options{
|
||||
Host: "localhost",
|
||||
Port: 4222,
|
||||
NoLog: true,
|
||||
NoSigs: true,
|
||||
MaxControlLine: 256,
|
||||
}
|
||||
|
||||
// RunDefaultServer starts a new Go routine based server using the default options
|
||||
func RunDefaultServer() *server.Server {
|
||||
return RunServer(&DefaultTestOptions)
|
||||
}
|
||||
|
||||
// RunServer starts a new Go routine based server
|
||||
func RunServer(opts *server.Options) *server.Server {
|
||||
return RunServerWithAuth(opts, nil)
|
||||
}
|
||||
|
||||
// LoadConfig loads a configuration from a filename
|
||||
func LoadConfig(configFile string) (opts *server.Options) {
|
||||
opts, err := server.ProcessConfigFile(configFile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error processing configuration file: %v", err))
|
||||
}
|
||||
opts.NoSigs, opts.NoLog = true, true
|
||||
return
|
||||
}
|
||||
|
||||
// RunServerWithConfig starts a new Go routine based server with a configuration file.
|
||||
func RunServerWithConfig(configFile string) (srv *server.Server, opts *server.Options) {
|
||||
opts = LoadConfig(configFile)
|
||||
|
||||
// Check for auth
|
||||
var a server.Auth
|
||||
if opts.Authorization != "" {
|
||||
a = &auth.Token{Token: opts.Authorization}
|
||||
}
|
||||
if opts.Username != "" {
|
||||
a = &auth.Plain{Username: opts.Username, Password: opts.Password}
|
||||
}
|
||||
if opts.Users != nil {
|
||||
a = auth.NewMultiUser(opts.Users)
|
||||
}
|
||||
srv = RunServerWithAuth(opts, a)
|
||||
return
|
||||
}
|
||||
|
||||
// RunServerWithAuth starts a new Go routine based server with auth
|
||||
func RunServerWithAuth(opts *server.Options, auth server.Auth) *server.Server {
|
||||
if opts == nil {
|
||||
opts = &DefaultTestOptions
|
||||
}
|
||||
s := server.New(opts)
|
||||
if s == nil {
|
||||
panic("No NATS Server object returned.")
|
||||
}
|
||||
|
||||
if auth != nil {
|
||||
s.SetClientAuthMethod(auth)
|
||||
}
|
||||
|
||||
// Run server in Go routine.
|
||||
go s.Start()
|
||||
|
||||
// Wait for accept loop(s) to be started
|
||||
if !s.ReadyForConnections(10 * time.Second) {
|
||||
panic("Unable to start NATS Server in Go Routine")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func stackFatalf(t tLogger, f string, args ...interface{}) {
|
||||
lines := make([]string, 0, 32)
|
||||
msg := fmt.Sprintf(f, args...)
|
||||
lines = append(lines, msg)
|
||||
|
||||
// Ignore ourselves
|
||||
_, testFile, _, _ := runtime.Caller(0)
|
||||
|
||||
// Generate the Stack of callers:
|
||||
for i := 0; true; i++ {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if file == testFile {
|
||||
continue
|
||||
}
|
||||
msg := fmt.Sprintf("%d - %s:%d", i, file, line)
|
||||
lines = append(lines, msg)
|
||||
}
|
||||
|
||||
t.Fatalf("%s", strings.Join(lines, "\n"))
|
||||
}
|
||||
|
||||
func acceptRouteConn(t tLogger, host string, timeout time.Duration) net.Conn {
|
||||
l, e := net.Listen("tcp", host)
|
||||
if e != nil {
|
||||
stackFatalf(t, "Error listening for route connection on %v: %v", host, e)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
tl := l.(*net.TCPListener)
|
||||
tl.SetDeadline(time.Now().Add(timeout))
|
||||
conn, err := l.Accept()
|
||||
tl.SetDeadline(time.Time{})
|
||||
|
||||
if err != nil {
|
||||
stackFatalf(t, "Did not receive a route connection request: %v", err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
func createRouteConn(t tLogger, host string, port int) net.Conn {
|
||||
return createClientConn(t, host, port)
|
||||
}
|
||||
|
||||
func createClientConn(t tLogger, host string, port int) net.Conn {
|
||||
addr := fmt.Sprintf("%s:%d", host, port)
|
||||
c, err := net.DialTimeout("tcp", addr, 1*time.Second)
|
||||
if err != nil {
|
||||
stackFatalf(t, "Could not connect to server: %v\n", err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func checkSocket(t tLogger, addr string, wait time.Duration) {
|
||||
end := time.Now().Add(wait)
|
||||
for time.Now().Before(end) {
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
if err != nil {
|
||||
// Retry after 50ms
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
conn.Close()
|
||||
// Wait a bit to give a chance to the server to remove this
|
||||
// "client" from its state, which may otherwise interfere with
|
||||
// some tests.
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
return
|
||||
}
|
||||
// We have failed to connect to the socket in the time allowed.
|
||||
t.Fatalf("Failed to connect to the socket: %q", addr)
|
||||
}
|
||||
|
||||
func checkInfoMsg(t tLogger, c net.Conn) server.Info {
|
||||
buf := expectResult(t, c, infoRe)
|
||||
js := infoRe.FindAllSubmatch(buf, 1)[0][1]
|
||||
var sinfo server.Info
|
||||
err := json.Unmarshal(js, &sinfo)
|
||||
if err != nil {
|
||||
stackFatalf(t, "Could not unmarshal INFO json: %v\n", err)
|
||||
}
|
||||
return sinfo
|
||||
}
|
||||
|
||||
func doConnect(t tLogger, c net.Conn, verbose, pedantic, ssl bool) {
|
||||
checkInfoMsg(t, c)
|
||||
cs := fmt.Sprintf("CONNECT {\"verbose\":%v,\"pedantic\":%v,\"ssl_required\":%v}\r\n", verbose, pedantic, ssl)
|
||||
sendProto(t, c, cs)
|
||||
}
|
||||
|
||||
func doDefaultConnect(t tLogger, c net.Conn) {
|
||||
// Basic Connect
|
||||
doConnect(t, c, false, false, false)
|
||||
}
|
||||
|
||||
const connectProto = "CONNECT {\"verbose\":false,\"user\":\"%s\",\"pass\":\"%s\",\"name\":\"%s\"}\r\n"
|
||||
|
||||
func doRouteAuthConnect(t tLogger, c net.Conn, user, pass, id string) {
|
||||
cs := fmt.Sprintf(connectProto, user, pass, id)
|
||||
sendProto(t, c, cs)
|
||||
}
|
||||
|
||||
func setupRouteEx(t tLogger, c net.Conn, opts *server.Options, id string) (sendFun, expectFun) {
|
||||
user := opts.Cluster.Username
|
||||
pass := opts.Cluster.Password
|
||||
doRouteAuthConnect(t, c, user, pass, id)
|
||||
return sendCommand(t, c), expectCommand(t, c)
|
||||
}
|
||||
|
||||
func setupRoute(t tLogger, c net.Conn, opts *server.Options) (sendFun, expectFun) {
|
||||
u := make([]byte, 16)
|
||||
io.ReadFull(rand.Reader, u)
|
||||
id := fmt.Sprintf("ROUTER:%s", hex.EncodeToString(u))
|
||||
return setupRouteEx(t, c, opts, id)
|
||||
}
|
||||
|
||||
func setupConn(t tLogger, c net.Conn) (sendFun, expectFun) {
|
||||
doDefaultConnect(t, c)
|
||||
return sendCommand(t, c), expectCommand(t, c)
|
||||
}
|
||||
|
||||
func setupConnWithProto(t tLogger, c net.Conn, proto int) (sendFun, expectFun) {
|
||||
checkInfoMsg(t, c)
|
||||
cs := fmt.Sprintf("CONNECT {\"verbose\":%v,\"pedantic\":%v,\"ssl_required\":%v,\"protocol\":%d}\r\n", false, false, false, proto)
|
||||
sendProto(t, c, cs)
|
||||
return sendCommand(t, c), expectCommand(t, c)
|
||||
}
|
||||
|
||||
type sendFun func(string)
|
||||
type expectFun func(*regexp.Regexp) []byte
|
||||
|
||||
// Closure version for easier reading
|
||||
func sendCommand(t tLogger, c net.Conn) sendFun {
|
||||
return func(op string) {
|
||||
sendProto(t, c, op)
|
||||
}
|
||||
}
|
||||
|
||||
// Closure version for easier reading
|
||||
func expectCommand(t tLogger, c net.Conn) expectFun {
|
||||
return func(re *regexp.Regexp) []byte {
|
||||
return expectResult(t, c, re)
|
||||
}
|
||||
}
|
||||
|
||||
// Send the protocol command to the server.
|
||||
func sendProto(t tLogger, c net.Conn, op string) {
|
||||
n, err := c.Write([]byte(op))
|
||||
if err != nil {
|
||||
stackFatalf(t, "Error writing command to conn: %v\n", err)
|
||||
}
|
||||
if n != len(op) {
|
||||
stackFatalf(t, "Partial write: %d vs %d\n", n, len(op))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
infoRe = regexp.MustCompile(`INFO\s+([^\r\n]+)\r\n`)
|
||||
pingRe = regexp.MustCompile(`PING\r\n`)
|
||||
pongRe = regexp.MustCompile(`PONG\r\n`)
|
||||
msgRe = regexp.MustCompile(`(?:(?:MSG\s+([^\s]+)\s+([^\s]+)\s+(([^\s]+)[^\S\r\n]+)?(\d+)\s*\r\n([^\\r\\n]*?)\r\n)+?)`)
|
||||
okRe = regexp.MustCompile(`\A\+OK\r\n`)
|
||||
errRe = regexp.MustCompile(`\A\-ERR\s+([^\r\n]+)\r\n`)
|
||||
subRe = regexp.MustCompile(`SUB\s+([^\s]+)((\s+)([^\s]+))?\s+([^\s]+)\r\n`)
|
||||
unsubRe = regexp.MustCompile(`UNSUB\s+([^\s]+)(\s+(\d+))?\r\n`)
|
||||
unsubmaxRe = regexp.MustCompile(`UNSUB\s+([^\s]+)(\s+(\d+))\r\n`)
|
||||
unsubnomaxRe = regexp.MustCompile(`UNSUB\s+([^\s]+)\r\n`)
|
||||
connectRe = regexp.MustCompile(`CONNECT\s+([^\r\n]+)\r\n`)
|
||||
)
|
||||
|
||||
const (
|
||||
subIndex = 1
|
||||
sidIndex = 2
|
||||
replyIndex = 4
|
||||
lenIndex = 5
|
||||
msgIndex = 6
|
||||
)
|
||||
|
||||
// Test result from server against regexp
|
||||
func expectResult(t tLogger, c net.Conn, re *regexp.Regexp) []byte {
|
||||
expBuf := make([]byte, 32768)
|
||||
// Wait for commands to be processed and results queued for read
|
||||
c.SetReadDeadline(time.Now().Add(2 * time.Second))
|
||||
n, err := c.Read(expBuf)
|
||||
c.SetReadDeadline(time.Time{})
|
||||
|
||||
if n <= 0 && err != nil {
|
||||
stackFatalf(t, "Error reading from conn: %v\n", err)
|
||||
}
|
||||
buf := expBuf[:n]
|
||||
|
||||
if !re.Match(buf) {
|
||||
stackFatalf(t, "Response did not match expected: \n\tReceived:'%q'\n\tExpected:'%s'\n", buf, re)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func expectNothing(t tLogger, c net.Conn) {
|
||||
expBuf := make([]byte, 32)
|
||||
c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
|
||||
n, err := c.Read(expBuf)
|
||||
c.SetReadDeadline(time.Time{})
|
||||
if err == nil && n > 0 {
|
||||
stackFatalf(t, "Expected nothing, received: '%q'\n", expBuf[:n])
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that we got what we expected.
|
||||
func checkMsg(t tLogger, m [][]byte, subject, sid, reply, len, msg string) {
|
||||
if string(m[subIndex]) != subject {
|
||||
stackFatalf(t, "Did not get correct subject: expected '%s' got '%s'\n", subject, m[subIndex])
|
||||
}
|
||||
if sid != "" && string(m[sidIndex]) != sid {
|
||||
stackFatalf(t, "Did not get correct sid: expected '%s' got '%s'\n", sid, m[sidIndex])
|
||||
}
|
||||
if string(m[replyIndex]) != reply {
|
||||
stackFatalf(t, "Did not get correct reply: expected '%s' got '%s'\n", reply, m[replyIndex])
|
||||
}
|
||||
if string(m[lenIndex]) != len {
|
||||
stackFatalf(t, "Did not get correct msg length: expected '%s' got '%s'\n", len, m[lenIndex])
|
||||
}
|
||||
if string(m[msgIndex]) != msg {
|
||||
stackFatalf(t, "Did not get correct msg: expected '%s' got '%s'\n", msg, m[msgIndex])
|
||||
}
|
||||
}
|
||||
|
||||
// Closure for expectMsgs
|
||||
func expectMsgsCommand(t tLogger, ef expectFun) func(int) [][][]byte {
|
||||
return func(expected int) [][][]byte {
|
||||
buf := ef(msgRe)
|
||||
matches := msgRe.FindAllSubmatch(buf, -1)
|
||||
if len(matches) != expected {
|
||||
stackFatalf(t, "Did not get correct # msgs: %d vs %d\n", len(matches), expected)
|
||||
}
|
||||
return matches
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that the matches include at least one of the sids. Useful for checking
|
||||
// that we received messages on a certain queue group.
|
||||
func checkForQueueSid(t tLogger, matches [][][]byte, sids []string) {
|
||||
seen := make(map[string]int, len(sids))
|
||||
for _, sid := range sids {
|
||||
seen[sid] = 0
|
||||
}
|
||||
for _, m := range matches {
|
||||
sid := string(m[sidIndex])
|
||||
if _, ok := seen[sid]; ok {
|
||||
seen[sid]++
|
||||
}
|
||||
}
|
||||
// Make sure we only see one and exactly one.
|
||||
total := 0
|
||||
for _, n := range seen {
|
||||
total += n
|
||||
}
|
||||
if total != 1 {
|
||||
stackFatalf(t, "Did not get a msg for queue sids group: expected 1 got %d\n", total)
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that the matches include all of the sids. Useful for checking
|
||||
// that we received messages on all subscribers.
|
||||
func checkForPubSids(t tLogger, matches [][][]byte, sids []string) {
|
||||
seen := make(map[string]int, len(sids))
|
||||
for _, sid := range sids {
|
||||
seen[sid] = 0
|
||||
}
|
||||
for _, m := range matches {
|
||||
sid := string(m[sidIndex])
|
||||
if _, ok := seen[sid]; ok {
|
||||
seen[sid]++
|
||||
}
|
||||
}
|
||||
// Make sure we only see one and exactly one for each sid.
|
||||
for sid, n := range seen {
|
||||
if n != 1 {
|
||||
stackFatalf(t, "Did not get a msg for sid[%s]: expected 1 got %d\n", sid, n)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to generate next opts to make sure no port conflicts etc.
|
||||
func nextServerOpts(opts *server.Options) *server.Options {
|
||||
nopts := *opts
|
||||
nopts.Port++
|
||||
nopts.Cluster.Port++
|
||||
nopts.HTTPPort++
|
||||
return &nopts
|
||||
}
|
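The test helpers removed above poll a TCP address until it accepts connections. The following standalone sketch shows the same retry-until-deadline pattern outside a test; the address and timeout are hypothetical example values.

// Illustrative sketch only: the "wait for a socket to become reachable" pattern
// from the removed checkSocket helper, returning an error instead of failing a test.
package main

import (
	"fmt"
	"net"
	"time"
)

func waitForSocket(addr string, wait time.Duration) error {
	deadline := time.Now().Add(wait)
	for time.Now().Before(deadline) {
		conn, err := net.Dial("tcp", addr)
		if err != nil {
			time.Sleep(50 * time.Millisecond) // not up yet, retry shortly
			continue
		}
		conn.Close()
		return nil
	}
	return fmt.Errorf("socket %q not reachable within %v", addr, wait)
}

func main() {
	if err := waitForSocket("127.0.0.1:4222", 2*time.Second); err != nil {
		fmt.Println(err)
	}
}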
75
vendor/github.com/nats-io/gnatsd/util/mkpasswd.go
generated
vendored
75
vendor/github.com/nats-io/gnatsd/util/mkpasswd.go
generated
vendored
|
@@ -1,75 +0,0 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
func usage() {
|
||||
log.Fatalf("Usage: mkpasswd [-p <stdin password>] [-c COST] \n")
|
||||
}
|
||||
|
||||
const (
|
||||
// Make sure the password is reasonably long to generate enough entropy.
|
||||
PasswordLength = 22
|
||||
// Common advice from the past couple of years suggests that 10 should be sufficient.
|
||||
// Up that a little, to 11. Feel free to raise this higher if this value from 2015 is
|
||||
// no longer appropriate. Min is 4, Max is 31.
|
||||
DefaultCost = 11
|
||||
)
|
||||
|
||||
func main() {
|
||||
var pw = flag.Bool("p", false, "Input password via stdin")
|
||||
var cost = flag.Int("c", DefaultCost, "The cost weight, range of 4-31 (11)")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
var password string
|
||||
|
||||
if *pw {
|
||||
fmt.Printf("Enter Password: ")
|
||||
bytePassword, _ := terminal.ReadPassword(0)
|
||||
fmt.Printf("\nReenter Password: ")
|
||||
bytePassword2, _ := terminal.ReadPassword(0)
|
||||
if !bytes.Equal(bytePassword, bytePassword2) {
|
||||
log.Fatalf("Error, passwords do not match\n")
|
||||
}
|
||||
password = string(bytePassword)
|
||||
fmt.Printf("\n")
|
||||
} else {
|
||||
password = genPassword()
|
||||
fmt.Printf("pass: %s\n", password)
|
||||
}
|
||||
|
||||
cb, err := bcrypt.GenerateFromPassword([]byte(password), *cost)
|
||||
if err != nil {
|
||||
log.Fatalf("Error producing bcrypt hash: %v\n", err)
|
||||
}
|
||||
fmt.Printf("bcrypt hash: %s\n", cb)
|
||||
}
|
||||
|
||||
func genPassword() string {
|
||||
var ch = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@$#%^&*()")
|
||||
b := make([]byte, PasswordLength)
|
||||
max := big.NewInt(int64(len(ch)))
|
||||
for i := range b {
|
||||
ri, err := rand.Int(rand.Reader, max)
|
||||
if err != nil {
|
||||
log.Fatalf("Error producing random integer: %v\n", err)
|
||||
}
|
||||
b[i] = ch[int(ri.Int64())]
|
||||
}
|
||||
return string(b)
|
||||
}
|
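The mkpasswd utility removed above is a thin wrapper around golang.org/x/crypto/bcrypt. As a hedged sketch of the same flow, hashing and then verifying a password looks roughly like this (the literal password and the cost of 11 are just example values mirroring DefaultCost):

// Illustrative sketch only: bcrypt hashing and verification with golang.org/x/crypto/bcrypt.
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), 11)
	if err != nil {
		panic(err)
	}
	fmt.Printf("bcrypt hash: %s\n", hash)

	// CompareHashAndPassword returns nil when the password matches the hash.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err == nil {
		fmt.Println("password accepted")
	}
}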
37
vendor/github.com/nats-io/gnatsd/util/tls.go
generated
vendored
37
vendor/github.com/nats-io/gnatsd/util/tls.go
generated
vendored
|
@@ -1,37 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
// +build go1.7
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
|
||||
// This is temporary, until this is provided by the language.
|
||||
// https://go-review.googlesource.com/#/c/28075/
|
||||
func CloneTLSConfig(c *tls.Config) *tls.Config {
|
||||
return &tls.Config{
|
||||
Rand: c.Rand,
|
||||
Time: c.Time,
|
||||
Certificates: c.Certificates,
|
||||
NameToCertificate: c.NameToCertificate,
|
||||
GetCertificate: c.GetCertificate,
|
||||
RootCAs: c.RootCAs,
|
||||
NextProtos: c.NextProtos,
|
||||
ServerName: c.ServerName,
|
||||
ClientAuth: c.ClientAuth,
|
||||
ClientCAs: c.ClientCAs,
|
||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||
CipherSuites: c.CipherSuites,
|
||||
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||
SessionTicketKey: c.SessionTicketKey,
|
||||
ClientSessionCache: c.ClientSessionCache,
|
||||
MinVersion: c.MinVersion,
|
||||
MaxVersion: c.MaxVersion,
|
||||
CurvePreferences: c.CurvePreferences,
|
||||
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
|
||||
Renegotiation: c.Renegotiation,
|
||||
}
|
||||
}
|
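A note on the removed CloneTLSConfig helper: from Go 1.8 onward the standard library exposes (*tls.Config).Clone, so the manual field-by-field copy is no longer needed on newer toolchains. A minimal sketch with illustrative values:

// Illustrative sketch only: using the standard library's Clone on Go 1.8+.
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	base := &tls.Config{ServerName: "example.com", MinVersion: tls.VersionTLS12}
	clone := base.Clone() // copies the exported configuration fields
	clone.ServerName = "other.example.com"
	fmt.Println(base.ServerName, clone.ServerName) // example.com other.example.com
}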
35
vendor/github.com/nats-io/gnatsd/util/tls_pre17.go
generated
vendored
35
vendor/github.com/nats-io/gnatsd/util/tls_pre17.go
generated
vendored
|
@@ -1,35 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
// +build go1.5,!go1.7
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
|
||||
// This is temporary, until this is provided by the language.
|
||||
// https://go-review.googlesource.com/#/c/28075/
|
||||
func CloneTLSConfig(c *tls.Config) *tls.Config {
|
||||
return &tls.Config{
|
||||
Rand: c.Rand,
|
||||
Time: c.Time,
|
||||
Certificates: c.Certificates,
|
||||
NameToCertificate: c.NameToCertificate,
|
||||
GetCertificate: c.GetCertificate,
|
||||
RootCAs: c.RootCAs,
|
||||
NextProtos: c.NextProtos,
|
||||
ServerName: c.ServerName,
|
||||
ClientAuth: c.ClientAuth,
|
||||
ClientCAs: c.ClientCAs,
|
||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||
CipherSuites: c.CipherSuites,
|
||||
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||
SessionTicketKey: c.SessionTicketKey,
|
||||
ClientSessionCache: c.ClientSessionCache,
|
||||
MinVersion: c.MinVersion,
|
||||
MaxVersion: c.MaxVersion,
|
||||
CurvePreferences: c.CurvePreferences,
|
||||
}
|
||||
}
|
20
vendor/github.com/nats-io/nats-streaming-server/LICENSE
generated
vendored
20
vendor/github.com/nats-io/nats-streaming-server/LICENSE
generated
vendored
|
@@ -1,20 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
115
vendor/github.com/nats-io/nats-streaming-server/server/client.go
generated
vendored
115
vendor/github.com/nats-io/nats-streaming-server/server/client.go
generated
vendored
|
@@ -1,115 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/nats-io/nats-streaming-server/stores"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This is a proxy to the store interface.
|
||||
type clientStore struct {
|
||||
store stores.Store
|
||||
}
|
||||
|
||||
// client has information needed by the server. A client is also
|
||||
// stored in a stores.Client object (which contains ID and HbInbox).
|
||||
type client struct {
|
||||
sync.RWMutex
|
||||
unregistered bool
|
||||
hbt *time.Timer
|
||||
fhb int
|
||||
subs []*subState
|
||||
}
|
||||
|
||||
// Register a client if new, otherwise returns the client already registered
|
||||
// and `false` to indicate that the client is not new.
|
||||
func (cs *clientStore) Register(ID, hbInbox string) (*stores.Client, bool, error) {
|
||||
// Will be gc'ed if we fail to register, that's ok.
|
||||
c := &client{subs: make([]*subState, 0, 4)}
|
||||
sc, isNew, err := cs.store.AddClient(ID, hbInbox, c)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return sc, isNew, nil
|
||||
}
|
||||
|
||||
// Unregister a client.
|
||||
func (cs *clientStore) Unregister(ID string) *stores.Client {
|
||||
sc := cs.store.DeleteClient(ID)
|
||||
if sc != nil {
|
||||
c := sc.UserData.(*client)
|
||||
c.Lock()
|
||||
c.unregistered = true
|
||||
c.Unlock()
|
||||
}
|
||||
return sc
|
||||
}
|
||||
|
||||
// IsValid returns true if the client is registered, false otherwise.
|
||||
func (cs *clientStore) IsValid(ID string) bool {
|
||||
return cs.store.GetClient(ID) != nil
|
||||
}
|
||||
|
||||
// Lookup a client
|
||||
func (cs *clientStore) Lookup(ID string) *client {
|
||||
sc := cs.store.GetClient(ID)
|
||||
if sc != nil {
|
||||
return sc.UserData.(*client)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSubs returns the list of subscriptions for the client identified by ID,
|
||||
// or nil if no such client is found.
|
||||
func (cs *clientStore) GetSubs(ID string) []*subState {
|
||||
c := cs.Lookup(ID)
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
c.RLock()
|
||||
subs := make([]*subState, len(c.subs))
|
||||
copy(subs, c.subs)
|
||||
c.RUnlock()
|
||||
return subs
|
||||
}
|
||||
|
||||
// AddSub adds the subscription to the client identified by clientID
|
||||
// and returns true only if the client has not been unregistered,
|
||||
// otherwise returns false.
|
||||
func (cs *clientStore) AddSub(ID string, sub *subState) bool {
|
||||
sc := cs.store.GetClient(ID)
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
c := sc.UserData.(*client)
|
||||
c.Lock()
|
||||
if c.unregistered {
|
||||
c.Unlock()
|
||||
return false
|
||||
}
|
||||
c.subs = append(c.subs, sub)
|
||||
c.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
// RemoveSub removes the subscription from the client identified by clientID
|
||||
// and returns true only if the client has not been unregistered and
|
||||
// the subscription was found, otherwise returns false.
|
||||
func (cs *clientStore) RemoveSub(ID string, sub *subState) bool {
|
||||
sc := cs.store.GetClient(ID)
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
c := sc.UserData.(*client)
|
||||
c.Lock()
|
||||
if c.unregistered {
|
||||
c.Unlock()
|
||||
return false
|
||||
}
|
||||
removed := false
|
||||
c.subs, removed = sub.deleteFromList(c.subs)
|
||||
c.Unlock()
|
||||
return removed
|
||||
}
|
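The client store removed above hands out snapshots of a client's subscription list while holding a read lock, so callers can iterate without racing concurrent AddSub/RemoveSub calls. A standalone sketch of that copy-under-RLock pattern (type and field names are made up for illustration):

// Illustrative sketch only: snapshot a slice under a read lock.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	sync.RWMutex
	subs []string
}

// snapshot returns a copy of the current subscription list.
func (r *registry) snapshot() []string {
	r.RLock()
	defer r.RUnlock()
	out := make([]string, len(r.subs))
	copy(out, r.subs)
	return out
}

func (r *registry) add(s string) {
	r.Lock()
	r.subs = append(r.subs, s)
	r.Unlock()
}

func main() {
	r := &registry{}
	r.add("foo")
	r.add("bar")
	fmt.Println(r.snapshot()) // [foo bar]
}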
291
vendor/github.com/nats-io/nats-streaming-server/server/conf.go
generated
vendored
291
vendor/github.com/nats-io/nats-streaming-server/server/conf.go
generated
vendored
|
@@ -1,291 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/conf"
|
||||
"github.com/nats-io/nats-streaming-server/stores"
|
||||
)
|
||||
|
||||
// ProcessConfigFile parses the configuration file `configFile` and updates
|
||||
// the given Streaming options `opts`.
|
||||
func ProcessConfigFile(configFile string, opts *Options) error {
|
||||
data, err := ioutil.ReadFile(configFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := conf.Parse(string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
name := strings.ToLower(k)
|
||||
switch name {
|
||||
case "id", "cid", "cluster_id":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.ID = v.(string)
|
||||
case "discover_prefix":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.DiscoverPrefix = v.(string)
|
||||
case "st", "store_type", "store", "storetype":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
switch strings.ToUpper(v.(string)) {
|
||||
case stores.TypeFile:
|
||||
opts.StoreType = stores.TypeFile
|
||||
case stores.TypeMemory:
|
||||
opts.StoreType = stores.TypeMemory
|
||||
default:
|
||||
return fmt.Errorf("Unknown store type: %v", v.(string))
|
||||
}
|
||||
case "dir", "datastore":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FilestoreDir = v.(string)
|
||||
case "sd", "stan_debug":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.Debug = v.(bool)
|
||||
case "sv", "stan_trace":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.Trace = v.(bool)
|
||||
case "ns", "nats_server", "nats_server_url":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.NATSServerURL = v.(string)
|
||||
case "secure":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.Secure = v.(bool)
|
||||
case "tls":
|
||||
if err := parseTLS(v, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
case "limits", "store_limits", "storelimits":
|
||||
if err := parseStoreLimits(v, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
case "file", "file_options":
|
||||
if err := parseFileOptions(v, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkType returns a formatted error if `v` is not of the expected kind.
|
||||
func checkType(name string, kind reflect.Kind, v interface{}) error {
|
||||
actualKind := reflect.TypeOf(v).Kind()
|
||||
if actualKind != kind {
|
||||
return fmt.Errorf("Parameter %q value is expected to be %v, got %v",
|
||||
name, kind.String(), actualKind.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseTLS updates `opts` with TLS config
|
||||
func parseTLS(itf interface{}, opts *Options) error {
|
||||
m, ok := itf.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected TLS to be a map/struct, got %v", itf)
|
||||
}
|
||||
for k, v := range m {
|
||||
name := strings.ToLower(k)
|
||||
switch name {
|
||||
case "client_cert":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.ClientCert = v.(string)
|
||||
case "client_key":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.ClientKey = v.(string)
|
||||
case "client_ca", "client_cacert":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.ClientCA = v.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseStoreLimits updates `opts` with store limits
|
||||
func parseStoreLimits(itf interface{}, opts *Options) error {
|
||||
m, ok := itf.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected store limits to be a map/struct, got %v", itf)
|
||||
}
|
||||
for k, v := range m {
|
||||
name := strings.ToLower(k)
|
||||
switch name {
|
||||
case "mc", "max_channels", "maxchannels":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.MaxChannels = int(v.(int64))
|
||||
case "channels", "channels_limits", "channelslimits", "per_channel", "per_channel_limits":
|
||||
if err := parsePerChannelLimits(v, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
// Check for the global limits (MaxMsgs, MaxBytes, etc.)
|
||||
if err := parseChannelLimits(&opts.ChannelLimits, k, name, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseChannelLimits updates `cl` with channel limits.
|
||||
func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}) error {
|
||||
switch name {
|
||||
case "msu", "max_subs", "max_subscriptions", "maxsubscriptions":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
cl.MaxSubscriptions = int(v.(int64))
|
||||
case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
cl.MaxMsgs = int(v.(int64))
|
||||
case "mb", "max_bytes", "maxbytes":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
cl.MaxBytes = v.(int64)
|
||||
case "ma", "max_age", "maxage":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
dur, err := time.ParseDuration(v.(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cl.MaxAge = dur
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parsePerChannelLimits updates `opts` with per channel limits.
|
||||
func parsePerChannelLimits(itf interface{}, opts *Options) error {
|
||||
m, ok := itf.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected per channel limits to be a map/struct, got %v", itf)
|
||||
}
|
||||
for channelName, limits := range m {
|
||||
limitsMap, ok := limits.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected channel limits to be a map/struct, got %v", limits)
|
||||
}
|
||||
cl := &stores.ChannelLimits{}
|
||||
for k, v := range limitsMap {
|
||||
name := strings.ToLower(k)
|
||||
if err := parseChannelLimits(cl, k, name, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sl := &opts.StoreLimits
|
||||
sl.AddPerChannel(channelName, cl)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFileOptions(itf interface{}, opts *Options) error {
|
||||
m, ok := itf.(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("Expected file options to be a map/struct, got %v", itf)
|
||||
}
|
||||
for k, v := range m {
|
||||
name := strings.ToLower(k)
|
||||
switch name {
|
||||
case "compact", "compact_enabled":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.CompactEnabled = v.(bool)
|
||||
case "compact_frag", "compact_fragmentation":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.CompactFragmentation = int(v.(int64))
|
||||
case "compact_interval":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.CompactInterval = int(v.(int64))
|
||||
case "compact_min_size":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.CompactMinFileSize = v.(int64)
|
||||
case "buffer_size":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.BufferSize = int(v.(int64))
|
||||
case "crc", "do_crc":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.DoCRC = v.(bool)
|
||||
case "crc_poly":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.CRCPolynomial = v.(int64)
|
||||
case "sync", "do_sync", "sync_on_flush":
|
||||
if err := checkType(k, reflect.Bool, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.DoSync = v.(bool)
|
||||
case "slice_max_msgs", "slice_max_count", "slice_msgs", "slice_count":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.SliceMaxMsgs = int(v.(int64))
|
||||
case "slice_max_bytes", "slice_max_size", "slice_bytes", "slice_size":
|
||||
if err := checkType(k, reflect.Int64, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.SliceMaxBytes = v.(int64)
|
||||
case "slice_max_age", "slice_age", "slice_max_time", "slice_time_limit":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
dur, err := time.ParseDuration(v.(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.SliceMaxAge = dur
|
||||
case "slice_archive_script", "slice_archive", "slice_script":
|
||||
if err := checkType(k, reflect.String, v); err != nil {
|
||||
return err
|
||||
}
|
||||
opts.FileStoreOpts.SliceArchiveScript = v.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
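Because the removed conf.go receives parsed configuration values as interface{} (int64, bool, or string), each option is kind-checked with reflect before use, and durations are re-parsed from strings. A self-contained sketch of that pattern, using made-up option names:

// Illustrative sketch only: kind-checking parsed config values before use.
package main

import (
	"fmt"
	"reflect"
	"time"
)

func checkKind(name string, kind reflect.Kind, v interface{}) error {
	if actual := reflect.TypeOf(v).Kind(); actual != kind {
		return fmt.Errorf("parameter %q expected to be %v, got %v", name, kind, actual)
	}
	return nil
}

func main() {
	// Stand-in for the map a config parser would return.
	m := map[string]interface{}{"max_msgs": int64(1000000), "max_age": "24h"}

	if err := checkKind("max_msgs", reflect.Int64, m["max_msgs"]); err == nil {
		fmt.Println("max_msgs:", int(m["max_msgs"].(int64)))
	}
	if err := checkKind("max_age", reflect.String, m["max_age"]); err == nil {
		d, _ := time.ParseDuration(m["max_age"].(string))
		fmt.Println("max_age:", d)
	}
}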
149
vendor/github.com/nats-io/nats-streaming-server/server/log.go
generated
vendored
149
vendor/github.com/nats-io/nats-streaming-server/server/log.go
generated
vendored
|
@@ -1,149 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/nats-io/gnatsd/logger"
|
||||
natsd "github.com/nats-io/gnatsd/server"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Logging in STAN
|
||||
//
|
||||
// The STAN logger is an instance of a NATS logger (basically duplicated
|
||||
// from the NATS server code), and is passed into the NATS server.
|
||||
//
|
||||
// A note on Debugf and Tracef: These will be enabled within the log if
|
||||
// either STAN or the NATS server enables them. However, STAN will only
|
||||
// trace/debug if the local STAN debug/trace flags are set. NATS will do
|
||||
// the same with its logger flags. This enables us to use the same logger,
|
||||
// but differentiate between STAN and NATS debug/trace.
|
||||
//
|
||||
// All logging functions are fully implemented (versus calling into the NATS
|
||||
// server) in case STAN is decoupled from the NATS server.
|
||||
|
||||
// Package globals for performance checks
|
||||
var trace int32
|
||||
var debug int32
|
||||
|
||||
// The STAN logger, encapsulates a NATS logger
|
||||
var stanLog = struct {
|
||||
sync.Mutex
|
||||
logger natsd.Logger
|
||||
}{}
|
||||
|
||||
// ConfigureLogger configures logging for STAN and the embedded NATS server
|
||||
// based on options passed.
|
||||
func ConfigureLogger(stanOpts *Options, natsOpts *natsd.Options) {
|
||||
|
||||
var s *natsd.Server
|
||||
var newLogger natsd.Logger
|
||||
|
||||
sOpts := stanOpts
|
||||
nOpts := natsOpts
|
||||
|
||||
if sOpts == nil {
|
||||
sOpts = GetDefaultOptions()
|
||||
}
|
||||
if nOpts == nil {
|
||||
nOpts = &natsd.Options{}
|
||||
}
|
||||
|
||||
enableDebug := nOpts.Debug || sOpts.Debug
|
||||
enableTrace := nOpts.Trace || sOpts.Trace
|
||||
|
||||
if nOpts.LogFile != "" {
|
||||
newLogger = logger.NewFileLogger(nOpts.LogFile, nOpts.Logtime, enableDebug, sOpts.Trace, true)
|
||||
} else if nOpts.RemoteSyslog != "" {
|
||||
newLogger = logger.NewRemoteSysLogger(nOpts.RemoteSyslog, sOpts.Debug, sOpts.Trace)
|
||||
} else if nOpts.Syslog {
|
||||
newLogger = logger.NewSysLogger(sOpts.Debug, sOpts.Trace)
|
||||
} else {
|
||||
colors := true
|
||||
// Check to see if stderr is being redirected and if so turn off color
|
||||
// Also turn off colors if we're running on Windows where os.Stderr.Stat() returns an invalid handle-error
|
||||
stat, err := os.Stderr.Stat()
|
||||
if err != nil || (stat.Mode()&os.ModeCharDevice) == 0 {
|
||||
colors = false
|
||||
}
|
||||
newLogger = logger.NewStdLogger(nOpts.Logtime, enableDebug, enableTrace, colors, true)
|
||||
}
|
||||
if sOpts.Debug {
|
||||
atomic.StoreInt32(&debug, 1)
|
||||
}
|
||||
if sOpts.Trace {
|
||||
atomic.StoreInt32(&trace, 1)
|
||||
}
|
||||
|
||||
// The NATS server will use the STAN logger
|
||||
s.SetLogger(newLogger, nOpts.Debug, nOpts.Trace)
|
||||
|
||||
stanLog.Lock()
|
||||
stanLog.logger = newLogger
|
||||
stanLog.Unlock()
|
||||
}
|
||||
|
||||
// RemoveLogger clears the logger instance and debug/trace flags.
|
||||
// Used for testing.
|
||||
func RemoveLogger() {
|
||||
var s *natsd.Server
|
||||
|
||||
atomic.StoreInt32(&trace, 0)
|
||||
atomic.StoreInt32(&debug, 0)
|
||||
|
||||
stanLog.Lock()
|
||||
stanLog.logger = nil
|
||||
stanLog.Unlock()
|
||||
|
||||
s.SetLogger(nil, false, false)
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func Noticef(format string, v ...interface{}) {
|
||||
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
|
||||
log.Noticef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Errorf logs an error
|
||||
func Errorf(format string, v ...interface{}) {
|
||||
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
|
||||
log.Errorf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
|
||||
log.Fatalf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&debug) != 0 {
|
||||
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
|
||||
log.Debugf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func Tracef(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&trace) != 0 {
|
||||
executeLogCall(func(logger natsd.Logger, format string, v ...interface{}) {
|
||||
logger.Tracef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
func executeLogCall(f func(logger natsd.Logger, format string, v ...interface{}), format string, args ...interface{}) {
|
||||
stanLog.Lock()
|
||||
defer stanLog.Unlock()
|
||||
if stanLog.logger == nil {
|
||||
return
|
||||
}
|
||||
f(stanLog.logger, format, args...)
|
||||
}
|
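The removed log.go gates Debugf/Tracef behind package-level int32 flags read with sync/atomic, so disabled log calls skip the formatting work entirely. A minimal standalone sketch of that gating:

// Illustrative sketch only: atomic flag gating for debug logging.
package main

import (
	"fmt"
	"sync/atomic"
)

var debug int32

func debugf(format string, v ...interface{}) {
	if atomic.LoadInt32(&debug) == 0 {
		return // debug disabled: skip formatting and output
	}
	fmt.Printf("[DBG] "+format+"\n", v...)
}

func main() {
	debugf("dropped: %d", 1) // no output, debug is off
	atomic.StoreInt32(&debug, 1)
	debugf("dropped: %d", 2) // [DBG] dropped: 2
}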
2910
vendor/github.com/nats-io/nats-streaming-server/server/server.go
generated
vendored
2910
vendor/github.com/nats-io/nats-streaming-server/server/server.go
generated
vendored
File diff suppressed because it is too large
1726
vendor/github.com/nats-io/nats-streaming-server/spb/protocol.pb.go
generated
vendored
1726
vendor/github.com/nats-io/nats-streaming-server/spb/protocol.pb.go
generated
vendored
File diff suppressed because it is too large
71
vendor/github.com/nats-io/nats-streaming-server/spb/protocol.proto
generated
vendored
71
vendor/github.com/nats-io/nats-streaming-server/spb/protocol.proto
generated
vendored
|
@@ -1,71 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
//
|
||||
// Uses https://github.com/gogo/protobuf
|
||||
// compiled via `protoc -I=. -I=$GOPATH/src --gogofaster_out=. protocol.proto`
|
||||
|
||||
syntax = "proto3";
|
||||
package spb;
|
||||
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
option (gogoproto.marshaler_all) = true;
|
||||
option (gogoproto.sizer_all) = true;
|
||||
option (gogoproto.unmarshaler_all) = true;
|
||||
option (gogoproto.goproto_getters_all) = false;
|
||||
|
||||
// SubState represents the state of a Subscription
|
||||
message SubState {
|
||||
uint64 ID = 1; // Subscription ID assigned by the SubStore interface
|
||||
string clientID = 2; // ClientID
|
||||
string qGroup = 3; // Optional queue group
|
||||
string inbox = 4; // Inbox subject to deliver messages on
|
||||
string ackInbox = 5; // Inbox for acks
|
||||
int32 maxInFlight = 6; // Maximum inflight messages without an ack allowed
|
||||
int32 ackWaitInSecs = 7; // Timeout for receiving an ack from the client
|
||||
string durableName = 8; // Optional durable name which survives client restarts
|
||||
uint64 lastSent = 9; // Start position
|
||||
bool isDurable =10; // Indicate durability for this subscriber
|
||||
}
|
||||
|
||||
// SubStateDelete marks a Subscription as deleted
|
||||
message SubStateDelete {
|
||||
uint64 ID = 1; // Subscription ID being deleted
|
||||
}
|
||||
|
||||
// SubStateUpdate represents a subscription update (either Msg or Ack)
|
||||
message SubStateUpdate {
|
||||
uint64 ID = 1; // Subscription ID
|
||||
uint64 seqno = 2; // Sequence of the message (pending or ack'ed)
|
||||
}
|
||||
|
||||
// ServerInfo contains basic information regarding the Server
|
||||
message ServerInfo {
|
||||
string ClusterID = 1; // Cluster ID
|
||||
string Discovery = 2; // Subject server receives connect requests on.
|
||||
string Publish = 3; // Subject prefix server receives published messages on.
|
||||
string Subscribe = 4; // Subject server receives subscription requests on.
|
||||
string Unsubscribe = 5; // Subject server receives unsubscribe requests on.
|
||||
string Close = 6; // Subject server receives close requests on.
|
||||
string SubClose = 7; // Subject server receives subscription close requests on.
|
||||
}
|
||||
|
||||
// ClientInfo contains information related to a Client
|
||||
message ClientInfo {
|
||||
string ID = 1; // Client ID
|
||||
string HbInbox = 2; // The inbox heartbeats are sent to
|
||||
}
|
||||
|
||||
message ClientDelete {
|
||||
string ID = 1; // ID of the client being unregistered
|
||||
}
|
||||
|
||||
message CtrlMsg {
|
||||
enum Type {
|
||||
SubUnsubscribe = 0; // Subscription Unsubscribe request.
|
||||
SubClose = 1; // Subscription Close request.
|
||||
ConnClose = 2; // Connection Close request.
|
||||
}
|
||||
Type MsgType = 1; // Type of the control message.
|
||||
string ServerID = 2; // Allows a server to detect if it is the intended recipient.
|
||||
bytes Data = 3; // Optional bytes that carries context information.
|
||||
}
|
400
vendor/github.com/nats-io/nats-streaming-server/stores/common.go
generated
vendored
400
vendor/github.com/nats-io/nats-streaming-server/stores/common.go
generated
vendored
|
@@ -1,400 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package stores
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/nats-io/go-nats-streaming/pb"
|
||||
"github.com/nats-io/nats-streaming-server/spb"
|
||||
)
|
||||
|
||||
// format string used to report that limit is reached when storing
|
||||
// messages.
|
||||
var droppingMsgsFmt = "WARNING: Reached limits for store %q (msgs=%v/%v bytes=%v/%v), " +
|
||||
"dropping old messages to make room for new ones."
|
||||
|
||||
// commonStore contains everything that is common to any type of store
|
||||
type commonStore struct {
|
||||
sync.RWMutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// genericStore is the generic store implementation with a map of channels.
|
||||
type genericStore struct {
|
||||
commonStore
|
||||
limits StoreLimits
|
||||
name string
|
||||
channels map[string]*ChannelStore
|
||||
clients map[string]*Client
|
||||
}
|
||||
|
||||
// genericSubStore is the generic store implementation that manages subscriptions
|
||||
// for a given channel.
|
||||
type genericSubStore struct {
|
||||
commonStore
|
||||
limits SubStoreLimits
|
||||
subject string // Can't be wildcard
|
||||
subsCount int
|
||||
maxSubID uint64
|
||||
}
|
||||
|
||||
// genericMsgStore is the generic store implementation that manages messages
|
||||
// for a given channel.
|
||||
type genericMsgStore struct {
|
||||
commonStore
|
||||
limits MsgStoreLimits
|
||||
subject string // Can't be wildcard
|
||||
first uint64
|
||||
last uint64
|
||||
totalCount int
|
||||
totalBytes uint64
|
||||
hitLimit bool // indicates if store had to drop messages due to limit
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// genericStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// init initializes the structure of a generic store
|
||||
func (gs *genericStore) init(name string, limits *StoreLimits) {
|
||||
gs.name = name
|
||||
if limits == nil {
|
||||
limits = &DefaultStoreLimits
|
||||
}
|
||||
gs.setLimits(limits)
|
||||
// Do not use limits values to create the map.
|
||||
gs.channels = make(map[string]*ChannelStore)
|
||||
gs.clients = make(map[string]*Client)
|
||||
}
|
||||
|
||||
// Init can be used to initialize the store with server's information.
|
||||
func (gs *genericStore) Init(info *spb.ServerInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Name returns the type name of this store
|
||||
func (gs *genericStore) Name() string {
|
||||
return gs.name
|
||||
}
|
||||
|
||||
// setLimits makes a copy of the given StoreLimits,
|
||||
// validates the limits and if ok, applies the inheritance.
|
||||
func (gs *genericStore) setLimits(limits *StoreLimits) error {
|
||||
// Make a copy
|
||||
gs.limits = *limits
|
||||
// of the map too
|
||||
if len(limits.PerChannel) > 0 {
|
||||
gs.limits.PerChannel = make(map[string]*ChannelLimits, len(limits.PerChannel))
|
||||
for key, val := range limits.PerChannel {
|
||||
// Make a copy of the values. We want ownership
|
||||
// of those structures
|
||||
gs.limits.PerChannel[key] = &(*val)
|
||||
}
|
||||
}
|
||||
// Build will validate and apply inheritance if no error.
|
||||
sl := &gs.limits
|
||||
return sl.Build()
|
||||
}
|
||||
|
||||
// SetLimits sets limits for this store
|
||||
func (gs *genericStore) SetLimits(limits *StoreLimits) error {
|
||||
gs.Lock()
|
||||
err := gs.setLimits(limits)
|
||||
gs.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateChannel creates a ChannelStore for the given channel, and returns
|
||||
// `true` to indicate that the channel is new, false if it already exists.
|
||||
func (gs *genericStore) CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error) {
|
||||
// no-op
|
||||
return nil, false, fmt.Errorf("Generic store, feature not implemented")
|
||||
}
|
||||
|
||||
// LookupChannel returns a ChannelStore for the given channel.
|
||||
func (gs *genericStore) LookupChannel(channel string) *ChannelStore {
|
||||
gs.RLock()
|
||||
cs := gs.channels[channel]
|
||||
gs.RUnlock()
|
||||
return cs
|
||||
}
|
||||
|
||||
// HasChannel returns true if this store has any channel
|
||||
func (gs *genericStore) HasChannel() bool {
|
||||
gs.RLock()
|
||||
l := len(gs.channels)
|
||||
gs.RUnlock()
|
||||
return l > 0
|
||||
}
|
||||
|
||||
// MsgsState returns message store statistics for a given channel ('*' for all)
|
||||
func (gs *genericStore) MsgsState(channel string) (numMessages int, byteSize uint64, err error) {
|
||||
numMessages = 0
|
||||
byteSize = 0
|
||||
err = nil
|
||||
|
||||
if channel == AllChannels {
|
||||
gs.RLock()
|
||||
cs := gs.channels
|
||||
gs.RUnlock()
|
||||
|
||||
for _, c := range cs {
|
||||
n, b, lerr := c.Msgs.State()
|
||||
if lerr != nil {
|
||||
err = lerr
|
||||
return
|
||||
}
|
||||
numMessages += n
|
||||
byteSize += b
|
||||
}
|
||||
} else {
|
||||
cs := gs.LookupChannel(channel)
|
||||
if cs != nil {
|
||||
numMessages, byteSize, err = cs.Msgs.State()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// canAddChannel returns nil if the current number of channels is below the limit, ErrTooManyChannels otherwise.
|
||||
// Store lock is assumed to be locked.
|
||||
func (gs *genericStore) canAddChannel() error {
|
||||
if gs.limits.MaxChannels > 0 && len(gs.channels) >= gs.limits.MaxChannels {
|
||||
return ErrTooManyChannels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddClient stores information about the client identified by `clientID`.
|
||||
func (gs *genericStore) AddClient(clientID, hbInbox string, userData interface{}) (*Client, bool, error) {
|
||||
c := &Client{spb.ClientInfo{ID: clientID, HbInbox: hbInbox}, userData}
|
||||
gs.Lock()
|
||||
oldClient := gs.clients[clientID]
|
||||
if oldClient != nil {
|
||||
gs.Unlock()
|
||||
return oldClient, false, nil
|
||||
}
|
||||
gs.clients[c.ID] = c
|
||||
gs.Unlock()
|
||||
return c, true, nil
|
||||
}
|
||||
|
||||
// GetClient returns the stored Client, or nil if it does not exist.
|
||||
func (gs *genericStore) GetClient(clientID string) *Client {
|
||||
gs.RLock()
|
||||
c := gs.clients[clientID]
|
||||
gs.RUnlock()
|
||||
return c
|
||||
}
|
||||
|
||||
// GetClients returns all stored Client objects, as a map keyed by client IDs.
|
||||
func (gs *genericStore) GetClients() map[string]*Client {
|
||||
gs.RLock()
|
||||
clients := make(map[string]*Client, len(gs.clients))
|
||||
for k, v := range gs.clients {
|
||||
clients[k] = v
|
||||
}
|
||||
gs.RUnlock()
|
||||
return clients
|
||||
}
|
||||
|
||||
// GetClientsCount returns the number of registered clients
|
||||
func (gs *genericStore) GetClientsCount() int {
|
||||
gs.RLock()
|
||||
count := len(gs.clients)
|
||||
gs.RUnlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// DeleteClient deletes the client identified by `clientID`.
|
||||
func (gs *genericStore) DeleteClient(clientID string) *Client {
|
||||
gs.Lock()
|
||||
c := gs.clients[clientID]
|
||||
if c != nil {
|
||||
delete(gs.clients, clientID)
|
||||
}
|
||||
gs.Unlock()
|
||||
return c
|
||||
}
|
||||
|
||||
// Close closes all stores
|
||||
func (gs *genericStore) Close() error {
|
||||
gs.Lock()
|
||||
defer gs.Unlock()
|
||||
if gs.closed {
|
||||
return nil
|
||||
}
|
||||
gs.closed = true
|
||||
return gs.close()
|
||||
}
|
||||
|
||||
// close closes all stores. Store lock is assumed held on entry
|
||||
func (gs *genericStore) close() error {
|
||||
var err error
|
||||
var lerr error
|
||||
|
||||
for _, cs := range gs.channels {
|
||||
lerr = cs.Subs.Close()
|
||||
if lerr != nil && err == nil {
|
||||
err = lerr
|
||||
}
|
||||
lerr = cs.Msgs.Close()
|
||||
if lerr != nil && err == nil {
|
||||
err = lerr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// genericMsgStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// init initializes this generic message store
|
||||
func (gms *genericMsgStore) init(subject string, limits *MsgStoreLimits) {
|
||||
gms.subject = subject
|
||||
gms.limits = *limits
|
||||
}
|
||||
|
||||
// State returns some statistics related to this store
|
||||
func (gms *genericMsgStore) State() (numMessages int, byteSize uint64, err error) {
|
||||
gms.RLock()
|
||||
c, b := gms.totalCount, gms.totalBytes
|
||||
gms.RUnlock()
|
||||
return c, b, nil
|
||||
}
|
||||
|
||||
// FirstSequence returns sequence for first message stored.
|
||||
func (gms *genericMsgStore) FirstSequence() uint64 {
|
||||
gms.RLock()
|
||||
first := gms.first
|
||||
gms.RUnlock()
|
||||
return first
|
||||
}
|
||||
|
||||
// LastSequence returns sequence for last message stored.
|
||||
func (gms *genericMsgStore) LastSequence() uint64 {
|
||||
gms.RLock()
|
||||
last := gms.last
|
||||
gms.RUnlock()
|
||||
return last
|
||||
}
|
||||
|
||||
// FirstAndLastSequence returns sequences for the first and last messages stored.
|
||||
func (gms *genericMsgStore) FirstAndLastSequence() (uint64, uint64) {
|
||||
gms.RLock()
|
||||
first, last := gms.first, gms.last
|
||||
gms.RUnlock()
|
||||
return first, last
|
||||
}
|
||||
|
||||
// Lookup returns the stored message with given sequence number.
|
||||
func (gms *genericMsgStore) Lookup(seq uint64) *pb.MsgProto {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// FirstMsg returns the first message stored.
|
||||
func (gms *genericMsgStore) FirstMsg() *pb.MsgProto {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// LastMsg returns the last message stored.
|
||||
func (gms *genericMsgStore) LastMsg() *pb.MsgProto {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gms *genericMsgStore) Flush() error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSequenceFromTimestamp returns the sequence of the first message whose
|
||||
// timestamp is greater or equal to given timestamp.
|
||||
func (gms *genericMsgStore) GetSequenceFromTimestamp(timestamp int64) uint64 {
|
||||
// no-op
|
||||
return 0
|
||||
}
|
||||
|
||||
// Close closes this store.
|
||||
func (gms *genericMsgStore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// genericSubStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// init initializes the structure of a generic sub store
|
||||
func (gss *genericSubStore) init(channel string, limits *SubStoreLimits) {
|
||||
gss.subject = channel
|
||||
gss.limits = *limits
|
||||
}
|
||||
|
||||
// CreateSub records a new subscription represented by SubState. On success,
|
||||
// it records the subscription's ID in SubState.ID. This ID is to be used
|
||||
// by the other SubStore methods.
|
||||
func (gss *genericSubStore) CreateSub(sub *spb.SubState) error {
|
||||
gss.Lock()
|
||||
err := gss.createSub(sub)
|
||||
gss.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateSub updates a given subscription represented by SubState.
|
||||
func (gss *genericSubStore) UpdateSub(sub *spb.SubState) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSub is the unlocked version of CreateSub that can be used by
|
||||
// non-generic implementations.
|
||||
func (gss *genericSubStore) createSub(sub *spb.SubState) error {
|
||||
if gss.limits.MaxSubscriptions > 0 && gss.subsCount >= gss.limits.MaxSubscriptions {
|
||||
return ErrTooManySubs
|
||||
}
|
||||
|
||||
// Bump the max value before assigning it to the new subscription.
|
||||
gss.maxSubID++
|
||||
gss.subsCount++
|
||||
|
||||
// This new subscription has the max value.
|
||||
sub.ID = gss.maxSubID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteSub invalidates this subscription.
|
||||
func (gss *genericSubStore) DeleteSub(subid uint64) {
|
||||
gss.Lock()
|
||||
gss.subsCount--
|
||||
gss.Unlock()
|
||||
}
|
||||
|
||||
// AddSeqPending adds the given message seqno to the given subscription.
|
||||
func (gss *genericSubStore) AddSeqPending(subid, seqno uint64) error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// AckSeqPending records that the given message seqno has been acknowledged
|
||||
// by the given subscription.
|
||||
func (gss *genericSubStore) AckSeqPending(subid, seqno uint64) error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush is for stores that may buffer operations and need them to be persisted.
|
||||
func (gss *genericSubStore) Flush() error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes this store
|
||||
func (gss *genericSubStore) Close() error {
|
||||
// no-op
|
||||
return nil
|
||||
}
|
2749
vendor/github.com/nats-io/nats-streaming-server/stores/filestore.go
generated
vendored
File diff suppressed because it is too large
109
vendor/github.com/nats-io/nats-streaming-server/stores/limits.go
generated
vendored
|
@@ -1,109 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package stores
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// AddPerChannel stores limits for the given channel `name` in the StoreLimits.
|
||||
// Inheritance (that is, specifying 0 for a limit means that the global limit
|
||||
// should be used) is not applied in this call. This is done in StoreLimits.Build
|
||||
// along with some validation.
|
||||
func (sl *StoreLimits) AddPerChannel(name string, cl *ChannelLimits) {
|
||||
if sl.PerChannel == nil {
|
||||
sl.PerChannel = make(map[string]*ChannelLimits)
|
||||
}
|
||||
sl.PerChannel[name] = cl
|
||||
}
|
||||
|
||||
// Build applies the global limits to per-channel limits that are set
|
||||
// to zero. This call also validates the limits. An error is returned if:
|
||||
// * any limit is set to a negative value.
|
||||
// * the number of per-channel limits is higher than StoreLimits.MaxChannels.
|
||||
// * any per-channel limit is higher than the corresponding global limit.
|
||||
func (sl *StoreLimits) Build() error {
|
||||
// Check that there is no negative value
|
||||
if sl.MaxChannels < 0 {
|
||||
return fmt.Errorf("Max channels limit cannot be negative")
|
||||
}
|
||||
if err := sl.checkChannelLimits(&sl.ChannelLimits, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
// If there is no per-channel, we are done.
|
||||
if len(sl.PerChannel) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sl.PerChannel) > sl.MaxChannels {
|
||||
return fmt.Errorf("Too many channels defined (%v). The max channels limit is set to %v",
|
||||
len(sl.PerChannel), sl.MaxChannels)
|
||||
}
|
||||
for cn, cl := range sl.PerChannel {
|
||||
if err := sl.checkChannelLimits(cl, cn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// If we are here, it means that there was no error,
|
||||
// so we now apply inheritance.
|
||||
for _, cl := range sl.PerChannel {
|
||||
if cl.MaxSubscriptions == 0 {
|
||||
cl.MaxSubscriptions = sl.MaxSubscriptions
|
||||
}
|
||||
if cl.MaxMsgs == 0 {
|
||||
cl.MaxMsgs = sl.MaxMsgs
|
||||
}
|
||||
if cl.MaxBytes == 0 {
|
||||
cl.MaxBytes = sl.MaxBytes
|
||||
}
|
||||
if cl.MaxAge == 0 {
|
||||
cl.MaxAge = sl.MaxAge
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sl *StoreLimits) checkChannelLimits(cl *ChannelLimits, channelName string) error {
|
||||
// Check that there is no per-channel unlimited limit if corresponding
|
||||
// limit is not.
|
||||
if err := verifyLimit("subscriptions", channelName,
|
||||
int64(cl.MaxSubscriptions), int64(sl.MaxSubscriptions)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyLimit("messages", channelName,
|
||||
int64(cl.MaxMsgs), int64(sl.MaxMsgs)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyLimit("bytes", channelName,
|
||||
cl.MaxBytes, sl.MaxBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyLimit("age", channelName,
|
||||
int64(cl.MaxAge), int64(sl.MaxAge)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyLimit(errText, channelName string, limit, globalLimit int64) error {
|
||||
// No limit can be negative. If channelName is "" we are
|
||||
// verifying the global limit (in this case limit == globalLimit).
|
||||
// Otherwise, we verify a given per-channel limit. Make
|
||||
// sure that the value is not greater than the corresponding
|
||||
// global limit.
|
||||
if channelName == "" {
|
||||
if limit < 0 {
|
||||
return fmt.Errorf("Max %s for global limit cannot be negative", errText)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Per-channel limit specific here.
|
||||
if limit < 0 {
|
||||
return fmt.Errorf("Max %s for channel %q cannot be negative. "+
|
||||
"Set it to 0 to be equal to the global limit of %v", errText, channelName, globalLimit)
|
||||
}
|
||||
if limit > globalLimit {
|
||||
return fmt.Errorf("Max %s for channel %q cannot be higher than global limit "+
|
||||
"of %v", errText, channelName, globalLimit)
|
||||
}
|
||||
return nil
|
||||
}
|
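Not part of the commit itself: a minimal sketch of the limit inheritance that Build performs above, assuming the vendored stores import path; the channel name "metrics" and its MaxMsgs value are arbitrary.
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	limits := stores.DefaultStoreLimits
	// Per-channel values left at zero inherit the corresponding global limit
	// when Build is called.
	limits.AddPerChannel("metrics", &stores.ChannelLimits{
		MsgStoreLimits: stores.MsgStoreLimits{MaxMsgs: 1000},
	})
	if err := limits.Build(); err != nil {
		panic(err)
	}
	// MaxBytes was left at 0, so it now equals the global limit.
	fmt.Println(limits.PerChannel["metrics"].MaxBytes)
}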
248
vendor/github.com/nats-io/nats-streaming-server/stores/memstore.go
generated
vendored
|
@@ -1,248 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package stores
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/go-nats-streaming/pb"
|
||||
)
|
||||
|
||||
// MemoryStore is a factory for message and subscription stores.
|
||||
type MemoryStore struct {
|
||||
genericStore
|
||||
}
|
||||
|
||||
// MemorySubStore is a subscription store in memory
|
||||
type MemorySubStore struct {
|
||||
genericSubStore
|
||||
}
|
||||
|
||||
// MemoryMsgStore is a per channel message store in memory
|
||||
type MemoryMsgStore struct {
|
||||
genericMsgStore
|
||||
msgs map[uint64]*pb.MsgProto
|
||||
ageTimer *time.Timer
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// MemoryStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NewMemoryStore returns a factory for stores held in memory.
|
||||
// If no limits are provided, the store will be created with
|
||||
// DefaultStoreLimits.
|
||||
func NewMemoryStore(limits *StoreLimits) (*MemoryStore, error) {
|
||||
ms := &MemoryStore{}
|
||||
ms.init(TypeMemory, limits)
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// CreateChannel creates a ChannelStore for the given channel, and returns
|
||||
// `true` to indicate that the channel is new, false if it already exists.
|
||||
func (ms *MemoryStore) CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
channelStore := ms.channels[channel]
|
||||
if channelStore != nil {
|
||||
return channelStore, false, nil
|
||||
}
|
||||
|
||||
if err := ms.canAddChannel(); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// Defaults to the global limits
|
||||
msgStoreLimits := ms.limits.MsgStoreLimits
|
||||
subStoreLimits := ms.limits.SubStoreLimits
|
||||
// See if there is an override
|
||||
thisChannelLimits, exists := ms.limits.PerChannel[channel]
|
||||
if exists {
|
||||
// Use this channel specific limits
|
||||
msgStoreLimits = thisChannelLimits.MsgStoreLimits
|
||||
subStoreLimits = thisChannelLimits.SubStoreLimits
|
||||
}
|
||||
|
||||
msgStore := &MemoryMsgStore{msgs: make(map[uint64]*pb.MsgProto, 64)}
|
||||
msgStore.init(channel, &msgStoreLimits)
|
||||
|
||||
subStore := &MemorySubStore{}
|
||||
subStore.init(channel, &subStoreLimits)
|
||||
|
||||
channelStore = &ChannelStore{
|
||||
Subs: subStore,
|
||||
Msgs: msgStore,
|
||||
UserData: userData,
|
||||
}
|
||||
|
||||
ms.channels[channel] = channelStore
|
||||
|
||||
return channelStore, true, nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// MemoryMsgStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Store a given message.
|
||||
func (ms *MemoryMsgStore) Store(data []byte) (uint64, error) {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
|
||||
if ms.first == 0 {
|
||||
ms.first = 1
|
||||
}
|
||||
ms.last++
|
||||
m := &pb.MsgProto{
|
||||
Sequence: ms.last,
|
||||
Subject: ms.subject,
|
||||
Data: data,
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
ms.msgs[ms.last] = m
|
||||
ms.totalCount++
|
||||
ms.totalBytes += uint64(m.Size())
|
||||
// If there is an age limit and no timer yet created, do so now
|
||||
if ms.limits.MaxAge > time.Duration(0) && ms.ageTimer == nil {
|
||||
ms.wg.Add(1)
|
||||
ms.ageTimer = time.AfterFunc(ms.limits.MaxAge, ms.expireMsgs)
|
||||
}
|
||||
|
||||
// Check if we need to remove any (but leave at least the last added)
|
||||
maxMsgs := ms.limits.MaxMsgs
|
||||
maxBytes := ms.limits.MaxBytes
|
||||
if maxMsgs > 0 || maxBytes > 0 {
|
||||
for ms.totalCount > 1 &&
|
||||
((maxMsgs > 0 && ms.totalCount > maxMsgs) ||
|
||||
(maxBytes > 0 && (ms.totalBytes > uint64(maxBytes)))) {
|
||||
ms.removeFirstMsg()
|
||||
if !ms.hitLimit {
|
||||
ms.hitLimit = true
|
||||
Noticef(droppingMsgsFmt, ms.subject, ms.totalCount, ms.limits.MaxMsgs, ms.totalBytes, ms.limits.MaxBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ms.last, nil
|
||||
}
|
||||
|
||||
// Lookup returns the stored message with given sequence number.
|
||||
func (ms *MemoryMsgStore) Lookup(seq uint64) *pb.MsgProto {
|
||||
ms.RLock()
|
||||
m := ms.msgs[seq]
|
||||
ms.RUnlock()
|
||||
return m
|
||||
}
|
||||
|
||||
// FirstMsg returns the first message stored.
|
||||
func (ms *MemoryMsgStore) FirstMsg() *pb.MsgProto {
|
||||
ms.RLock()
|
||||
m := ms.msgs[ms.first]
|
||||
ms.RUnlock()
|
||||
return m
|
||||
}
|
||||
|
||||
// LastMsg returns the last message stored.
|
||||
func (ms *MemoryMsgStore) LastMsg() *pb.MsgProto {
|
||||
ms.RLock()
|
||||
m := ms.msgs[ms.last]
|
||||
ms.RUnlock()
|
||||
return m
|
||||
}
|
||||
|
||||
// GetSequenceFromTimestamp returns the sequence of the first message whose
|
||||
// timestamp is greater or equal to given timestamp.
|
||||
func (ms *MemoryMsgStore) GetSequenceFromTimestamp(timestamp int64) uint64 {
|
||||
ms.RLock()
|
||||
defer ms.RUnlock()
|
||||
|
||||
index := sort.Search(len(ms.msgs), func(i int) bool {
|
||||
m := ms.msgs[uint64(i)+ms.first]
|
||||
if m.Timestamp >= timestamp {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return uint64(index) + ms.first
|
||||
}
|
||||
|
||||
// expireMsgs ensures that messages don't stay in the log longer than the
|
||||
// limit's MaxAge.
|
||||
func (ms *MemoryMsgStore) expireMsgs() {
|
||||
ms.Lock()
|
||||
if ms.closed {
|
||||
ms.Unlock()
|
||||
ms.wg.Done()
|
||||
return
|
||||
}
|
||||
defer ms.Unlock()
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
maxAge := int64(ms.limits.MaxAge)
|
||||
for {
|
||||
m, ok := ms.msgs[ms.first]
|
||||
if !ok {
|
||||
ms.ageTimer = nil
|
||||
ms.wg.Done()
|
||||
return
|
||||
}
|
||||
elapsed := now - m.Timestamp
|
||||
if elapsed >= maxAge {
|
||||
ms.removeFirstMsg()
|
||||
} else {
|
||||
ms.ageTimer.Reset(time.Duration(maxAge - elapsed))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeFirstMsg removes the first message and updates totals.
|
||||
func (ms *MemoryMsgStore) removeFirstMsg() {
|
||||
firstMsg := ms.msgs[ms.first]
|
||||
ms.totalBytes -= uint64(firstMsg.Size())
|
||||
ms.totalCount--
|
||||
delete(ms.msgs, ms.first)
|
||||
ms.first++
|
||||
}
|
||||
|
||||
// Close implements the MsgStore interface
|
||||
func (ms *MemoryMsgStore) Close() error {
|
||||
ms.Lock()
|
||||
if ms.closed {
|
||||
ms.Unlock()
|
||||
return nil
|
||||
}
|
||||
ms.closed = true
|
||||
if ms.ageTimer != nil {
|
||||
if ms.ageTimer.Stop() {
|
||||
ms.wg.Done()
|
||||
}
|
||||
}
|
||||
ms.Unlock()
|
||||
|
||||
ms.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// MemorySubStore methods
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// AddSeqPending adds the given message seqno to the given subscription.
|
||||
func (*MemorySubStore) AddSeqPending(subid, seqno uint64) error {
|
||||
// Overrides in case genericSubStore does something. For the memory
|
||||
// based store, we want to keep the cost of this to a minimum.
|
||||
return nil
|
||||
}
|
||||
|
||||
// AckSeqPending records that the given message seqno has been acknowledged
|
||||
// by the given subscription.
|
||||
func (*MemorySubStore) AckSeqPending(subid, seqno uint64) error {
|
||||
// Overrides in case genericSubStore does something. For the memory
|
||||
// based store, we want to keep the cost of this to a minimum.
|
||||
return nil
|
||||
}
|
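Not part of the commit: a sketch of the MaxMsgs eviction loop in MemoryMsgStore.Store above; the channel name and payload are placeholders, and the global MaxMsgs is lowered purely for illustration.
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	limits := stores.DefaultStoreLimits
	limits.MaxMsgs = 2 // keep at most two messages per channel
	s, err := stores.NewMemoryStore(&limits)
	if err != nil {
		panic(err)
	}
	defer s.Close()

	cs, _, err := s.CreateChannel("audit", nil)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 3; i++ {
		if _, err := cs.Msgs.Store([]byte("payload")); err != nil {
			panic(err)
		}
	}
	// The oldest message was dropped to honor MaxMsgs.
	first, last := cs.Msgs.FirstAndLastSequence()
	fmt.Println(first, last) // 2 3
}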
261
vendor/github.com/nats-io/nats-streaming-server/stores/store.go
generated
vendored
|
@@ -1,261 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package stores
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"github.com/nats-io/go-nats-streaming/pb"
|
||||
"github.com/nats-io/nats-streaming-server/spb"
|
||||
)
|
||||
|
||||
const (
|
||||
// TypeMemory is the store type name for memory based stores
|
||||
TypeMemory = "MEMORY"
|
||||
// TypeFile is the store type name for file based stores
|
||||
TypeFile = "FILE"
|
||||
)
|
||||
|
||||
const (
|
||||
// AllChannels allows getting state for all channels.
|
||||
AllChannels = "*"
|
||||
)
|
||||
|
||||
// Errors.
|
||||
var (
|
||||
ErrTooManyChannels = errors.New("too many channels")
|
||||
ErrTooManySubs = errors.New("too many subscriptions per channel")
|
||||
)
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func Noticef(format string, v ...interface{}) {
|
||||
server.Noticef(format, v...)
|
||||
}
|
||||
|
||||
// StoreLimits define limits for a store.
|
||||
type StoreLimits struct {
|
||||
// How many channels are allowed.
|
||||
MaxChannels int
|
||||
// Global limits. Any 0 value means that the limit is ignored (unlimited).
|
||||
ChannelLimits
|
||||
// Per-channel limits. If a limit for a channel in this map is 0,
|
||||
// the corresponding global limit (specified above) is used.
|
||||
PerChannel map[string]*ChannelLimits
|
||||
}
|
||||
|
||||
// ChannelLimits defines limits for a given channel
|
||||
type ChannelLimits struct {
|
||||
// Limits for message stores
|
||||
MsgStoreLimits
|
||||
// Limits for subscriptions stores
|
||||
SubStoreLimits
|
||||
}
|
||||
|
||||
// MsgStoreLimits defines limits for a MsgStore.
|
||||
// For global limits, a value of 0 means "unlimited".
|
||||
// For per-channel limits, it means that the corresponding global
|
||||
// limit is used.
|
||||
type MsgStoreLimits struct {
|
||||
// How many messages are allowed.
|
||||
MaxMsgs int
|
||||
// How many bytes are allowed.
|
||||
MaxBytes int64
|
||||
// How long messages are kept in the log (unit is seconds)
|
||||
MaxAge time.Duration
|
||||
}
|
||||
|
||||
// SubStoreLimits defines limits for a SubStore
|
||||
type SubStoreLimits struct {
|
||||
// How many subscriptions are allowed.
|
||||
MaxSubscriptions int
|
||||
}
|
||||
|
||||
// DefaultStoreLimits are the limits that a Store must
|
||||
// use when none are specified to the Store constructor.
|
||||
// Store limits can be changed with the Store.SetLimits() method.
|
||||
var DefaultStoreLimits = StoreLimits{
|
||||
100,
|
||||
ChannelLimits{
|
||||
MsgStoreLimits{
|
||||
MaxMsgs: 1000000,
|
||||
MaxBytes: 1000000 * 1024,
|
||||
},
|
||||
SubStoreLimits{
|
||||
MaxSubscriptions: 1000,
|
||||
},
|
||||
},
|
||||
nil,
|
||||
}
|
||||
|
||||
// RecoveredState allows the server to reconstruct its state after a restart.
|
||||
type RecoveredState struct {
|
||||
Info *spb.ServerInfo
|
||||
Clients []*Client
|
||||
Subs RecoveredSubscriptions
|
||||
}
|
||||
|
||||
// Client represents a client with ID, Heartbeat Inbox and user data sets
|
||||
// when adding it to the store.
|
||||
type Client struct {
|
||||
spb.ClientInfo
|
||||
UserData interface{}
|
||||
}
|
||||
|
||||
// RecoveredSubscriptions is a map of recovered subscriptions, keyed by channel name.
|
||||
type RecoveredSubscriptions map[string][]*RecoveredSubState
|
||||
|
||||
// PendingAcks is a set of message sequences waiting to be acknowledged.
|
||||
type PendingAcks map[uint64]struct{}
|
||||
|
||||
// RecoveredSubState represents a recovered Subscription with a map
|
||||
// of pending messages.
|
||||
type RecoveredSubState struct {
|
||||
Sub *spb.SubState
|
||||
Pending PendingAcks
|
||||
}
|
||||
|
||||
// ChannelStore contains a reference to both Subscription and Message stores.
|
||||
type ChannelStore struct {
|
||||
// UserData is set when the channel is created.
|
||||
UserData interface{}
|
||||
// Subs is the Subscriptions Store.
|
||||
Subs SubStore
|
||||
// Msgs is the Messages Store.
|
||||
Msgs MsgStore
|
||||
}
|
||||
|
||||
// Store is the storage interface for NATS Streaming servers.
|
||||
//
|
||||
// If an implementation has a Store constructor with StoreLimits, it should be
|
||||
// noted that the limits don't apply to any state being recovered, for Store
|
||||
// implementations supporting recovery.
|
||||
//
|
||||
type Store interface {
|
||||
// Init can be used to initialize the store with server's information.
|
||||
Init(info *spb.ServerInfo) error
|
||||
|
||||
// Name returns the type name of this store (e.g. MEMORY, FILESTORE, etc...).
|
||||
Name() string
|
||||
|
||||
// SetLimits sets limits for this store. The action is not expected
|
||||
// to be retroactive.
|
||||
// The store implementation should make a deep copy as to not change
|
||||
// the content of the structure passed by the caller.
|
||||
// This call may return an error due to limits validation errors.
|
||||
SetLimits(limits *StoreLimits) error
|
||||
|
||||
// CreateChannel creates a ChannelStore for the given channel, and returns
|
||||
// `true` to indicate that the channel is new, false if it already exists.
|
||||
// Limits defined for this channel in StoreLimits.PerChannel map, if present,
|
||||
// will apply. Otherwise, the global limits in StoreLimits will apply.
|
||||
CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error)
|
||||
|
||||
// LookupChannel returns a ChannelStore for the given channel, nil if channel
|
||||
// does not exist.
|
||||
LookupChannel(channel string) *ChannelStore
|
||||
|
||||
// HasChannel returns true if this store has any channel.
|
||||
HasChannel() bool
|
||||
|
||||
// MsgsState returns message store statistics for a given channel, or all
|
||||
// if 'channel' is AllChannels.
|
||||
MsgsState(channel string) (numMessages int, byteSize uint64, err error)
|
||||
|
||||
// AddClient stores information about the client identified by `clientID`.
|
||||
// If a Client is already registered, this call returns the currently
|
||||
// registered Client object, and the boolean set to false to indicate
|
||||
// that the client is not new.
|
||||
AddClient(clientID, hbInbox string, userData interface{}) (*Client, bool, error)
|
||||
|
||||
// GetClient returns the stored Client, or nil if it does not exist.
|
||||
GetClient(clientID string) *Client
|
||||
|
||||
// GetClients returns a map of all stored Client objects, keyed by client IDs.
|
||||
// The returned map is a copy of the state maintained by the store so that
|
||||
// it is safe for the caller to walk through the map while clients may be
|
||||
// added/deleted from the store.
|
||||
GetClients() map[string]*Client
|
||||
|
||||
// GetClientsCount returns the number of registered clients.
|
||||
GetClientsCount() int
|
||||
|
||||
// DeleteClient removes the client identified by `clientID` from the store
|
||||
// and returns it to the caller.
|
||||
DeleteClient(clientID string) *Client
|
||||
|
||||
// Close closes all stores.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// SubStore is the interface for storage of Subscriptions on a given channel.
|
||||
//
|
||||
// Implementations of this interface should not attempt to validate that
|
||||
// a subscription is valid (that is, has not been deleted) when processing
|
||||
// updates.
|
||||
type SubStore interface {
|
||||
// CreateSub records a new subscription represented by SubState. On success,
|
||||
// it records the subscription's ID in SubState.ID. This ID is to be used
|
||||
// by the other SubStore methods.
|
||||
CreateSub(*spb.SubState) error
|
||||
|
||||
// UpdateSub updates a given subscription represented by SubState.
|
||||
UpdateSub(*spb.SubState) error
|
||||
|
||||
// DeleteSub invalidates the subscription 'subid'.
|
||||
DeleteSub(subid uint64)
|
||||
|
||||
// AddSeqPending adds the given message 'seqno' to the subscription 'subid'.
|
||||
AddSeqPending(subid, seqno uint64) error
|
||||
|
||||
// AckSeqPending records that the given message 'seqno' has been acknowledged
|
||||
// by the subscription 'subid'.
|
||||
AckSeqPending(subid, seqno uint64) error
|
||||
|
||||
// Flush is for stores that may buffer operations and need them to be persisted.
|
||||
Flush() error
|
||||
|
||||
// Close closes the subscriptions store.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// MsgStore is the interface for storage of Messages on a given channel.
|
||||
type MsgStore interface {
|
||||
// State returns some statistics related to this store.
|
||||
State() (numMessages int, byteSize uint64, err error)
|
||||
|
||||
// Store stores a message and returns the message sequence.
|
||||
Store(data []byte) (uint64, error)
|
||||
|
||||
// Lookup returns the stored message with given sequence number.
|
||||
Lookup(seq uint64) *pb.MsgProto
|
||||
|
||||
// FirstSequence returns sequence for first message stored, 0 if no
|
||||
// message is stored.
|
||||
FirstSequence() uint64
|
||||
|
||||
// LastSequence returns sequence for last message stored, 0 if no
|
||||
// message is stored.
|
||||
LastSequence() uint64
|
||||
|
||||
// FirstAndLastSequence returns sequences for the first and last messages stored,
|
||||
// 0 if no message is stored.
|
||||
FirstAndLastSequence() (uint64, uint64)
|
||||
|
||||
// GetSequenceFromTimestamp returns the sequence of the first message whose
|
||||
// timestamp is greater or equal to given timestamp.
|
||||
GetSequenceFromTimestamp(timestamp int64) uint64
|
||||
|
||||
// FirstMsg returns the first message stored.
|
||||
FirstMsg() *pb.MsgProto
|
||||
|
||||
// LastMsg returns the last message stored.
|
||||
LastMsg() *pb.MsgProto
|
||||
|
||||
// Flush is for stores that may buffer operations and need them to be persisted.
|
||||
Flush() error
|
||||
|
||||
// Close closes the store.
|
||||
Close() error
|
||||
}
|
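Not part of the commit: a sketch of the client-tracking half of the Store interface above, exercised through the in-memory implementation; the client ID and heartbeat inbox are arbitrary placeholders.
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	// nil limits fall back to DefaultStoreLimits, per the NewMemoryStore doc.
	s, err := stores.NewMemoryStore(nil)
	if err != nil {
		panic(err)
	}
	defer s.Close()

	c, isNew, err := s.AddClient("client-123", "_INBOX.hb", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(isNew, c.ID) // true client-123

	// Registering the same ID again returns the existing client.
	_, isNew, _ = s.AddClient("client-123", "_INBOX.hb", nil)
	fmt.Println(isNew, s.GetClientsCount()) // false 1

	s.DeleteClient("client-123")
	fmt.Println(s.GetClient("client-123")) // <nil>
}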
53
vendor/github.com/nats-io/nats-streaming-server/util/util.go
generated
vendored
|
@@ -1,53 +0,0 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit
|
||||
// unsigned integers.
|
||||
var ByteOrder binary.ByteOrder
|
||||
|
||||
func init() {
|
||||
ByteOrder = binary.LittleEndian
|
||||
}
|
||||
|
||||
// EnsureBufBigEnough checks that given buffer is big enough to hold 'needed'
|
||||
// bytes, otherwise returns a buffer of a size of at least 'needed' bytes.
|
||||
func EnsureBufBigEnough(buf []byte, needed int) []byte {
|
||||
if buf == nil {
|
||||
return make([]byte, needed)
|
||||
} else if needed > len(buf) {
|
||||
return make([]byte, int(float32(needed)*1.1))
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
// WriteInt writes an int (4 bytes) to the given writer using ByteOrder.
|
||||
func WriteInt(w io.Writer, v int) error {
|
||||
var b [4]byte
|
||||
var bs []byte
|
||||
|
||||
bs = b[:4]
|
||||
|
||||
ByteOrder.PutUint32(bs, uint32(v))
|
||||
_, err := w.Write(bs)
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadInt reads an int (4 bytes) from the reader using ByteOrder.
|
||||
func ReadInt(r io.Reader) (int, error) {
|
||||
var b [4]byte
|
||||
var bs []byte
|
||||
|
||||
bs = b[:4]
|
||||
|
||||
_, err := io.ReadFull(r, bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(ByteOrder.Uint32(bs)), nil
|
||||
}
|
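Not part of the commit: a small round trip for the util helpers above — WriteInt encodes four bytes with the package's ByteOrder and ReadInt decodes them back.
package main

import (
	"bytes"
	"fmt"

	"github.com/nats-io/nats-streaming-server/util"
)

func main() {
	var buf bytes.Buffer
	if err := util.WriteInt(&buf, 42); err != nil {
		panic(err)
	}
	v, err := util.ReadInt(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 42
}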
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
|
@@ -1,27 +0,0 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
|
@@ -1,22 +0,0 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
35
vendor/golang.org/x/crypto/bcrypt/base64.go
generated
vendored
|
@@ -1,35 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bcrypt
|
||||
|
||||
import "encoding/base64"
|
||||
|
||||
const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
var bcEncoding = base64.NewEncoding(alphabet)
|
||||
|
||||
func base64Encode(src []byte) []byte {
|
||||
n := bcEncoding.EncodedLen(len(src))
|
||||
dst := make([]byte, n)
|
||||
bcEncoding.Encode(dst, src)
|
||||
for dst[n-1] == '=' {
|
||||
n--
|
||||
}
|
||||
return dst[:n]
|
||||
}
|
||||
|
||||
func base64Decode(src []byte) ([]byte, error) {
|
||||
numOfEquals := 4 - (len(src) % 4)
|
||||
for i := 0; i < numOfEquals; i++ {
|
||||
src = append(src, '=')
|
||||
}
|
||||
|
||||
dst := make([]byte, bcEncoding.DecodedLen(len(src)))
|
||||
n, err := bcEncoding.Decode(dst, src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst[:n], nil
|
||||
}
|
294
vendor/golang.org/x/crypto/bcrypt/bcrypt.go
generated
vendored
|
@@ -1,294 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
|
||||
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
|
||||
package bcrypt // import "golang.org/x/crypto/bcrypt"
|
||||
|
||||
// The code is a port of Provos and Mazières's C implementation.
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"golang.org/x/crypto/blowfish"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
|
||||
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
|
||||
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
|
||||
)
|
||||
|
||||
// The error returned from CompareHashAndPassword when a password and hash do
|
||||
// not match.
|
||||
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash is too short to
|
||||
// be a bcrypt hash.
|
||||
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash was created with
|
||||
// a bcrypt algorithm newer than this implementation.
|
||||
type HashVersionTooNewError byte
|
||||
|
||||
func (hv HashVersionTooNewError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
|
||||
}
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
|
||||
type InvalidHashPrefixError byte
|
||||
|
||||
func (ih InvalidHashPrefixError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
|
||||
}
|
||||
|
||||
type InvalidCostError int
|
||||
|
||||
func (ic InvalidCostError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
|
||||
}
|
||||
|
||||
const (
|
||||
majorVersion = '2'
|
||||
minorVersion = 'a'
|
||||
maxSaltSize = 16
|
||||
maxCryptedHashSize = 23
|
||||
encodedSaltSize = 22
|
||||
encodedHashSize = 31
|
||||
minHashSize = 59
|
||||
)
|
||||
|
||||
// magicCipherData is an IV for the 64 Blowfish encryption calls in
|
||||
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
|
||||
var magicCipherData = []byte{
|
||||
0x4f, 0x72, 0x70, 0x68,
|
||||
0x65, 0x61, 0x6e, 0x42,
|
||||
0x65, 0x68, 0x6f, 0x6c,
|
||||
0x64, 0x65, 0x72, 0x53,
|
||||
0x63, 0x72, 0x79, 0x44,
|
||||
0x6f, 0x75, 0x62, 0x74,
|
||||
}
|
||||
|
||||
type hashed struct {
|
||||
hash []byte
|
||||
salt []byte
|
||||
cost int // allowed range is MinCost to MaxCost
|
||||
major byte
|
||||
minor byte
|
||||
}
|
||||
|
||||
// GenerateFromPassword returns the bcrypt hash of the password at the given
|
||||
// cost. If the cost given is less than MinCost, the cost will be set to
|
||||
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
|
||||
// to compare the returned hashed password with its cleartext version.
|
||||
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
|
||||
p, err := newFromPassword(password, cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Hash(), nil
|
||||
}
|
||||
|
||||
// CompareHashAndPassword compares a bcrypt hashed password with its possible
|
||||
// plaintext equivalent. Returns nil on success, or an error on failure.
|
||||
func CompareHashAndPassword(hashedPassword, password []byte) error {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherHash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
|
||||
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrMismatchedHashAndPassword
|
||||
}
|
||||
|
||||
// Cost returns the hashing cost used to create the given hashed
|
||||
// password. When, in the future, the hashing cost of a password system needs
|
||||
// to be increased in order to adjust for greater computational power, this
|
||||
// function allows one to establish which passwords need to be updated.
|
||||
func Cost(hashedPassword []byte) (int, error) {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p.cost, nil
|
||||
}
|
||||
|
||||
func newFromPassword(password []byte, cost int) (*hashed, error) {
|
||||
if cost < MinCost {
|
||||
cost = DefaultCost
|
||||
}
|
||||
p := new(hashed)
|
||||
p.major = majorVersion
|
||||
p.minor = minorVersion
|
||||
|
||||
err := checkCost(cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.cost = cost
|
||||
|
||||
unencodedSalt := make([]byte, maxSaltSize)
|
||||
_, err = io.ReadFull(rand.Reader, unencodedSalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.salt = base64Encode(unencodedSalt)
|
||||
hash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.hash = hash
|
||||
return p, err
|
||||
}
|
||||
|
||||
func newFromHash(hashedSecret []byte) (*hashed, error) {
|
||||
if len(hashedSecret) < minHashSize {
|
||||
return nil, ErrHashTooShort
|
||||
}
|
||||
p := new(hashed)
|
||||
n, err := p.decodeVersion(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
n, err = p.decodeCost(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
|
||||
// The "+2" is here because we'll have to append at most 2 '=' to the salt
|
||||
// when base64 decoding it in expensiveBlowfishSetup().
|
||||
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
|
||||
copy(p.salt, hashedSecret[:encodedSaltSize])
|
||||
|
||||
hashedSecret = hashedSecret[encodedSaltSize:]
|
||||
p.hash = make([]byte, len(hashedSecret))
|
||||
copy(p.hash, hashedSecret)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
|
||||
cipherData := make([]byte, len(magicCipherData))
|
||||
copy(cipherData, magicCipherData)
|
||||
|
||||
c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < 24; i += 8 {
|
||||
for j := 0; j < 64; j++ {
|
||||
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
|
||||
}
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. We only encode 23 of
|
||||
// the 24 bytes encrypted.
|
||||
hsh := base64Encode(cipherData[:maxCryptedHashSize])
|
||||
return hsh, nil
|
||||
}
|
||||
|
||||
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
|
||||
|
||||
csalt, err := base64Decode(salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. They use the trailing
|
||||
// NULL in the key string during expansion.
|
||||
ckey := append(key, 0)
|
||||
|
||||
c, err := blowfish.NewSaltedCipher(ckey, csalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var i, rounds uint64
|
||||
rounds = 1 << cost
|
||||
for i = 0; i < rounds; i++ {
|
||||
blowfish.ExpandKey(ckey, c)
|
||||
blowfish.ExpandKey(csalt, c)
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (p *hashed) Hash() []byte {
|
||||
arr := make([]byte, 60)
|
||||
arr[0] = '$'
|
||||
arr[1] = p.major
|
||||
n := 2
|
||||
if p.minor != 0 {
|
||||
arr[2] = p.minor
|
||||
n = 3
|
||||
}
|
||||
arr[n] = '$'
|
||||
n += 1
|
||||
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
|
||||
n += 2
|
||||
arr[n] = '$'
|
||||
n += 1
|
||||
copy(arr[n:], p.salt)
|
||||
n += encodedSaltSize
|
||||
copy(arr[n:], p.hash)
|
||||
n += encodedHashSize
|
||||
return arr[:n]
|
||||
}
|
||||
|
||||
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
|
||||
if sbytes[0] != '$' {
|
||||
return -1, InvalidHashPrefixError(sbytes[0])
|
||||
}
|
||||
if sbytes[1] > majorVersion {
|
||||
return -1, HashVersionTooNewError(sbytes[1])
|
||||
}
|
||||
p.major = sbytes[1]
|
||||
n := 3
|
||||
if sbytes[2] != '$' {
|
||||
p.minor = sbytes[2]
|
||||
n++
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// sbytes should begin where decodeVersion left off.
|
||||
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
|
||||
cost, err := strconv.Atoi(string(sbytes[0:2]))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
err = checkCost(cost)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
p.cost = cost
|
||||
return 3, nil
|
||||
}
|
||||
|
||||
func (p *hashed) String() string {
|
||||
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
|
||||
}
|
||||
|
||||
func checkCost(cost int) error {
|
||||
if cost < MinCost || cost > MaxCost {
|
||||
return InvalidCostError(cost)
|
||||
}
|
||||
return nil
|
||||
}
|
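Not part of the commit: typical use of the bcrypt package above — hash a password at DefaultCost and later verify a candidate against the stored hash.
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// CompareHashAndPassword returns nil when the password matches the hash.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("mismatch:", err)
		return
	}
	fmt.Println("password ok")
}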
159
vendor/golang.org/x/crypto/blowfish/block.go
generated
vendored
|
@@ -1,159 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
// getNextWord returns the next big-endian uint32 value from the byte slice
|
||||
// at the given position in a circular manner, updating the position.
|
||||
func getNextWord(b []byte, pos *int) uint32 {
|
||||
var w uint32
|
||||
j := *pos
|
||||
for i := 0; i < 4; i++ {
|
||||
w = w<<8 | uint32(b[j])
|
||||
j++
|
||||
if j >= len(b) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
*pos = j
|
||||
return w
|
||||
}
|
||||
|
||||
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
|
||||
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
|
||||
// pi and substitution tables for calls to Encrypt. This is used, primarily,
|
||||
// by the bcrypt package to reuse the Blowfish key schedule during its
|
||||
// set up. It's unlikely that you need to use this directly.
|
||||
func ExpandKey(key []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
// Using inlined getNextWord for performance.
|
||||
var d uint32
|
||||
for k := 0; k < 4; k++ {
|
||||
d = d<<8 | uint32(key[j])
|
||||
j++
|
||||
if j >= len(key) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
c.p[i] ^= d
|
||||
}
|
||||
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
// This is similar to ExpandKey, but folds the salt during the key
|
||||
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
|
||||
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
|
||||
// and specializing it here is useful.
|
||||
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
c.p[i] ^= getNextWord(key, &j)
|
||||
}
|
||||
|
||||
j = 0
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[0]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
|
||||
xr ^= c.p[17]
|
||||
return xr, xl
|
||||
}
|
||||
|
||||
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[17]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
|
||||
xr ^= c.p[0]
|
||||
return xr, xl
|
||||
}
|
91
vendor/golang.org/x/crypto/blowfish/cipher.go
generated
vendored
|
@@ -1,91 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
|
||||
package blowfish // import "golang.org/x/crypto/blowfish"
|
||||
|
||||
// The code is a port of Bruce Schneier's C implementation.
|
||||
// See http://www.schneier.com/blowfish.html.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// The Blowfish block size in bytes.
|
||||
const BlockSize = 8
|
||||
|
||||
// A Cipher is an instance of Blowfish encryption using a particular key.
|
||||
type Cipher struct {
|
||||
p [18]uint32
|
||||
s0, s1, s2, s3 [256]uint32
|
||||
}
|
||||
|
||||
type KeySizeError int
|
||||
|
||||
func (k KeySizeError) Error() string {
|
||||
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
// NewCipher creates and returns a Cipher.
|
||||
// The key argument should be the Blowfish key, from 1 to 56 bytes.
|
||||
func NewCipher(key []byte) (*Cipher, error) {
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 || k > 56 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
ExpandKey(key, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
|
||||
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
|
||||
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
|
||||
// bytes.
|
||||
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
|
||||
if len(salt) == 0 {
|
||||
return NewCipher(key)
|
||||
}
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
expandKeyWithSalt(key, salt, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// BlockSize returns the Blowfish block size, 8 bytes.
|
||||
// It is necessary to satisfy the Block interface in the
|
||||
// package "crypto/cipher".
|
||||
func (c *Cipher) BlockSize() int { return BlockSize }
|
||||
|
||||
// Encrypt encrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
// Note that for amounts of data larger than a block,
|
||||
// it is not safe to just call Encrypt on successive blocks;
|
||||
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
|
||||
func (c *Cipher) Encrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = encryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
// Decrypt decrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
func (c *Cipher) Decrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = decryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
func initCipher(c *Cipher) {
|
||||
copy(c.p[0:], p[0:])
|
||||
copy(c.s0[0:], s0[0:])
|
||||
copy(c.s1[0:], s1[0:])
|
||||
copy(c.s2[0:], s2[0:])
|
||||
copy(c.s3[0:], s3[0:])
|
||||
}
|
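Not part of the commit: a single-block round trip with the Blowfish cipher above; real use would wrap the cipher in a mode from crypto/cipher instead of calling Encrypt on raw 8-byte blocks. The key and plaintext are placeholders.
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	c, err := blowfish.NewCipher([]byte("example key"))
	if err != nil {
		panic(err)
	}
	src := []byte("8bytes!!") // exactly one BlockSize (8-byte) block
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, blowfish.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("%s\n", out) // 8bytes!!
}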
199
vendor/golang.org/x/crypto/blowfish/const.go
generated
vendored
|
@@ -1,199 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The startup permutation array and substitution boxes.
|
||||
// They are the hexadecimal digits of PI; see:
|
||||
// http://www.schneier.com/code/constants.txt.
|
||||
|
||||
package blowfish
|
||||
|
||||
var s0 = [256]uint32{
|
||||
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
|
||||
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
|
||||
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
|
||||
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
|
||||
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
|
||||
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
|
||||
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
|
||||
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
|
||||
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
|
||||
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
|
||||
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
|
||||
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
|
||||
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
|
||||
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
|
||||
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
|
||||
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
|
||||
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
|
||||
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
|
||||
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
|
||||
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
|
||||
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
|
||||
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
|
||||
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
|
||||
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
|
||||
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
|
||||
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
|
||||
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
|
||||
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
|
||||
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
|
||||
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
|
||||
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
|
||||
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
|
||||
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
|
||||
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
|
||||
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
|
||||
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
|
||||
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
|
||||
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
|
||||
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
|
||||
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
|
||||
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
|
||||
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
|
||||
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
|
||||
}
|
||||
|
||||
var s1 = [256]uint32{
|
||||
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
|
||||
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
|
||||
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
|
||||
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
|
||||
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
|
||||
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
|
||||
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
|
||||
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
|
||||
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
|
||||
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
|
||||
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
|
||||
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
|
||||
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
|
||||
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
|
||||
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
|
||||
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
|
||||
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
|
||||
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
|
||||
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
|
||||
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
|
||||
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
|
||||
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
|
||||
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
|
||||
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
|
||||
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
|
||||
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
|
||||
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
|
||||
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
|
||||
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
|
||||
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
|
||||
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
|
||||
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
|
||||
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
|
||||
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
|
||||
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
|
||||
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
|
||||
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
|
||||
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
|
||||
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
|
||||
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
|
||||
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
|
||||
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
|
||||
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
|
||||
}
|
||||
|
||||
var s2 = [256]uint32{
|
||||
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
|
||||
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
|
||||
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
|
||||
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
|
||||
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
|
||||
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
|
||||
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
|
||||
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
|
||||
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
|
||||
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
|
||||
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
|
||||
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
|
||||
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
|
||||
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
|
||||
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
|
||||
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
|
||||
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
|
||||
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
|
||||
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
|
||||
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
|
||||
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
|
||||
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
|
||||
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
|
||||
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
|
||||
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
|
||||
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
|
||||
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
|
||||
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
|
||||
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
|
||||
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
|
||||
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
|
||||
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
|
||||
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
|
||||
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
|
||||
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
|
||||
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
|
||||
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
|
||||
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
|
||||
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
|
||||
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
|
||||
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
|
||||
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
|
||||
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
|
||||
}
|
||||
|
||||
var s3 = [256]uint32{
|
||||
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
|
||||
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
|
||||
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
|
||||
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
|
||||
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
|
||||
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
|
||||
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
|
||||
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
|
||||
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
|
||||
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
|
||||
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
|
||||
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
|
||||
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
|
||||
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
|
||||
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
|
||||
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
|
||||
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
|
||||
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
|
||||
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
|
||||
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
|
||||
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
|
||||
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
|
||||
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
|
||||
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
|
||||
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
|
||||
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
|
||||
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
|
||||
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
|
||||
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
|
||||
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
|
||||
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
|
||||
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
|
||||
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
|
||||
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
|
||||
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
|
||||
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
|
||||
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
|
||||
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
|
||||
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
|
||||
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
|
||||
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
|
||||
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
|
||||
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
|
||||
}
|
||||
|
||||
var p = [18]uint32{
|
||||
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
|
||||
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
|
||||
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
|
||||
}
|
924
vendor/golang.org/x/crypto/ssh/terminal/terminal.go
generated
vendored
|
@@ -1,924 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package terminal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// EscapeCodes contains escape sequences that can be written to the terminal in
|
||||
// order to achieve different styles of text.
|
||||
type EscapeCodes struct {
|
||||
// Foreground colors
|
||||
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
|
||||
|
||||
// Reset all attributes
|
||||
Reset []byte
|
||||
}
|
||||
|
||||
var vt100EscapeCodes = EscapeCodes{
|
||||
Black: []byte{keyEscape, '[', '3', '0', 'm'},
|
||||
Red: []byte{keyEscape, '[', '3', '1', 'm'},
|
||||
Green: []byte{keyEscape, '[', '3', '2', 'm'},
|
||||
Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
|
||||
Blue: []byte{keyEscape, '[', '3', '4', 'm'},
|
||||
Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
|
||||
Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
|
||||
White: []byte{keyEscape, '[', '3', '7', 'm'},
|
||||
|
||||
Reset: []byte{keyEscape, '[', '0', 'm'},
|
||||
}
|
||||
|
||||
// Terminal contains the state for running a VT100 terminal that is capable of
|
||||
// reading lines of input.
|
||||
type Terminal struct {
|
||||
// AutoCompleteCallback, if non-null, is called for each keypress with
|
||||
// the full input line and the current position of the cursor (in
|
||||
// bytes, as an index into |line|). If it returns ok=false, the key
|
||||
// press is processed normally. Otherwise it returns a replacement line
|
||||
// and the new cursor position.
|
||||
AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
|
||||
|
||||
// Escape contains a pointer to the escape codes for this terminal.
|
||||
// It's always a valid pointer, although the escape codes themselves
|
||||
// may be empty if the terminal doesn't support them.
|
||||
Escape *EscapeCodes
|
||||
|
||||
// lock protects the terminal and the state in this object from
|
||||
// concurrent processing of a key press and a Write() call.
|
||||
lock sync.Mutex
|
||||
|
||||
c io.ReadWriter
|
||||
prompt []rune
|
||||
|
||||
// line is the current line being entered.
|
||||
line []rune
|
||||
// pos is the logical position of the cursor in line
|
||||
pos int
|
||||
// echo is true if local echo is enabled
|
||||
echo bool
|
||||
// pasteActive is true iff there is a bracketed paste operation in
|
||||
// progress.
|
||||
pasteActive bool
|
||||
|
||||
// cursorX contains the current X value of the cursor where the left
|
||||
// edge is 0. cursorY contains the row number where the first row of
|
||||
// the current line is 0.
|
||||
cursorX, cursorY int
|
||||
// maxLine is the greatest value of cursorY so far.
|
||||
maxLine int
|
||||
|
||||
termWidth, termHeight int
|
||||
|
||||
// outBuf contains the terminal data to be sent.
|
||||
outBuf []byte
|
||||
// remainder contains the remainder of any partial key sequences after
|
||||
// a read. It aliases into inBuf.
|
||||
remainder []byte
|
||||
inBuf [256]byte
|
||||
|
||||
// history contains previously entered commands so that they can be
|
||||
// accessed with the up and down keys.
|
||||
history stRingBuffer
|
||||
// historyIndex stores the currently accessed history entry, where zero
|
||||
// means the immediately previous entry.
|
||||
historyIndex int
|
||||
// When navigating up and down the history it's possible to return to
|
||||
// the incomplete, initial line. That value is stored in
|
||||
// historyPending.
|
||||
historyPending string
|
||||
}
|
||||
|
||||
// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
|
||||
// a local terminal, that terminal must first have been put into raw mode.
|
||||
// prompt is a string that is written at the start of each input line (i.e.
|
||||
// "> ").
|
||||
func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
|
||||
return &Terminal{
|
||||
Escape: &vt100EscapeCodes,
|
||||
c: c,
|
||||
prompt: []rune(prompt),
|
||||
termWidth: 80,
|
||||
termHeight: 24,
|
||||
echo: true,
|
||||
historyIndex: -1,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
keyCtrlD = 4
|
||||
keyCtrlU = 21
|
||||
keyEnter = '\r'
|
||||
keyEscape = 27
|
||||
keyBackspace = 127
|
||||
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
|
||||
keyUp
|
||||
keyDown
|
||||
keyLeft
|
||||
keyRight
|
||||
keyAltLeft
|
||||
keyAltRight
|
||||
keyHome
|
||||
keyEnd
|
||||
keyDeleteWord
|
||||
keyDeleteLine
|
||||
keyClearScreen
|
||||
keyPasteStart
|
||||
keyPasteEnd
|
||||
)
|
||||
|
||||
var (
|
||||
crlf = []byte{'\r', '\n'}
|
||||
pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
|
||||
pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
|
||||
)
|
||||
|
||||
// bytesToKey tries to parse a key sequence from b. If successful, it returns
|
||||
// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
|
||||
func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
|
||||
if len(b) == 0 {
|
||||
return utf8.RuneError, nil
|
||||
}
|
||||
|
||||
if !pasteActive {
|
||||
switch b[0] {
|
||||
case 1: // ^A
|
||||
return keyHome, b[1:]
|
||||
case 5: // ^E
|
||||
return keyEnd, b[1:]
|
||||
case 8: // ^H
|
||||
return keyBackspace, b[1:]
|
||||
case 11: // ^K
|
||||
return keyDeleteLine, b[1:]
|
||||
case 12: // ^L
|
||||
return keyClearScreen, b[1:]
|
||||
case 23: // ^W
|
||||
return keyDeleteWord, b[1:]
|
||||
}
|
||||
}
|
||||
|
||||
if b[0] != keyEscape {
|
||||
if !utf8.FullRune(b) {
|
||||
return utf8.RuneError, b
|
||||
}
|
||||
r, l := utf8.DecodeRune(b)
|
||||
return r, b[l:]
|
||||
}
|
||||
|
||||
if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
|
||||
switch b[2] {
|
||||
case 'A':
|
||||
return keyUp, b[3:]
|
||||
case 'B':
|
||||
return keyDown, b[3:]
|
||||
case 'C':
|
||||
return keyRight, b[3:]
|
||||
case 'D':
|
||||
return keyLeft, b[3:]
|
||||
case 'H':
|
||||
return keyHome, b[3:]
|
||||
case 'F':
|
||||
return keyEnd, b[3:]
|
||||
}
|
||||
}
|
||||
|
||||
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
|
||||
switch b[5] {
|
||||
case 'C':
|
||||
return keyAltRight, b[6:]
|
||||
case 'D':
|
||||
return keyAltLeft, b[6:]
|
||||
}
|
||||
}
|
||||
|
||||
if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
|
||||
return keyPasteStart, b[6:]
|
||||
}
|
||||
|
||||
if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
|
||||
return keyPasteEnd, b[6:]
|
||||
}
|
||||
|
||||
// If we get here then we have a key that we don't recognise, or a
|
||||
// partial sequence. It's not clear how one should find the end of a
|
||||
// sequence without knowing them all, but it seems that [a-zA-Z~] only
|
||||
// appears at the end of a sequence.
|
||||
for i, c := range b[0:] {
|
||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
|
||||
return keyUnknown, b[i+1:]
|
||||
}
|
||||
}
|
||||
|
||||
return utf8.RuneError, b
|
||||
}
|
||||
|
||||
// queue appends data to the end of t.outBuf
|
||||
func (t *Terminal) queue(data []rune) {
|
||||
t.outBuf = append(t.outBuf, []byte(string(data))...)
|
||||
}
|
||||
|
||||
var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
|
||||
var space = []rune{' '}
|
||||
|
||||
func isPrintable(key rune) bool {
|
||||
isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
|
||||
return key >= 32 && !isInSurrogateArea
|
||||
}
|
||||
|
||||
// moveCursorToPos appends data to t.outBuf which will move the cursor to the
|
||||
// given, logical position in the text.
|
||||
func (t *Terminal) moveCursorToPos(pos int) {
|
||||
if !t.echo {
|
||||
return
|
||||
}
|
||||
|
||||
x := visualLength(t.prompt) + pos
|
||||
y := x / t.termWidth
|
||||
x = x % t.termWidth
|
||||
|
||||
up := 0
|
||||
if y < t.cursorY {
|
||||
up = t.cursorY - y
|
||||
}
|
||||
|
||||
down := 0
|
||||
if y > t.cursorY {
|
||||
down = y - t.cursorY
|
||||
}
|
||||
|
||||
left := 0
|
||||
if x < t.cursorX {
|
||||
left = t.cursorX - x
|
||||
}
|
||||
|
||||
right := 0
|
||||
if x > t.cursorX {
|
||||
right = x - t.cursorX
|
||||
}
|
||||
|
||||
t.cursorX = x
|
||||
t.cursorY = y
|
||||
t.move(up, down, left, right)
|
||||
}
|
||||
|
||||
func (t *Terminal) move(up, down, left, right int) {
|
||||
movement := make([]rune, 3*(up+down+left+right))
|
||||
m := movement
|
||||
for i := 0; i < up; i++ {
|
||||
m[0] = keyEscape
|
||||
m[1] = '['
|
||||
m[2] = 'A'
|
||||
m = m[3:]
|
||||
}
|
||||
for i := 0; i < down; i++ {
|
||||
m[0] = keyEscape
|
||||
m[1] = '['
|
||||
m[2] = 'B'
|
||||
m = m[3:]
|
||||
}
|
||||
for i := 0; i < left; i++ {
|
||||
m[0] = keyEscape
|
||||
m[1] = '['
|
||||
m[2] = 'D'
|
||||
m = m[3:]
|
||||
}
|
||||
for i := 0; i < right; i++ {
|
||||
m[0] = keyEscape
|
||||
m[1] = '['
|
||||
m[2] = 'C'
|
||||
m = m[3:]
|
||||
}
|
||||
|
||||
t.queue(movement)
|
||||
}
|
||||
|
||||
func (t *Terminal) clearLineToRight() {
|
||||
op := []rune{keyEscape, '[', 'K'}
|
||||
t.queue(op)
|
||||
}
|
||||
|
||||
const maxLineLength = 4096
|
||||
|
||||
func (t *Terminal) setLine(newLine []rune, newPos int) {
|
||||
if t.echo {
|
||||
t.moveCursorToPos(0)
|
||||
t.writeLine(newLine)
|
||||
for i := len(newLine); i < len(t.line); i++ {
|
||||
t.writeLine(space)
|
||||
}
|
||||
t.moveCursorToPos(newPos)
|
||||
}
|
||||
t.line = newLine
|
||||
t.pos = newPos
|
||||
}
|
||||
|
||||
func (t *Terminal) advanceCursor(places int) {
|
||||
t.cursorX += places
|
||||
t.cursorY += t.cursorX / t.termWidth
|
||||
if t.cursorY > t.maxLine {
|
||||
t.maxLine = t.cursorY
|
||||
}
|
||||
t.cursorX = t.cursorX % t.termWidth
|
||||
|
||||
if places > 0 && t.cursorX == 0 {
|
||||
// Normally terminals will advance the current position
|
||||
// when writing a character. But that doesn't happen
|
||||
// for the last character in a line. However, when
|
||||
// writing a character (except a new line) that causes
|
||||
// a line wrap, the position will be advanced two
|
||||
// places.
|
||||
//
|
||||
// So, if we are stopping at the end of a line, we
|
||||
// need to write a newline so that our cursor can be
|
||||
// advanced to the next line.
|
||||
t.outBuf = append(t.outBuf, '\r', '\n')
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) eraseNPreviousChars(n int) {
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if t.pos < n {
|
||||
n = t.pos
|
||||
}
|
||||
t.pos -= n
|
||||
t.moveCursorToPos(t.pos)
|
||||
|
||||
copy(t.line[t.pos:], t.line[n+t.pos:])
|
||||
t.line = t.line[:len(t.line)-n]
|
||||
if t.echo {
|
||||
t.writeLine(t.line[t.pos:])
|
||||
for i := 0; i < n; i++ {
|
||||
t.queue(space)
|
||||
}
|
||||
t.advanceCursor(n)
|
||||
t.moveCursorToPos(t.pos)
|
||||
}
|
||||
}
|
||||
|
||||
// countToLeftWord returns then number of characters from the cursor to the
|
||||
// start of the previous word.
|
||||
func (t *Terminal) countToLeftWord() int {
|
||||
if t.pos == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
pos := t.pos - 1
|
||||
for pos > 0 {
|
||||
if t.line[pos] != ' ' {
|
||||
break
|
||||
}
|
||||
pos--
|
||||
}
|
||||
for pos > 0 {
|
||||
if t.line[pos] == ' ' {
|
||||
pos++
|
||||
break
|
||||
}
|
||||
pos--
|
||||
}
|
||||
|
||||
return t.pos - pos
|
||||
}
|
||||
|
||||
// countToRightWord returns then number of characters from the cursor to the
|
||||
// start of the next word.
|
||||
func (t *Terminal) countToRightWord() int {
|
||||
pos := t.pos
|
||||
for pos < len(t.line) {
|
||||
if t.line[pos] == ' ' {
|
||||
break
|
||||
}
|
||||
pos++
|
||||
}
|
||||
for pos < len(t.line) {
|
||||
if t.line[pos] != ' ' {
|
||||
break
|
||||
}
|
||||
pos++
|
||||
}
|
||||
return pos - t.pos
|
||||
}
|
||||
|
||||
// visualLength returns the number of visible glyphs in s.
|
||||
func visualLength(runes []rune) int {
|
||||
inEscapeSeq := false
|
||||
length := 0
|
||||
|
||||
for _, r := range runes {
|
||||
switch {
|
||||
case inEscapeSeq:
|
||||
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
|
||||
inEscapeSeq = false
|
||||
}
|
||||
case r == '\x1b':
|
||||
inEscapeSeq = true
|
||||
default:
|
||||
length++
|
||||
}
|
||||
}
|
||||
|
||||
return length
|
||||
}
|
||||
|
||||
// handleKey processes the given key and, optionally, returns a line of text
|
||||
// that the user has entered.
|
||||
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
|
||||
if t.pasteActive && key != keyEnter {
|
||||
t.addKeyToLine(key)
|
||||
return
|
||||
}
|
||||
|
||||
switch key {
|
||||
case keyBackspace:
|
||||
if t.pos == 0 {
|
||||
return
|
||||
}
|
||||
t.eraseNPreviousChars(1)
|
||||
case keyAltLeft:
|
||||
// move left by a word.
|
||||
t.pos -= t.countToLeftWord()
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyAltRight:
|
||||
// move right by a word.
|
||||
t.pos += t.countToRightWord()
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyLeft:
|
||||
if t.pos == 0 {
|
||||
return
|
||||
}
|
||||
t.pos--
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyRight:
|
||||
if t.pos == len(t.line) {
|
||||
return
|
||||
}
|
||||
t.pos++
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyHome:
|
||||
if t.pos == 0 {
|
||||
return
|
||||
}
|
||||
t.pos = 0
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyEnd:
|
||||
if t.pos == len(t.line) {
|
||||
return
|
||||
}
|
||||
t.pos = len(t.line)
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyUp:
|
||||
entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
if t.historyIndex == -1 {
|
||||
t.historyPending = string(t.line)
|
||||
}
|
||||
t.historyIndex++
|
||||
runes := []rune(entry)
|
||||
t.setLine(runes, len(runes))
|
||||
case keyDown:
|
||||
switch t.historyIndex {
|
||||
case -1:
|
||||
return
|
||||
case 0:
|
||||
runes := []rune(t.historyPending)
|
||||
t.setLine(runes, len(runes))
|
||||
t.historyIndex--
|
||||
default:
|
||||
entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
|
||||
if ok {
|
||||
t.historyIndex--
|
||||
runes := []rune(entry)
|
||||
t.setLine(runes, len(runes))
|
||||
}
|
||||
}
|
||||
case keyEnter:
|
||||
t.moveCursorToPos(len(t.line))
|
||||
t.queue([]rune("\r\n"))
|
||||
line = string(t.line)
|
||||
ok = true
|
||||
t.line = t.line[:0]
|
||||
t.pos = 0
|
||||
t.cursorX = 0
|
||||
t.cursorY = 0
|
||||
t.maxLine = 0
|
||||
case keyDeleteWord:
|
||||
// Delete zero or more spaces and then one or more characters.
|
||||
t.eraseNPreviousChars(t.countToLeftWord())
|
||||
case keyDeleteLine:
|
||||
// Delete everything from the current cursor position to the
|
||||
// end of line.
|
||||
for i := t.pos; i < len(t.line); i++ {
|
||||
t.queue(space)
|
||||
t.advanceCursor(1)
|
||||
}
|
||||
t.line = t.line[:t.pos]
|
||||
t.moveCursorToPos(t.pos)
|
||||
case keyCtrlD:
|
||||
// Erase the character under the current position.
|
||||
// The EOF case when the line is empty is handled in
|
||||
// readLine().
|
||||
if t.pos < len(t.line) {
|
||||
t.pos++
|
||||
t.eraseNPreviousChars(1)
|
||||
}
|
||||
case keyCtrlU:
|
||||
t.eraseNPreviousChars(t.pos)
|
||||
case keyClearScreen:
|
||||
// Erases the screen and moves the cursor to the home position.
|
||||
t.queue([]rune("\x1b[2J\x1b[H"))
|
||||
t.queue(t.prompt)
|
||||
t.cursorX, t.cursorY = 0, 0
|
||||
t.advanceCursor(visualLength(t.prompt))
|
||||
t.setLine(t.line, t.pos)
|
||||
default:
|
||||
if t.AutoCompleteCallback != nil {
|
||||
prefix := string(t.line[:t.pos])
|
||||
suffix := string(t.line[t.pos:])
|
||||
|
||||
t.lock.Unlock()
|
||||
newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
|
||||
t.lock.Lock()
|
||||
|
||||
if completeOk {
|
||||
t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
|
||||
return
|
||||
}
|
||||
}
|
||||
if !isPrintable(key) {
|
||||
return
|
||||
}
|
||||
if len(t.line) == maxLineLength {
|
||||
return
|
||||
}
|
||||
t.addKeyToLine(key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// addKeyToLine inserts the given key at the current position in the current
|
||||
// line.
|
||||
func (t *Terminal) addKeyToLine(key rune) {
|
||||
if len(t.line) == cap(t.line) {
|
||||
newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
|
||||
copy(newLine, t.line)
|
||||
t.line = newLine
|
||||
}
|
||||
t.line = t.line[:len(t.line)+1]
|
||||
copy(t.line[t.pos+1:], t.line[t.pos:])
|
||||
t.line[t.pos] = key
|
||||
if t.echo {
|
||||
t.writeLine(t.line[t.pos:])
|
||||
}
|
||||
t.pos++
|
||||
t.moveCursorToPos(t.pos)
|
||||
}
|
||||
|
||||
func (t *Terminal) writeLine(line []rune) {
|
||||
for len(line) != 0 {
|
||||
remainingOnLine := t.termWidth - t.cursorX
|
||||
todo := len(line)
|
||||
if todo > remainingOnLine {
|
||||
todo = remainingOnLine
|
||||
}
|
||||
t.queue(line[:todo])
|
||||
t.advanceCursor(visualLength(line[:todo]))
|
||||
line = line[todo:]
|
||||
}
|
||||
}
|
||||
|
||||
// writeWithCRLF writes buf to w but replaces all occurances of \n with \r\n.
|
||||
func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
|
||||
for len(buf) > 0 {
|
||||
i := bytes.IndexByte(buf, '\n')
|
||||
todo := len(buf)
|
||||
if i >= 0 {
|
||||
todo = i
|
||||
}
|
||||
|
||||
var nn int
|
||||
nn, err = w.Write(buf[:todo])
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
buf = buf[todo:]
|
||||
|
||||
if i >= 0 {
|
||||
if _, err = w.Write(crlf); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += 1
|
||||
buf = buf[1:]
|
||||
}
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (t *Terminal) Write(buf []byte) (n int, err error) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
if t.cursorX == 0 && t.cursorY == 0 {
|
||||
// This is the easy case: there's nothing on the screen that we
|
||||
// have to move out of the way.
|
||||
return writeWithCRLF(t.c, buf)
|
||||
}
|
||||
|
||||
// We have a prompt and possibly user input on the screen. We
|
||||
// have to clear it first.
|
||||
t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
|
||||
t.cursorX = 0
|
||||
t.clearLineToRight()
|
||||
|
||||
for t.cursorY > 0 {
|
||||
t.move(1 /* up */, 0, 0, 0)
|
||||
t.cursorY--
|
||||
t.clearLineToRight()
|
||||
}
|
||||
|
||||
if _, err = t.c.Write(t.outBuf); err != nil {
|
||||
return
|
||||
}
|
||||
t.outBuf = t.outBuf[:0]
|
||||
|
||||
if n, err = writeWithCRLF(t.c, buf); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.writeLine(t.prompt)
|
||||
if t.echo {
|
||||
t.writeLine(t.line)
|
||||
}
|
||||
|
||||
t.moveCursorToPos(t.pos)
|
||||
|
||||
if _, err = t.c.Write(t.outBuf); err != nil {
|
||||
return
|
||||
}
|
||||
t.outBuf = t.outBuf[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// ReadPassword temporarily changes the prompt and reads a password, without
|
||||
// echo, from the terminal.
|
||||
func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
oldPrompt := t.prompt
|
||||
t.prompt = []rune(prompt)
|
||||
t.echo = false
|
||||
|
||||
line, err = t.readLine()
|
||||
|
||||
t.prompt = oldPrompt
|
||||
t.echo = true
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ReadLine returns a line of input from the terminal.
|
||||
func (t *Terminal) ReadLine() (line string, err error) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
return t.readLine()
|
||||
}
|
||||
|
||||
func (t *Terminal) readLine() (line string, err error) {
|
||||
// t.lock must be held at this point
|
||||
|
||||
if t.cursorX == 0 && t.cursorY == 0 {
|
||||
t.writeLine(t.prompt)
|
||||
t.c.Write(t.outBuf)
|
||||
t.outBuf = t.outBuf[:0]
|
||||
}
|
||||
|
||||
lineIsPasted := t.pasteActive
|
||||
|
||||
for {
|
||||
rest := t.remainder
|
||||
lineOk := false
|
||||
for !lineOk {
|
||||
var key rune
|
||||
key, rest = bytesToKey(rest, t.pasteActive)
|
||||
if key == utf8.RuneError {
|
||||
break
|
||||
}
|
||||
if !t.pasteActive {
|
||||
if key == keyCtrlD {
|
||||
if len(t.line) == 0 {
|
||||
return "", io.EOF
|
||||
}
|
||||
}
|
||||
if key == keyPasteStart {
|
||||
t.pasteActive = true
|
||||
if len(t.line) == 0 {
|
||||
lineIsPasted = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else if key == keyPasteEnd {
|
||||
t.pasteActive = false
|
||||
continue
|
||||
}
|
||||
if !t.pasteActive {
|
||||
lineIsPasted = false
|
||||
}
|
||||
line, lineOk = t.handleKey(key)
|
||||
}
|
||||
if len(rest) > 0 {
|
||||
n := copy(t.inBuf[:], rest)
|
||||
t.remainder = t.inBuf[:n]
|
||||
} else {
|
||||
t.remainder = nil
|
||||
}
|
||||
t.c.Write(t.outBuf)
|
||||
t.outBuf = t.outBuf[:0]
|
||||
if lineOk {
|
||||
if t.echo {
|
||||
t.historyIndex = -1
|
||||
t.history.Add(line)
|
||||
}
|
||||
if lineIsPasted {
|
||||
err = ErrPasteIndicator
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// t.remainder is a slice at the beginning of t.inBuf
|
||||
// containing a partial key sequence
|
||||
readBuf := t.inBuf[len(t.remainder):]
|
||||
var n int
|
||||
|
||||
t.lock.Unlock()
|
||||
n, err = t.c.Read(readBuf)
|
||||
t.lock.Lock()
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.remainder = t.inBuf[:n+len(t.remainder)]
|
||||
}
|
||||
|
||||
panic("unreachable") // for Go 1.0.
|
||||
}
|
||||
|
||||
// SetPrompt sets the prompt to be used when reading subsequent lines.
|
||||
func (t *Terminal) SetPrompt(prompt string) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
t.prompt = []rune(prompt)
|
||||
}
|
||||
|
||||
func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
|
||||
// Move cursor to column zero at the start of the line.
|
||||
t.move(t.cursorY, 0, t.cursorX, 0)
|
||||
t.cursorX, t.cursorY = 0, 0
|
||||
t.clearLineToRight()
|
||||
for t.cursorY < numPrevLines {
|
||||
// Move down a line
|
||||
t.move(0, 1, 0, 0)
|
||||
t.cursorY++
|
||||
t.clearLineToRight()
|
||||
}
|
||||
// Move back to beginning.
|
||||
t.move(t.cursorY, 0, 0, 0)
|
||||
t.cursorX, t.cursorY = 0, 0
|
||||
|
||||
t.queue(t.prompt)
|
||||
t.advanceCursor(visualLength(t.prompt))
|
||||
t.writeLine(t.line)
|
||||
t.moveCursorToPos(t.pos)
|
||||
}
|
||||
|
||||
func (t *Terminal) SetSize(width, height int) error {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
|
||||
if width == 0 {
|
||||
width = 1
|
||||
}
|
||||
|
||||
oldWidth := t.termWidth
|
||||
t.termWidth, t.termHeight = width, height
|
||||
|
||||
switch {
|
||||
case width == oldWidth:
|
||||
// If the width didn't change then nothing else needs to be
|
||||
// done.
|
||||
return nil
|
||||
case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
|
||||
// If there is nothing on current line and no prompt printed,
|
||||
// just do nothing
|
||||
return nil
|
||||
case width < oldWidth:
|
||||
// Some terminals (e.g. xterm) will truncate lines that were
|
||||
// too long when shinking. Others, (e.g. gnome-terminal) will
|
||||
// attempt to wrap them. For the former, repainting t.maxLine
|
||||
// works great, but that behaviour goes badly wrong in the case
|
||||
// of the latter because they have doubled every full line.
|
||||
|
||||
// We assume that we are working on a terminal that wraps lines
|
||||
// and adjust the cursor position based on every previous line
|
||||
// wrapping and turning into two. This causes the prompt on
|
||||
// xterms to move upwards, which isn't great, but it avoids a
|
||||
// huge mess with gnome-terminal.
|
||||
if t.cursorX >= t.termWidth {
|
||||
t.cursorX = t.termWidth - 1
|
||||
}
|
||||
t.cursorY *= 2
|
||||
t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
|
||||
case width > oldWidth:
|
||||
// If the terminal expands then our position calculations will
|
||||
// be wrong in the future because we think the cursor is
|
||||
// |t.pos| chars into the string, but there will be a gap at
|
||||
// the end of any wrapped line.
|
||||
//
|
||||
// But the position will actually be correct until we move, so
|
||||
// we can move back to the beginning and repaint everything.
|
||||
t.clearAndRepaintLinePlusNPrevious(t.maxLine)
|
||||
}
|
||||
|
||||
_, err := t.c.Write(t.outBuf)
|
||||
t.outBuf = t.outBuf[:0]
|
||||
return err
|
||||
}
|
||||
|
||||
type pasteIndicatorError struct{}
|
||||
|
||||
func (pasteIndicatorError) Error() string {
|
||||
return "terminal: ErrPasteIndicator not correctly handled"
|
||||
}
|
||||
|
||||
// ErrPasteIndicator may be returned from ReadLine as the error, in addition
|
||||
// to valid line data. It indicates that bracketed paste mode is enabled and
|
||||
// that the returned line consists only of pasted data. Programs may wish to
|
||||
// interpret pasted data more literally than typed data.
|
||||
var ErrPasteIndicator = pasteIndicatorError{}
|
||||
|
||||
// SetBracketedPasteMode requests that the terminal bracket paste operations
|
||||
// with markers. Not all terminals support this but, if it is supported, then
|
||||
// enabling this mode will stop any autocomplete callback from running due to
|
||||
// pastes. Additionally, any lines that are completely pasted will be returned
|
||||
// from ReadLine with the error set to ErrPasteIndicator.
|
||||
func (t *Terminal) SetBracketedPasteMode(on bool) {
|
||||
if on {
|
||||
io.WriteString(t.c, "\x1b[?2004h")
|
||||
} else {
|
||||
io.WriteString(t.c, "\x1b[?2004l")
|
||||
}
|
||||
}
|
||||
|
||||
// stRingBuffer is a ring buffer of strings.
|
||||
type stRingBuffer struct {
|
||||
// entries contains max elements.
|
||||
entries []string
|
||||
max int
|
||||
// head contains the index of the element most recently added to the ring.
|
||||
head int
|
||||
// size contains the number of elements in the ring.
|
||||
size int
|
||||
}
|
||||
|
||||
func (s *stRingBuffer) Add(a string) {
|
||||
if s.entries == nil {
|
||||
const defaultNumEntries = 100
|
||||
s.entries = make([]string, defaultNumEntries)
|
||||
s.max = defaultNumEntries
|
||||
}
|
||||
|
||||
s.head = (s.head + 1) % s.max
|
||||
s.entries[s.head] = a
|
||||
if s.size < s.max {
|
||||
s.size++
|
||||
}
|
||||
}
|
||||
|
||||
// NthPreviousEntry returns the value passed to the nth previous call to Add.
|
||||
// If n is zero then the immediately prior value is returned, if one, then the
|
||||
// next most recent, and so on. If such an element doesn't exist then ok is
|
||||
// false.
|
||||
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
|
||||
if n >= s.size {
|
||||
return "", false
|
||||
}
|
||||
index := s.head - n
|
||||
if index < 0 {
|
||||
index += s.max
|
||||
}
|
||||
return s.entries[index], true
|
||||
}
|
133
vendor/golang.org/x/crypto/ssh/terminal/util.go
generated
vendored
@@ -1,133 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd

// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// 	oldState, err := terminal.MakeRaw(0)
// 	if err != nil {
// 	        panic(err)
// 	}
// 	defer terminal.Restore(0, oldState)
package terminal // import "golang.org/x/crypto/ssh/terminal"

import (
	"io"
	"syscall"
	"unsafe"
)

// State contains the state of a terminal.
type State struct {
	termios syscall.Termios
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	var termios syscall.Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}

// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
	var oldState State
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
		return nil, err
	}

	newState := oldState.termios
	// This attempts to replicate the behaviour documented for cfmakeraw in
	// the termios(3) manpage.
	newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
	newState.Oflag &^= syscall.OPOST
	newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
	newState.Cflag &^= syscall.CSIZE | syscall.PARENB
	newState.Cflag |= syscall.CS8
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
		return nil, err
	}

	return &oldState, nil
}

// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
	var oldState State
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
		return nil, err
	}

	return &oldState, nil
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
	return err
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
	var dimensions [4]uint16

	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
		return -1, -1, err
	}
	return int(dimensions[1]), int(dimensions[0]), nil
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	var oldState syscall.Termios
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
		return nil, err
	}

	newState := oldState
	newState.Lflag &^= syscall.ECHO
	newState.Lflag |= syscall.ICANON | syscall.ISIG
	newState.Iflag |= syscall.ICRNL
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
		return nil, err
	}

	defer func() {
		syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
	}()

	var buf [16]byte
	var ret []byte
	for {
		n, err := syscall.Read(fd, buf[:])
		if err != nil {
			return nil, err
		}
		if n == 0 {
			if len(ret) == 0 {
				return nil, io.EOF
			}
			break
		}
		if buf[n-1] == '\n' {
			n--
		}
		ret = append(ret, buf[:n]...)
		if n < len(buf) {
			break
		}
	}

	return ret, nil
}
12
vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
generated
vendored
@@ -1,12 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd netbsd openbsd

package terminal

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA
const ioctlWriteTermios = syscall.TIOCSETA
11
vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
generated
vendored
@@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package terminal

// These constants are declared here, rather than importing
// them from the syscall package as some syscall packages, even
// on linux, for example gccgo, do not declare them.
const ioctlReadTermios = 0x5401  // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS
58
vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
generated
vendored
@@ -1,58 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// 	oldState, err := terminal.MakeRaw(0)
// 	if err != nil {
// 	        panic(err)
// 	}
// 	defer terminal.Restore(0, oldState)
package terminal

import (
	"fmt"
	"runtime"
)

type State struct{}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	return false
}

// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
	return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
	return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
	return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
	return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
73
vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
generated
vendored
@@ -1,73 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build solaris

package terminal // import "golang.org/x/crypto/ssh/terminal"

import (
	"golang.org/x/sys/unix"
	"io"
	"syscall"
)

// State contains the state of a terminal.
type State struct {
	termios syscall.Termios
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
	var termio unix.Termio
	err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio)
	return err == nil
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	// see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
	val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		return nil, err
	}
	oldState := *val

	newState := oldState
	newState.Lflag &^= syscall.ECHO
	newState.Lflag |= syscall.ICANON | syscall.ISIG
	newState.Iflag |= syscall.ICRNL
	err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
	if err != nil {
		return nil, err
	}

	defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)

	var buf [16]byte
	var ret []byte
	for {
		n, err := syscall.Read(fd, buf[:])
		if err != nil {
			return nil, err
		}
		if n == 0 {
			if len(ret) == 0 {
				return nil, io.EOF
			}
			break
		}
		if buf[n-1] == '\n' {
			n--
		}
		ret = append(ret, buf[:n]...)
		if n < len(buf) {
			break
		}
	}

	return ret, nil
}
174
vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
generated
vendored
|
@@ -1,174 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
// Package terminal provides support functions for dealing with terminals, as
|
||||
// commonly found on UNIX systems.
|
||||
//
|
||||
// Putting a terminal into raw mode is the most common requirement:
|
||||
//
|
||||
// oldState, err := terminal.MakeRaw(0)
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// defer terminal.Restore(0, oldState)
|
||||
package terminal
|
||||
|
||||
import (
|
||||
"io"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
enableLineInput = 2
|
||||
enableEchoInput = 4
|
||||
enableProcessedInput = 1
|
||||
enableWindowInput = 8
|
||||
enableMouseInput = 16
|
||||
enableInsertMode = 32
|
||||
enableQuickEditMode = 64
|
||||
enableExtendedFlags = 128
|
||||
enableAutoPosition = 256
|
||||
enableProcessedOutput = 1
|
||||
enableWrapAtEolOutput = 2
|
||||
)
|
||||
|
||||
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
var (
|
||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
)
|
||||
|
||||
type (
|
||||
short int16
|
||||
word uint16
|
||||
|
||||
coord struct {
|
||||
x short
|
||||
y short
|
||||
}
|
||||
smallRect struct {
|
||||
left short
|
||||
top short
|
||||
right short
|
||||
bottom short
|
||||
}
|
||||
consoleScreenBufferInfo struct {
|
||||
size coord
|
||||
cursorPosition coord
|
||||
attributes word
|
||||
window smallRect
|
||||
maximumWindowSize coord
|
||||
}
|
||||
)
|
||||
|
||||
type State struct {
|
||||
mode uint32
|
||||
}
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(fd int) bool {
|
||||
var st uint32
|
||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
||||
return r != 0 && e == 0
|
||||
}
|
||||
|
||||
// MakeRaw put the terminal connected to the given file descriptor into raw
|
||||
// mode and returns the previous state of the terminal so that it can be
|
||||
// restored.
|
||||
func MakeRaw(fd int) (*State, error) {
|
||||
var st uint32
|
||||
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
||||
if e != 0 {
|
||||
return nil, error(e)
|
||||
}
|
||||
raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
|
||||
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
|
||||
if e != 0 {
|
||||
return nil, error(e)
|
||||
}
|
||||
return &State{st}, nil
|
||||
}
|
||||
|
||||
// GetState returns the current state of a terminal which may be useful to
|
||||
// restore the terminal after a signal.
|
||||
func GetState(fd int) (*State, error) {
|
||||
var st uint32
|
||||
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
||||
if e != 0 {
|
||||
return nil, error(e)
|
||||
}
|
||||
return &State{st}, nil
|
||||
}
|
||||
|
||||
// Restore restores the terminal connected to the given file descriptor to a
|
||||
// previous state.
|
||||
func Restore(fd int, state *State) error {
|
||||
_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetSize returns the dimensions of the given terminal.
|
||||
func GetSize(fd int) (width, height int, err error) {
|
||||
var info consoleScreenBufferInfo
|
||||
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
|
||||
if e != 0 {
|
||||
return 0, 0, error(e)
|
||||
}
|
||||
return int(info.size.x), int(info.size.y), nil
|
||||
}
|
||||
|
||||
// ReadPassword reads a line of input from a terminal without local echo. This
|
||||
// is commonly used for inputting passwords and other sensitive data. The slice
|
||||
// returned does not include the \n.
|
||||
func ReadPassword(fd int) ([]byte, error) {
|
||||
var st uint32
|
||||
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
||||
if e != 0 {
|
||||
return nil, error(e)
|
||||
}
|
||||
old := st
|
||||
|
||||
st &^= (enableEchoInput)
|
||||
st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
|
||||
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
|
||||
if e != 0 {
|
||||
return nil, error(e)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
|
||||
}()
|
||||
|
||||
var buf [16]byte
|
||||
var ret []byte
|
||||
for {
|
||||
n, err := syscall.Read(syscall.Handle(fd), buf[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n == 0 {
|
||||
if len(ret) == 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
break
|
||||
}
|
||||
if buf[n-1] == '\n' {
|
||||
n--
|
||||
}
|
||||
if n > 0 && buf[n-1] == '\r' {
|
||||
n--
|
||||
}
|
||||
ret = append(ret, buf[:n]...)
|
||||
if n < len(buf) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|