Add vendoring to containerd master
Initial vendor list validated with empty $GOPATH and only master checked out; followed by `make` and verified that all binaries build properly. Updates require github.com/LK4D4/vndr tool. Signed-off-by: Phil Estes <estesp@linux.vnet.ibm.com>
This commit is contained in:
parent
286ea04591
commit
dd9309c15e
407 changed files with 113562 additions and 0 deletions
17
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
Normal file
17
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// Auth is an interface for implementing authentication
|
||||
type Auth interface {
|
||||
// Check if a client is authorized to connect
|
||||
Check(c ClientAuth) bool
|
||||
}
|
||||
|
||||
// ClientAuth is an interface for client authentication
|
||||
type ClientAuth interface {
|
||||
// Get options associated with a client
|
||||
GetOpts() *clientOpts
|
||||
// Optionally map a user after auth.
|
||||
RegisterUser(*User)
|
||||
}
|
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
Normal file
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.4,!go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.4 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
Normal file
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.5 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
// The SHA384 versions are only in Go1.5
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
1366
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
Normal file
1366
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
82
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
Normal file
82
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// VERSION is the current version for the server.
|
||||
VERSION = "0.9.6"
|
||||
|
||||
// DEFAULT_PORT is the default port for client connections.
|
||||
DEFAULT_PORT = 4222
|
||||
|
||||
// RANDOM_PORT is the value for port that, when supplied, will cause the
|
||||
// server to listen on a randomly-chosen available port. The resolved port
|
||||
// is available via the Addr() method.
|
||||
RANDOM_PORT = -1
|
||||
|
||||
// DEFAULT_HOST defaults to all interfaces.
|
||||
DEFAULT_HOST = "0.0.0.0"
|
||||
|
||||
// MAX_CONTROL_LINE_SIZE is the maximum allowed protocol control line size.
|
||||
// 1k should be plenty since payloads sans connect string are separate
|
||||
MAX_CONTROL_LINE_SIZE = 1024
|
||||
|
||||
// MAX_PAYLOAD_SIZE is the maximum allowed payload size. Should be using
|
||||
// something different if > 1MB payloads are needed.
|
||||
MAX_PAYLOAD_SIZE = (1024 * 1024)
|
||||
|
||||
// DEFAULT_MAX_CONNECTIONS is the default maximum connections allowed.
|
||||
DEFAULT_MAX_CONNECTIONS = (64 * 1024)
|
||||
|
||||
// TLS_TIMEOUT is the TLS wait time.
|
||||
TLS_TIMEOUT = 500 * time.Millisecond
|
||||
|
||||
// AUTH_TIMEOUT is the authorization wait time.
|
||||
AUTH_TIMEOUT = 2 * TLS_TIMEOUT
|
||||
|
||||
// DEFAULT_PING_INTERVAL is how often pings are sent to clients and routes.
|
||||
DEFAULT_PING_INTERVAL = 2 * time.Minute
|
||||
|
||||
// DEFAULT_PING_MAX_OUT is maximum allowed pings outstanding before disconnect.
|
||||
DEFAULT_PING_MAX_OUT = 2
|
||||
|
||||
// CR_LF string
|
||||
CR_LF = "\r\n"
|
||||
|
||||
// LEN_CR_LF hold onto the computed size.
|
||||
LEN_CR_LF = len(CR_LF)
|
||||
|
||||
// DEFAULT_FLUSH_DEADLINE is the write/flush deadlines.
|
||||
DEFAULT_FLUSH_DEADLINE = 2 * time.Second
|
||||
|
||||
// DEFAULT_HTTP_PORT is the default monitoring port.
|
||||
DEFAULT_HTTP_PORT = 8222
|
||||
|
||||
// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
|
||||
ACCEPT_MIN_SLEEP = 10 * time.Millisecond
|
||||
|
||||
// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors
|
||||
ACCEPT_MAX_SLEEP = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_CONNECT Route solicitation intervals.
|
||||
DEFAULT_ROUTE_CONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_RECONNECT Route reconnect intervals.
|
||||
DEFAULT_ROUTE_RECONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_DIAL Route dial timeout.
|
||||
DEFAULT_ROUTE_DIAL = 1 * time.Second
|
||||
|
||||
// PROTO_SNIPPET_SIZE is the default size of proto to print on parse errors.
|
||||
PROTO_SNIPPET_SIZE = 32
|
||||
|
||||
// MAX_MSG_ARGS Maximum possible number of arguments from MSG proto.
|
||||
MAX_MSG_ARGS = 4
|
||||
|
||||
// MAX_PUB_ARGS Maximum possible number of arguments from PUB proto.
|
||||
MAX_PUB_ARGS = 3
|
||||
)
|
32
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
Normal file
32
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrConnectionClosed represents an error condition on a closed connection.
|
||||
ErrConnectionClosed = errors.New("Connection Closed")
|
||||
|
||||
// ErrAuthorization represents an error condition on failed authorization.
|
||||
ErrAuthorization = errors.New("Authorization Error")
|
||||
|
||||
// ErrAuthTimeout represents an error condition on failed authorization due to timeout.
|
||||
ErrAuthTimeout = errors.New("Authorization Timeout")
|
||||
|
||||
// ErrMaxPayload represents an error condition when the payload is too big.
|
||||
ErrMaxPayload = errors.New("Maximum Payload Exceeded")
|
||||
|
||||
// ErrMaxControlLine represents an error condition when the control line is too big.
|
||||
ErrMaxControlLine = errors.New("Maximum Control Line Exceeded")
|
||||
|
||||
// ErrReservedPublishSubject represents an error condition when sending to a reserved subject, e.g. _SYS.>
|
||||
ErrReservedPublishSubject = errors.New("Reserved Internal Subject")
|
||||
|
||||
// ErrBadClientProtocol signals a client requested an invalud client protocol.
|
||||
ErrBadClientProtocol = errors.New("Invalid Client Protocol")
|
||||
|
||||
// ErrTooManyConnections signals a client that the maximum number of connections supported by the
|
||||
// server has been reached.
|
||||
ErrTooManyConnections = errors.New("Maximum Connections Exceeded")
|
||||
)
|
132
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
Normal file
132
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/nats-io/gnatsd/logger"
|
||||
)
|
||||
|
||||
// Package globals for performance checks
|
||||
var trace int32
|
||||
var debug int32
|
||||
|
||||
var log = struct {
|
||||
sync.Mutex
|
||||
logger Logger
|
||||
}{}
|
||||
|
||||
// Logger interface of the NATS Server
|
||||
type Logger interface {
|
||||
|
||||
// Log a notice statement
|
||||
Noticef(format string, v ...interface{})
|
||||
|
||||
// Log a fatal error
|
||||
Fatalf(format string, v ...interface{})
|
||||
|
||||
// Log an error
|
||||
Errorf(format string, v ...interface{})
|
||||
|
||||
// Log a debug statement
|
||||
Debugf(format string, v ...interface{})
|
||||
|
||||
// Log a trace statement
|
||||
Tracef(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger sets the logger of the server
|
||||
func (s *Server) SetLogger(logger Logger, debugFlag, traceFlag bool) {
|
||||
if debugFlag {
|
||||
atomic.StoreInt32(&debug, 1)
|
||||
} else {
|
||||
atomic.StoreInt32(&debug, 0)
|
||||
}
|
||||
if traceFlag {
|
||||
atomic.StoreInt32(&trace, 1)
|
||||
} else {
|
||||
atomic.StoreInt32(&trace, 0)
|
||||
}
|
||||
|
||||
log.Lock()
|
||||
log.logger = logger
|
||||
log.Unlock()
|
||||
}
|
||||
|
||||
// If the logger is a file based logger, close and re-open the file.
|
||||
// This allows for file rotation by 'mv'ing the file then signalling
|
||||
// the process to trigger this function.
|
||||
func (s *Server) ReOpenLogFile() {
|
||||
// Check to make sure this is a file logger.
|
||||
log.Lock()
|
||||
ll := log.logger
|
||||
log.Unlock()
|
||||
|
||||
if ll == nil {
|
||||
Noticef("File log re-open ignored, no logger")
|
||||
return
|
||||
}
|
||||
if s.opts.LogFile == "" {
|
||||
Noticef("File log re-open ignored, not a file logger")
|
||||
} else {
|
||||
fileLog := logger.NewFileLogger(s.opts.LogFile,
|
||||
s.opts.Logtime, s.opts.Debug, s.opts.Trace, true)
|
||||
s.SetLogger(fileLog, s.opts.Debug, s.opts.Trace)
|
||||
Noticef("File log re-opened")
|
||||
}
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func Noticef(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Noticef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Errorf logs an error
|
||||
func Errorf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Errorf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Fatalf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&debug) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Debugf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func Tracef(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&trace) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Tracef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
func executeLogCall(f func(logger Logger, format string, v ...interface{}), format string, args ...interface{}) {
|
||||
log.Lock()
|
||||
defer log.Unlock()
|
||||
if log.logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
f(log.logger, format, args...)
|
||||
}
|
526
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
Normal file
526
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
Normal file
|
@ -0,0 +1,526 @@
|
|||
// Copyright 2013-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/server/pse"
|
||||
)
|
||||
|
||||
// Snapshot this
|
||||
var numCores int
|
||||
|
||||
func init() {
|
||||
numCores = runtime.NumCPU()
|
||||
}
|
||||
|
||||
// Connz represents detailed information on current client connections.
|
||||
type Connz struct {
|
||||
Now time.Time `json:"now"`
|
||||
NumConns int `json:"num_connections"`
|
||||
Total int `json:"total"`
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit"`
|
||||
Conns []ConnInfo `json:"connections"`
|
||||
}
|
||||
|
||||
// ConnInfo has detailed information on a per connection basis.
|
||||
type ConnInfo struct {
|
||||
Cid uint64 `json:"cid"`
|
||||
IP string `json:"ip"`
|
||||
Port int `json:"port"`
|
||||
Start time.Time `json:"start"`
|
||||
LastActivity time.Time `json:"last_activity"`
|
||||
Uptime string `json:"uptime"`
|
||||
Idle string `json:"idle"`
|
||||
Pending int `json:"pending_bytes"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
NumSubs uint32 `json:"subscriptions"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Lang string `json:"lang,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
TLSVersion string `json:"tls_version,omitempty"`
|
||||
TLSCipher string `json:"tls_cipher_suite,omitempty"`
|
||||
AuthorizedUser string `json:"authorized_user,omitempty"`
|
||||
Subs []string `json:"subscriptions_list,omitempty"`
|
||||
}
|
||||
|
||||
// DefaultConnListSize is the default size of the connection list.
|
||||
const DefaultConnListSize = 1024
|
||||
|
||||
const defaultStackBufSize = 10000
|
||||
|
||||
// HandleConnz process HTTP requests for connection information.
|
||||
func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) {
|
||||
sortOpt := SortOpt(r.URL.Query().Get("sort"))
|
||||
|
||||
// If no sort option given or sort is by uptime, then sort by cid
|
||||
if sortOpt == "" || sortOpt == byUptime {
|
||||
sortOpt = byCid
|
||||
} else if !sortOpt.IsValid() {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte(fmt.Sprintf("Invalid sorting option: %s", sortOpt)))
|
||||
return
|
||||
}
|
||||
|
||||
c := &Connz{}
|
||||
c.Now = time.Now()
|
||||
|
||||
auth, _ := strconv.Atoi(r.URL.Query().Get("auth"))
|
||||
subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))
|
||||
c.Offset, _ = strconv.Atoi(r.URL.Query().Get("offset"))
|
||||
c.Limit, _ = strconv.Atoi(r.URL.Query().Get("limit"))
|
||||
|
||||
if c.Limit == 0 {
|
||||
c.Limit = DefaultConnListSize
|
||||
}
|
||||
|
||||
// Walk the list
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[ConnzPath]++
|
||||
tlsRequired := s.info.TLSRequired
|
||||
|
||||
// number total of clients. The resulting ConnInfo array
|
||||
// may be smaller if pagination is used.
|
||||
totalClients := len(s.clients)
|
||||
c.Total = totalClients
|
||||
|
||||
i := 0
|
||||
pairs := make(Pairs, totalClients)
|
||||
for _, client := range s.clients {
|
||||
client.mu.Lock()
|
||||
switch sortOpt {
|
||||
case byCid:
|
||||
pairs[i] = Pair{Key: client, Val: int64(client.cid)}
|
||||
case bySubs:
|
||||
pairs[i] = Pair{Key: client, Val: int64(len(client.subs))}
|
||||
case byPending:
|
||||
pairs[i] = Pair{Key: client, Val: int64(client.bw.Buffered())}
|
||||
case byOutMsgs:
|
||||
pairs[i] = Pair{Key: client, Val: client.outMsgs}
|
||||
case byInMsgs:
|
||||
pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inMsgs)}
|
||||
case byOutBytes:
|
||||
pairs[i] = Pair{Key: client, Val: client.outBytes}
|
||||
case byInBytes:
|
||||
pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inBytes)}
|
||||
case byLast:
|
||||
pairs[i] = Pair{Key: client, Val: client.last.UnixNano()}
|
||||
case byIdle:
|
||||
pairs[i] = Pair{Key: client, Val: c.Now.Sub(client.last).Nanoseconds()}
|
||||
}
|
||||
client.mu.Unlock()
|
||||
i++
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if totalClients > 0 {
|
||||
if sortOpt == byCid {
|
||||
// Return in ascending order
|
||||
sort.Sort(pairs)
|
||||
} else {
|
||||
// Return in descending order
|
||||
sort.Sort(sort.Reverse(pairs))
|
||||
}
|
||||
}
|
||||
|
||||
minoff := c.Offset
|
||||
maxoff := c.Offset + c.Limit
|
||||
|
||||
// Make sure these are sane.
|
||||
if minoff > totalClients {
|
||||
minoff = totalClients
|
||||
}
|
||||
if maxoff > totalClients {
|
||||
maxoff = totalClients
|
||||
}
|
||||
pairs = pairs[minoff:maxoff]
|
||||
|
||||
// Now we have the real number of ConnInfo objects, we can set c.NumConns
|
||||
// and allocate the array
|
||||
c.NumConns = len(pairs)
|
||||
c.Conns = make([]ConnInfo, c.NumConns)
|
||||
|
||||
i = 0
|
||||
for _, pair := range pairs {
|
||||
|
||||
client := pair.Key
|
||||
|
||||
client.mu.Lock()
|
||||
|
||||
// First, fill ConnInfo with current client's values. We will
|
||||
// then overwrite the field used for the sort with what was stored
|
||||
// in 'pair'.
|
||||
ci := &c.Conns[i]
|
||||
|
||||
ci.Cid = client.cid
|
||||
ci.Start = client.start
|
||||
ci.LastActivity = client.last
|
||||
ci.Uptime = myUptime(c.Now.Sub(client.start))
|
||||
ci.Idle = myUptime(c.Now.Sub(client.last))
|
||||
ci.OutMsgs = client.outMsgs
|
||||
ci.OutBytes = client.outBytes
|
||||
ci.NumSubs = uint32(len(client.subs))
|
||||
ci.Pending = client.bw.Buffered()
|
||||
ci.Name = client.opts.Name
|
||||
ci.Lang = client.opts.Lang
|
||||
ci.Version = client.opts.Version
|
||||
// inMsgs and inBytes are updated outside of the client's lock, so
|
||||
// we need to use atomic here.
|
||||
ci.InMsgs = atomic.LoadInt64(&client.inMsgs)
|
||||
ci.InBytes = atomic.LoadInt64(&client.inBytes)
|
||||
|
||||
// Now overwrite the field that was used as the sort key, so results
|
||||
// still look sorted even if the value has changed since sort occurred.
|
||||
sortValue := pair.Val
|
||||
switch sortOpt {
|
||||
case bySubs:
|
||||
ci.NumSubs = uint32(sortValue)
|
||||
case byPending:
|
||||
ci.Pending = int(sortValue)
|
||||
case byOutMsgs:
|
||||
ci.OutMsgs = sortValue
|
||||
case byInMsgs:
|
||||
ci.InMsgs = sortValue
|
||||
case byOutBytes:
|
||||
ci.OutBytes = sortValue
|
||||
case byInBytes:
|
||||
ci.InBytes = sortValue
|
||||
case byLast:
|
||||
ci.LastActivity = time.Unix(0, sortValue)
|
||||
case byIdle:
|
||||
ci.Idle = myUptime(time.Duration(sortValue))
|
||||
}
|
||||
|
||||
// If the connection is gone, too bad, we won't set TLSVersion and TLSCipher.
|
||||
if tlsRequired && client.nc != nil {
|
||||
conn := client.nc.(*tls.Conn)
|
||||
cs := conn.ConnectionState()
|
||||
ci.TLSVersion = tlsVersion(cs.Version)
|
||||
ci.TLSCipher = tlsCipher(cs.CipherSuite)
|
||||
}
|
||||
|
||||
switch conn := client.nc.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
addr := conn.RemoteAddr().(*net.TCPAddr)
|
||||
ci.Port = addr.Port
|
||||
ci.IP = addr.IP.String()
|
||||
}
|
||||
|
||||
// Fill in subscription data if requested.
|
||||
if subs == 1 {
|
||||
sublist := make([]*subscription, 0, len(client.subs))
|
||||
for _, sub := range client.subs {
|
||||
sublist = append(sublist, sub)
|
||||
}
|
||||
ci.Subs = castToSliceString(sublist)
|
||||
}
|
||||
|
||||
// Fill in user if auth requested.
|
||||
if auth == 1 {
|
||||
ci.AuthorizedUser = client.opts.Username
|
||||
}
|
||||
|
||||
client.mu.Unlock()
|
||||
i++
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /connz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
func castToSliceString(input []*subscription) []string {
|
||||
output := make([]string, 0, len(input))
|
||||
for _, line := range input {
|
||||
output = append(output, string(line.subject))
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// Subsz represents detail information on current connections.
|
||||
type Subsz struct {
|
||||
*SublistStats
|
||||
}
|
||||
|
||||
// Routez represents detailed information on current client connections.
|
||||
type Routez struct {
|
||||
Now time.Time `json:"now"`
|
||||
NumRoutes int `json:"num_routes"`
|
||||
Routes []*RouteInfo `json:"routes"`
|
||||
}
|
||||
|
||||
// RouteInfo has detailed information on a per connection basis.
|
||||
type RouteInfo struct {
|
||||
Rid uint64 `json:"rid"`
|
||||
RemoteID string `json:"remote_id"`
|
||||
DidSolicit bool `json:"did_solicit"`
|
||||
IsConfigured bool `json:"is_configured"`
|
||||
IP string `json:"ip"`
|
||||
Port int `json:"port"`
|
||||
Pending int `json:"pending_size"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
NumSubs uint32 `json:"subscriptions"`
|
||||
Subs []string `json:"subscriptions_list,omitempty"`
|
||||
}
|
||||
|
||||
// HandleRoutez process HTTP requests for route information.
|
||||
func (s *Server) HandleRoutez(w http.ResponseWriter, r *http.Request) {
|
||||
rs := &Routez{Routes: []*RouteInfo{}}
|
||||
rs.Now = time.Now()
|
||||
|
||||
subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))
|
||||
|
||||
// Walk the list
|
||||
s.mu.Lock()
|
||||
|
||||
s.httpReqStats[RoutezPath]++
|
||||
rs.NumRoutes = len(s.routes)
|
||||
|
||||
for _, r := range s.routes {
|
||||
r.mu.Lock()
|
||||
ri := &RouteInfo{
|
||||
Rid: r.cid,
|
||||
RemoteID: r.route.remoteID,
|
||||
DidSolicit: r.route.didSolicit,
|
||||
IsConfigured: r.route.routeType == Explicit,
|
||||
InMsgs: atomic.LoadInt64(&r.inMsgs),
|
||||
OutMsgs: r.outMsgs,
|
||||
InBytes: atomic.LoadInt64(&r.inBytes),
|
||||
OutBytes: r.outBytes,
|
||||
NumSubs: uint32(len(r.subs)),
|
||||
}
|
||||
|
||||
if subs == 1 {
|
||||
sublist := make([]*subscription, 0, len(r.subs))
|
||||
for _, sub := range r.subs {
|
||||
sublist = append(sublist, sub)
|
||||
}
|
||||
ri.Subs = castToSliceString(sublist)
|
||||
}
|
||||
r.mu.Unlock()
|
||||
|
||||
if ip, ok := r.nc.(*net.TCPConn); ok {
|
||||
addr := ip.RemoteAddr().(*net.TCPAddr)
|
||||
ri.Port = addr.Port
|
||||
ri.IP = addr.IP.String()
|
||||
}
|
||||
rs.Routes = append(rs.Routes, ri)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
b, err := json.MarshalIndent(rs, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /routez request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// HandleSubsz processes HTTP requests for subjects stats.
|
||||
func (s *Server) HandleSubsz(w http.ResponseWriter, r *http.Request) {
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[SubszPath]++
|
||||
s.mu.Unlock()
|
||||
|
||||
st := &Subsz{s.sl.Stats()}
|
||||
b, err := json.MarshalIndent(st, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /subscriptionsz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// HandleStacksz processes HTTP requests for getting stacks
|
||||
func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) {
|
||||
// Do not get any lock here that would prevent getting the stacks
|
||||
// if we were to have a deadlock somewhere.
|
||||
var defaultBuf [defaultStackBufSize]byte
|
||||
size := defaultStackBufSize
|
||||
buf := defaultBuf[:size]
|
||||
n := 0
|
||||
for {
|
||||
n = runtime.Stack(buf, true)
|
||||
if n < size {
|
||||
break
|
||||
}
|
||||
size *= 2
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
// Handle response
|
||||
ResponseHandler(w, r, buf[:n])
|
||||
}
|
||||
|
||||
// Varz will output server information on the monitoring port at /varz.
|
||||
type Varz struct {
|
||||
*Info
|
||||
*Options
|
||||
Port int `json:"port"`
|
||||
MaxPayload int `json:"max_payload"`
|
||||
Start time.Time `json:"start"`
|
||||
Now time.Time `json:"now"`
|
||||
Uptime string `json:"uptime"`
|
||||
Mem int64 `json:"mem"`
|
||||
Cores int `json:"cores"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Connections int `json:"connections"`
|
||||
TotalConnections uint64 `json:"total_connections"`
|
||||
Routes int `json:"routes"`
|
||||
Remotes int `json:"remotes"`
|
||||
InMsgs int64 `json:"in_msgs"`
|
||||
OutMsgs int64 `json:"out_msgs"`
|
||||
InBytes int64 `json:"in_bytes"`
|
||||
OutBytes int64 `json:"out_bytes"`
|
||||
SlowConsumers int64 `json:"slow_consumers"`
|
||||
Subscriptions uint32 `json:"subscriptions"`
|
||||
HTTPReqStats map[string]uint64 `json:"http_req_stats"`
|
||||
}
|
||||
|
||||
type usage struct {
|
||||
CPU float32
|
||||
Cores int
|
||||
Mem int64
|
||||
}
|
||||
|
||||
func myUptime(d time.Duration) string {
|
||||
// Just use total seconds for uptime, and display days / years
|
||||
tsecs := d / time.Second
|
||||
tmins := tsecs / 60
|
||||
thrs := tmins / 60
|
||||
tdays := thrs / 24
|
||||
tyrs := tdays / 365
|
||||
|
||||
if tyrs > 0 {
|
||||
return fmt.Sprintf("%dy%dd%dh%dm%ds", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if tdays > 0 {
|
||||
return fmt.Sprintf("%dd%dh%dm%ds", tdays, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if thrs > 0 {
|
||||
return fmt.Sprintf("%dh%dm%ds", thrs, tmins%60, tsecs%60)
|
||||
}
|
||||
if tmins > 0 {
|
||||
return fmt.Sprintf("%dm%ds", tmins, tsecs%60)
|
||||
}
|
||||
return fmt.Sprintf("%ds", tsecs)
|
||||
}
|
||||
|
||||
// HandleRoot will show basic info and links to others handlers.
|
||||
func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) {
|
||||
// This feels dumb to me, but is required: https://code.google.com/p/go/issues/detail?id=4799
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
s.mu.Lock()
|
||||
s.httpReqStats[RootPath]++
|
||||
s.mu.Unlock()
|
||||
fmt.Fprintf(w, `<html lang="en">
|
||||
<head>
|
||||
<link rel="shortcut icon" href="http://nats.io/img/favicon.ico">
|
||||
<style type="text/css">
|
||||
body { font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif; font-size: 22; }
|
||||
a { margin-left: 32px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<img src="http://nats.io/img/logo.png" alt="NATS">
|
||||
<br/>
|
||||
<a href=/varz>varz</a><br/>
|
||||
<a href=/connz>connz</a><br/>
|
||||
<a href=/routez>routez</a><br/>
|
||||
<a href=/subsz>subsz</a><br/>
|
||||
<br/>
|
||||
<a href=http://nats.io/documentation/server/gnatsd-monitoring/>help</a>
|
||||
</body>
|
||||
</html>`)
|
||||
}
|
||||
|
||||
// HandleVarz will process HTTP requests for server information.
|
||||
func (s *Server) HandleVarz(w http.ResponseWriter, r *http.Request) {
|
||||
v := &Varz{Info: &s.info, Options: s.opts, MaxPayload: s.opts.MaxPayload, Start: s.start}
|
||||
v.Now = time.Now()
|
||||
v.Uptime = myUptime(time.Since(s.start))
|
||||
v.Port = v.Info.Port
|
||||
|
||||
updateUsage(v)
|
||||
|
||||
s.mu.Lock()
|
||||
v.Connections = len(s.clients)
|
||||
v.TotalConnections = s.totalClients
|
||||
v.Routes = len(s.routes)
|
||||
v.Remotes = len(s.remotes)
|
||||
v.InMsgs = s.inMsgs
|
||||
v.InBytes = s.inBytes
|
||||
v.OutMsgs = s.outMsgs
|
||||
v.OutBytes = s.outBytes
|
||||
v.SlowConsumers = s.slowConsumers
|
||||
v.Subscriptions = s.sl.Count()
|
||||
s.httpReqStats[VarzPath]++
|
||||
// Need a copy here since s.httpReqStas can change while doing
|
||||
// the marshaling down below.
|
||||
v.HTTPReqStats = make(map[string]uint64, len(s.httpReqStats))
|
||||
for key, val := range s.httpReqStats {
|
||||
v.HTTPReqStats[key] = val
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
b, err := json.MarshalIndent(v, "", " ")
|
||||
if err != nil {
|
||||
Errorf("Error marshalling response to /varz request: %v", err)
|
||||
}
|
||||
|
||||
// Handle response
|
||||
ResponseHandler(w, r, b)
|
||||
}
|
||||
|
||||
// Grab RSS and PCPU
|
||||
func updateUsage(v *Varz) {
|
||||
var rss, vss int64
|
||||
var pcpu float64
|
||||
|
||||
pse.ProcUsage(&pcpu, &rss, &vss)
|
||||
|
||||
v.Mem = rss
|
||||
v.CPU = pcpu
|
||||
v.Cores = numCores
|
||||
}
|
||||
|
||||
// ResponseHandler handles responses for monitoring routes
|
||||
func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) {
|
||||
// Get callback from request
|
||||
callback := r.URL.Query().Get("callback")
|
||||
// If callback is not empty then
|
||||
if callback != "" {
|
||||
// Response for JSONP
|
||||
w.Header().Set("Content-Type", "application/javascript")
|
||||
fmt.Fprintf(w, "%s(%s)", callback, data)
|
||||
} else {
|
||||
// Otherwise JSON
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
Normal file
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// SortOpt is a helper type to sort by ConnInfo values
|
||||
type SortOpt string
|
||||
|
||||
const (
|
||||
byCid SortOpt = "cid"
|
||||
bySubs = "subs"
|
||||
byPending = "pending"
|
||||
byOutMsgs = "msgs_to"
|
||||
byInMsgs = "msgs_from"
|
||||
byOutBytes = "bytes_to"
|
||||
byInBytes = "bytes_from"
|
||||
byLast = "last"
|
||||
byIdle = "idle"
|
||||
byUptime = "uptime"
|
||||
)
|
||||
|
||||
// IsValid determines if a sort option is valid
|
||||
func (s SortOpt) IsValid() bool {
|
||||
switch s {
|
||||
case "", byCid, bySubs, byPending, byOutMsgs, byInMsgs, byOutBytes, byInBytes, byLast, byIdle, byUptime:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Pair type is internally used.
// It associates a client with a numeric value (message/byte counts,
// subscription counts, ...) used as the sort key.
type Pair struct {
	Key *client
	Val int64
}

// Pairs type is internally used.
// It implements sort.Interface, ordering by ascending Val.
type Pairs []Pair

// Len returns the number of pairs.
func (d Pairs) Len() int {
	return len(d)
}

// Swap exchanges the pairs at indices i and j.
func (d Pairs) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}

// Less reports whether the pair at i sorts before the pair at j
// (ascending Val).
func (d Pairs) Less(i, j int) bool {
	return d[i].Val < d[j].Val
}
|
802
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
Normal file
802
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
Normal file
|
@ -0,0 +1,802 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/conf"
|
||||
)
|
||||
|
||||
// For multiple accounts/users.
// User describes a single credential entry from the configuration's
// users array, with optional per-user permissions.
type User struct {
	Username    string       `json:"user"`
	Password    string       `json:"password"`
	Permissions *Permissions `json:"permissions"`
}

// Authorization are the allowed subjects on a per
// publish or subscribe basis.
type Permissions struct {
	Publish   []string `json:"publish"`
	Subscribe []string `json:"subscribe"`
}
|
||||
|
||||
// Options for clusters.
// ClusterOpts holds the cluster listen address, route authentication
// credentials, and the TLS settings used between routes.
type ClusterOpts struct {
	Host        string      `json:"addr"`
	Port        int         `json:"cluster_port"`
	Username    string      `json:"-"`
	Password    string      `json:"-"`
	AuthTimeout float64     `json:"auth_timeout"`
	TLSTimeout  float64     `json:"-"`
	TLSConfig   *tls.Config `json:"-"`
	ListenStr   string      `json:"-"` // raw listen string from the command line
	NoAdvertise bool        `json:"-"`
}
|
||||
|
||||
// Options block for gnatsd server.
// Fields tagged `json:"-"` are excluded from the /varz-style JSON
// representation; the rest are exported with the given keys.
type Options struct {
	Host           string        `json:"addr"`
	Port           int           `json:"port"`
	Trace          bool          `json:"-"`
	Debug          bool          `json:"-"`
	NoLog          bool          `json:"-"`
	NoSigs         bool          `json:"-"`
	Logtime        bool          `json:"-"`
	MaxConn        int           `json:"max_connections"`
	Users          []*User       `json:"-"`
	Username       string        `json:"-"`
	Password       string        `json:"-"`
	Authorization  string        `json:"-"`
	PingInterval   time.Duration `json:"ping_interval"`
	MaxPingsOut    int           `json:"ping_max"`
	HTTPHost       string        `json:"http_host"`
	HTTPPort       int           `json:"http_port"`
	HTTPSPort      int           `json:"https_port"`
	AuthTimeout    float64       `json:"auth_timeout"`
	MaxControlLine int           `json:"max_control_line"`
	MaxPayload     int           `json:"max_payload"`
	Cluster        ClusterOpts   `json:"cluster"`
	ProfPort       int           `json:"-"`
	PidFile        string        `json:"-"`
	LogFile        string        `json:"-"`
	Syslog         bool          `json:"-"`
	RemoteSyslog   string        `json:"-"`
	Routes         []*url.URL    `json:"-"`
	RoutesStr      string        `json:"-"` // raw comma-separated routes string
	TLSTimeout     float64       `json:"tls_timeout"`
	TLS            bool          `json:"-"`
	TLSVerify      bool          `json:"-"`
	TLSCert        string        `json:"-"`
	TLSKey         string        `json:"-"`
	TLSCaCert      string        `json:"-"`
	TLSConfig      *tls.Config   `json:"-"`
}
|
||||
|
||||
// Configuration file authorization section.
// Holds either a single user/pass pair or a list of Users; the caller
// (ProcessConfigFile) rejects configurations that set both.
type authorization struct {
	// Singles
	user string
	pass string
	// Multiple Users
	users              []*User
	timeout            float64
	defaultPermissions *Permissions // applied to users with no explicit permissions
}
|
||||
|
||||
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
	CertFile string
	KeyFile  string
	CaFile   string
	Verify   bool     // require and verify client certificates
	Timeout  float64  // handshake timeout in seconds
	Ciphers  []uint16 // cipher suite IDs; defaults filled in by parseTLS
}
|
||||
|
||||
// tlsUsage is the help text printed by PrintTLSHelpAndDie; the list of
// available cipher suites is appended at print time.
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:

e.g.

    tls {
        cert_file: "./certs/server-cert.pem"
        key_file:  "./certs/server-key.pem"
        ca_file:   "./certs/ca.pem"
        verify:    true

        cipher_suites: [
            "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
        ]
    }

Available cipher suites include:
`
|
||||
|
||||
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): Hacky
//
// An empty configFile yields default (zero-valued) Options. Each known
// top-level key is mapped onto the matching Options field; unknown keys
// are silently ignored. NOTE(review): the unchecked v.(type) assertions
// will panic (not error) on a value of unexpected type in the config.
func ProcessConfigFile(configFile string) (*Options, error) {
	opts := &Options{}

	if configFile == "" {
		return opts, nil
	}

	m, err := conf.ParseFile(configFile)
	if err != nil {
		return nil, err
	}

	for k, v := range m {
		switch strings.ToLower(k) {
		case "listen":
			// "listen" supersedes separate host/net + port keys.
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.Host = hp.host
			opts.Port = hp.port
		case "port":
			opts.Port = int(v.(int64))
		case "host", "net":
			opts.Host = v.(string)
		case "debug":
			opts.Debug = v.(bool)
		case "trace":
			opts.Trace = v.(bool)
		case "logtime":
			opts.Logtime = v.(bool)
		case "authorization":
			am := v.(map[string]interface{})
			auth, err := parseAuthorization(am)
			if err != nil {
				return nil, err
			}
			opts.Username = auth.user
			opts.Password = auth.pass
			opts.AuthTimeout = auth.timeout
			// Check for multiple users defined
			if auth.users != nil {
				if auth.user != "" {
					return nil, fmt.Errorf("Can not have a single user/pass and a users array")
				}
				opts.Users = auth.users
			}
		case "http":
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.HTTPHost = hp.host
			opts.HTTPPort = hp.port
		case "https":
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.HTTPHost = hp.host
			opts.HTTPSPort = hp.port
		case "http_port", "monitor_port":
			opts.HTTPPort = int(v.(int64))
		case "https_port":
			opts.HTTPSPort = int(v.(int64))
		case "cluster":
			cm := v.(map[string]interface{})
			if err := parseCluster(cm, opts); err != nil {
				return nil, err
			}
		case "logfile", "log_file":
			opts.LogFile = v.(string)
		case "syslog":
			opts.Syslog = v.(bool)
		case "remote_syslog":
			opts.RemoteSyslog = v.(string)
		case "pidfile", "pid_file":
			opts.PidFile = v.(string)
		case "prof_port":
			opts.ProfPort = int(v.(int64))
		case "max_control_line":
			opts.MaxControlLine = int(v.(int64))
		case "max_payload":
			opts.MaxPayload = int(v.(int64))
		case "max_connections", "max_conn":
			opts.MaxConn = int(v.(int64))
		case "ping_interval":
			// Config value is in seconds.
			opts.PingInterval = time.Duration(int(v.(int64))) * time.Second
		case "ping_max":
			opts.MaxPingsOut = int(v.(int64))
		case "tls":
			tlsm := v.(map[string]interface{})
			tc, err := parseTLS(tlsm)
			if err != nil {
				return nil, err
			}
			if opts.TLSConfig, err = GenTLSConfig(tc); err != nil {
				return nil, err
			}
			opts.TLSTimeout = tc.Timeout
		}
	}
	return opts, nil
}
|
||||
|
||||
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
	host string
	port int
}

// parseListen will parse listen option which is replacing host/net and port.
// The value may be a bare port (int64) or a "host:port" string; any
// other type yields a zero-valued hostPort with no error.
func parseListen(v interface{}) (*hostPort, error) {
	hp := &hostPort{}
	switch lv := v.(type) {
	// Only a port
	case int64:
		hp.port = int(lv)
	case string:
		host, portStr, err := net.SplitHostPort(lv)
		if err != nil {
			return nil, fmt.Errorf("Could not parse address string %q", v)
		}
		hp.port, err = strconv.Atoi(portStr)
		if err != nil {
			return nil, fmt.Errorf("Could not parse port %q", portStr)
		}
		hp.host = host
	}
	return hp, nil
}
|
||||
|
||||
// parseCluster will parse the cluster config.
// It fills opts.Cluster (and opts.Routes for the "routes" key) from the
// cluster section map. Unknown keys are silently ignored.
// NOTE(review): the unchecked mv.(type) assertions will panic on a
// value of unexpected type in the config.
func parseCluster(cm map[string]interface{}, opts *Options) error {
	for mk, mv := range cm {
		switch strings.ToLower(mk) {
		case "listen":
			hp, err := parseListen(mv)
			if err != nil {
				return err
			}
			opts.Cluster.Host = hp.host
			opts.Cluster.Port = hp.port
		case "port":
			opts.Cluster.Port = int(mv.(int64))
		case "host", "net":
			opts.Cluster.Host = mv.(string)
		case "authorization":
			am := mv.(map[string]interface{})
			auth, err := parseAuthorization(am)
			if err != nil {
				return err
			}
			// Route authorization supports only a single user/pass pair.
			if auth.users != nil {
				return fmt.Errorf("Cluster authorization does not allow multiple users")
			}
			opts.Cluster.Username = auth.user
			opts.Cluster.Password = auth.pass
			opts.Cluster.AuthTimeout = auth.timeout
		case "routes":
			ra := mv.([]interface{})
			opts.Routes = make([]*url.URL, 0, len(ra))
			for _, r := range ra {
				routeURL := r.(string)
				url, err := url.Parse(routeURL)
				if err != nil {
					return fmt.Errorf("error parsing route url [%q]", routeURL)
				}
				opts.Routes = append(opts.Routes, url)
			}
		case "tls":
			tlsm := mv.(map[string]interface{})
			tc, err := parseTLS(tlsm)
			if err != nil {
				return err
			}
			if opts.Cluster.TLSConfig, err = GenTLSConfig(tc); err != nil {
				return err
			}
			// For clusters, we will force strict verification. We also act
			// as both client and server, so will mirror the rootCA to the
			// clientCA pool.
			opts.Cluster.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
			opts.Cluster.TLSConfig.RootCAs = opts.Cluster.TLSConfig.ClientCAs
			opts.Cluster.TLSTimeout = tc.Timeout
		case "no_advertise":
			opts.Cluster.NoAdvertise = mv.(bool)
		}
	}
	return nil
}
|
||||
|
||||
// Helper function to parse Authorization configs.
// Collects single user/pass credentials, a users array, a timeout, and
// default permissions from the authorization section map.
// NOTE(review): the unchecked mv.(string) assertions will panic on a
// value of unexpected type in the config.
func parseAuthorization(am map[string]interface{}) (*authorization, error) {
	auth := &authorization{}
	for mk, mv := range am {
		switch strings.ToLower(mk) {
		case "user", "username":
			auth.user = mv.(string)
		case "pass", "password":
			auth.pass = mv.(string)
		case "timeout":
			// Accept either an integer or float number of seconds.
			at := float64(1)
			switch mv.(type) {
			case int64:
				at = float64(mv.(int64))
			case float64:
				at = mv.(float64)
			}
			auth.timeout = at
		case "users":
			users, err := parseUsers(mv)
			if err != nil {
				return nil, err
			}
			auth.users = users
		case "default_permission", "default_permissions":
			pm, ok := mv.(map[string]interface{})
			if !ok {
				return nil, fmt.Errorf("Expected default permissions to be a map/struct, got %+v", mv)
			}
			permissions, err := parseUserPermissions(pm)
			if err != nil {
				return nil, err
			}
			auth.defaultPermissions = permissions
		}

		// Now check for permission defaults with multiple users, etc.
		// NOTE(review): this runs after every key, not once after the
		// loop; it is idempotent (only fills nil Permissions), so the
		// result is the same regardless of map iteration order.
		if auth.users != nil && auth.defaultPermissions != nil {
			for _, user := range auth.users {
				if user.Permissions == nil {
					user.Permissions = auth.defaultPermissions
				}
			}
		}
	}
	return auth, nil
}
|
||||
|
||||
// Helper function to parse multiple users array with optional permissions.
|
||||
func parseUsers(mv interface{}) ([]*User, error) {
|
||||
// Make sure we have an array
|
||||
uv, ok := mv.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected users field to be an array, got %v", mv)
|
||||
}
|
||||
users := []*User{}
|
||||
for _, u := range uv {
|
||||
// Check its a map/struct
|
||||
um, ok := u.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected user entry to be a map/struct, got %v", u)
|
||||
}
|
||||
user := &User{}
|
||||
for k, v := range um {
|
||||
switch strings.ToLower(k) {
|
||||
case "user", "username":
|
||||
user.Username = v.(string)
|
||||
case "pass", "password":
|
||||
user.Password = v.(string)
|
||||
case "permission", "permissions", "authroization":
|
||||
pm, ok := v.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Expected user permissions to be a map/struct, got %+v", v)
|
||||
}
|
||||
permissions, err := parseUserPermissions(pm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user.Permissions = permissions
|
||||
}
|
||||
}
|
||||
// Check to make sure we have at least username and password
|
||||
if user.Username == "" || user.Password == "" {
|
||||
return nil, fmt.Errorf("User entry requires a user and a password")
|
||||
}
|
||||
users = append(users, user)
|
||||
}
|
||||
return users, nil
|
||||
}
|
||||
|
||||
// Helper function to parse user/account permissions
|
||||
func parseUserPermissions(pm map[string]interface{}) (*Permissions, error) {
|
||||
p := &Permissions{}
|
||||
for k, v := range pm {
|
||||
switch strings.ToLower(k) {
|
||||
case "pub", "publish":
|
||||
subjects, err := parseSubjects(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Publish = subjects
|
||||
case "sub", "subscribe":
|
||||
subjects, err := parseSubjects(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Subscribe = subjects
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown field %s parsing permissions", k)
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Helper function to parse subject singeltons and/or arrays
|
||||
func parseSubjects(v interface{}) ([]string, error) {
|
||||
var subjects []string
|
||||
switch v.(type) {
|
||||
case string:
|
||||
subjects = append(subjects, v.(string))
|
||||
case []string:
|
||||
subjects = v.([]string)
|
||||
case []interface{}:
|
||||
for _, i := range v.([]interface{}) {
|
||||
subject, ok := i.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Subject in permissions array cannot be cast to string")
|
||||
}
|
||||
subjects = append(subjects, subject)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Expected subject permissions to be a subject, or array of subjects, got %T", v)
|
||||
}
|
||||
return checkSubjectArray(subjects)
|
||||
}
|
||||
|
||||
// Helper function to validate subjects, etc for account permissioning.
|
||||
func checkSubjectArray(sa []string) ([]string, error) {
|
||||
for _, s := range sa {
|
||||
if !IsValidSubject(s) {
|
||||
return nil, fmt.Errorf("Subject %q is not a valid subject", s)
|
||||
}
|
||||
}
|
||||
return sa, nil
|
||||
}
|
||||
|
||||
// PrintTLSHelpAndDie prints TLS usage and exits.
// It lists every cipher suite known to this build (cipherMap) and then
// terminates the process with status 0.
func PrintTLSHelpAndDie() {
	fmt.Printf("%s\n", tlsUsage)
	for k := range cipherMap {
		fmt.Printf(" %s\n", k)
	}
	fmt.Printf("\n")
	os.Exit(0)
}
|
||||
|
||||
func parseCipher(cipherName string) (uint16, error) {
|
||||
|
||||
cipher, exists := cipherMap[cipherName]
|
||||
if !exists {
|
||||
return 0, fmt.Errorf("Unrecognized cipher %s", cipherName)
|
||||
}
|
||||
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
// Helper function to parse TLS configs.
// Parses the tls section map into TLSConfigOpts; unlike the other
// section parsers, unknown keys here are an error. When no cipher
// suites are specified the build's defaults are used.
func parseTLS(tlsm map[string]interface{}) (*TLSConfigOpts, error) {
	tc := TLSConfigOpts{}
	for mk, mv := range tlsm {
		switch strings.ToLower(mk) {
		case "cert_file":
			certFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'cert_file' to be filename")
			}
			tc.CertFile = certFile
		case "key_file":
			keyFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'key_file' to be filename")
			}
			tc.KeyFile = keyFile
		case "ca_file":
			caFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'ca_file' to be filename")
			}
			tc.CaFile = caFile
		case "verify":
			verify, ok := mv.(bool)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'verify' to be a boolean")
			}
			tc.Verify = verify
		case "cipher_suites":
			ra := mv.([]interface{})
			if len(ra) == 0 {
				return nil, fmt.Errorf("error parsing tls config, 'cipher_suites' cannot be empty")
			}
			tc.Ciphers = make([]uint16, 0, len(ra))
			for _, r := range ra {
				cipher, err := parseCipher(r.(string))
				if err != nil {
					return nil, err
				}
				tc.Ciphers = append(tc.Ciphers, cipher)
			}
		case "timeout":
			// Accept either an integer or float number of seconds.
			at := float64(0)
			switch mv.(type) {
			case int64:
				at = float64(mv.(int64))
			case float64:
				at = mv.(float64)
			}
			tc.Timeout = at
		default:
			return nil, fmt.Errorf("error parsing tls config, unknown field [%q]", mk)
		}
	}

	// If cipher suites were not specified then use the defaults
	if tc.Ciphers == nil {
		tc.Ciphers = defaultCipherSuites()
	}

	return &tc, nil
}
|
||||
|
||||
// GenTLSConfig loads TLS related configuration parameters.
|
||||
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
|
||||
|
||||
// Now load in cert and private key
|
||||
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
|
||||
}
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate: %v", err)
|
||||
}
|
||||
|
||||
// Create TLSConfig
|
||||
// We will determine the cipher suites that we prefer.
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
PreferServerCipherSuites: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: tc.Ciphers,
|
||||
}
|
||||
|
||||
// Require client certificates as needed
|
||||
if tc.Verify {
|
||||
config.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
// Add in CAs if applicable.
|
||||
if tc.CaFile != "" {
|
||||
rootPEM, err := ioutil.ReadFile(tc.CaFile)
|
||||
if err != nil || rootPEM == nil {
|
||||
return nil, err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
ok := pool.AppendCertsFromPEM([]byte(rootPEM))
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to parse root ca certificate")
|
||||
}
|
||||
config.ClientCAs = pool
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
// A zero value in flagOpts (0, "", false) means "not set on the command
// line" and leaves the file-provided value untouched; fileOpts itself
// is not modified — a merged copy is returned.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
	if fileOpts == nil {
		return flagOpts
	}
	if flagOpts == nil {
		return fileOpts
	}
	// Merge the two, flagOpts override
	opts := *fileOpts

	if flagOpts.Port != 0 {
		opts.Port = flagOpts.Port
	}
	if flagOpts.Host != "" {
		opts.Host = flagOpts.Host
	}
	if flagOpts.Username != "" {
		opts.Username = flagOpts.Username
	}
	if flagOpts.Password != "" {
		opts.Password = flagOpts.Password
	}
	if flagOpts.Authorization != "" {
		opts.Authorization = flagOpts.Authorization
	}
	if flagOpts.HTTPPort != 0 {
		opts.HTTPPort = flagOpts.HTTPPort
	}
	if flagOpts.Debug {
		opts.Debug = true
	}
	if flagOpts.Trace {
		opts.Trace = true
	}
	if flagOpts.Logtime {
		opts.Logtime = true
	}
	if flagOpts.LogFile != "" {
		opts.LogFile = flagOpts.LogFile
	}
	if flagOpts.PidFile != "" {
		opts.PidFile = flagOpts.PidFile
	}
	if flagOpts.ProfPort != 0 {
		opts.ProfPort = flagOpts.ProfPort
	}
	if flagOpts.Cluster.ListenStr != "" {
		opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
	}
	if flagOpts.Cluster.NoAdvertise {
		opts.Cluster.NoAdvertise = true
	}
	if flagOpts.RoutesStr != "" {
		// Routes from the command line replace any file-provided routes.
		mergeRoutes(&opts, flagOpts)
	}
	return &opts
}
|
||||
|
||||
// RoutesFromStr parses route URLs from a comma-separated string.
// An empty input yields nil. NOTE(review): per-entry url.Parse errors
// were always ignored here; that behavior is preserved, so a malformed
// entry contributes a nil URL to the result.
func RoutesFromStr(routesStr string) []*url.URL {
	// Fix: the original guard `len(routes) == 0` was dead code —
	// strings.Split never returns an empty slice for a non-empty
	// separator — so "" produced a one-element slice containing an
	// empty URL. Return nil for empty input as clearly intended.
	if routesStr == "" {
		return nil
	}
	routes := strings.Split(routesStr, ",")
	routeUrls := []*url.URL{}
	for _, r := range routes {
		r = strings.TrimSpace(r)
		u, _ := url.Parse(r)
		routeUrls = append(routeUrls, u)
	}
	return routeUrls
}
|
||||
|
||||
// This will merge the flag routes and override anything that was present.
|
||||
func mergeRoutes(opts, flagOpts *Options) {
|
||||
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
|
||||
if routeUrls == nil {
|
||||
return
|
||||
}
|
||||
opts.Routes = routeUrls
|
||||
opts.RoutesStr = flagOpts.RoutesStr
|
||||
}
|
||||
|
||||
// RemoveSelfReference removes this server from an array of routes
|
||||
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
|
||||
var cleanRoutes []*url.URL
|
||||
cport := strconv.Itoa(clusterPort)
|
||||
|
||||
selfIPs := getInterfaceIPs()
|
||||
for _, r := range routes {
|
||||
host, port, err := net.SplitHostPort(r.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cport == port && isIPInList(selfIPs, getURLIP(host)) {
|
||||
Noticef("Self referencing IP found: ", r)
|
||||
continue
|
||||
}
|
||||
cleanRoutes = append(cleanRoutes, r)
|
||||
}
|
||||
|
||||
return cleanRoutes, nil
|
||||
}
|
||||
|
||||
// isIPInList reports whether any address in list1 equals any address
// in list2.
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
	for _, candidate := range list2 {
		for _, self := range list1 {
			if self.Equal(candidate) {
				return true
			}
		}
	}
	return false
}
|
||||
|
||||
// getURLIP resolves a route host to a list of IPs. A literal IP string
// parses directly; otherwise the hostname is resolved via DNS lookup.
// Lookup failures are logged and yield an empty list.
func getURLIP(ipStr string) []net.IP {
	ipList := []net.IP{}

	// Fast path: the host is already a literal IP.
	ip := net.ParseIP(ipStr)
	if ip != nil {
		ipList = append(ipList, ip)
		return ipList
	}

	hostAddr, err := net.LookupHost(ipStr)
	if err != nil {
		Errorf("Error looking up host with route hostname: %v", err)
		return ipList
	}
	for _, addr := range hostAddr {
		ip = net.ParseIP(addr)
		if ip != nil {
			ipList = append(ipList, ip)
		}
	}
	return ipList
}
|
||||
|
||||
func getInterfaceIPs() []net.IP {
|
||||
var localIPs []net.IP
|
||||
|
||||
interfaceAddr, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
Errorf("Error getting self referencing address: %v", err)
|
||||
return localIPs
|
||||
}
|
||||
|
||||
for i := 0; i < len(interfaceAddr); i++ {
|
||||
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
|
||||
if net.ParseIP(interfaceIP.String()) != nil {
|
||||
localIPs = append(localIPs, interfaceIP)
|
||||
} else {
|
||||
Errorf("Error parsing self referencing address: %v", err)
|
||||
}
|
||||
}
|
||||
return localIPs
|
||||
}
|
||||
|
||||
// processOptions fills in server defaults for any Options field left at
// its zero value. It mutates opts in place.
func processOptions(opts *Options) {
	// Setup non-standard Go defaults
	if opts.Host == "" {
		opts.Host = DEFAULT_HOST
	}
	if opts.HTTPHost == "" {
		// Default to same bind from server if left undefined
		opts.HTTPHost = opts.Host
	}
	if opts.Port == 0 {
		opts.Port = DEFAULT_PORT
	} else if opts.Port == RANDOM_PORT {
		// Choose randomly inside of net.Listen
		opts.Port = 0
	}
	if opts.MaxConn == 0 {
		opts.MaxConn = DEFAULT_MAX_CONNECTIONS
	}
	if opts.PingInterval == 0 {
		opts.PingInterval = DEFAULT_PING_INTERVAL
	}
	if opts.MaxPingsOut == 0 {
		opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
	}
	// Timeouts are stored as float seconds; convert from Duration consts.
	if opts.TLSTimeout == 0 {
		opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
	}
	if opts.AuthTimeout == 0 {
		opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
	}
	if opts.Cluster.Host == "" {
		opts.Cluster.Host = DEFAULT_HOST
	}
	if opts.Cluster.TLSTimeout == 0 {
		opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
	}
	if opts.Cluster.AuthTimeout == 0 {
		opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
	}
	if opts.MaxControlLine == 0 {
		opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
	}
	if opts.MaxPayload == 0 {
		opts.MaxPayload = MAX_PAYLOAD_SIZE
	}
}
|
738
vendor/github.com/nats-io/gnatsd/server/parser.go
generated
vendored
Normal file
738
vendor/github.com/nats-io/gnatsd/server/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,738 @@
|
|||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// pubArg holds the parsed arguments of a PUB (or routed MSG) protocol
// line while the message payload is being read.
type pubArg struct {
	subject []byte
	reply   []byte
	sid     []byte
	szb     []byte // raw payload-size bytes as received on the wire
	size    int    // parsed payload size
}
|
||||
|
||||
// parseState carries the protocol parser's state between calls to
// parse, allowing a protocol line or payload to span socket reads.
type parseState struct {
	state   int // current state-machine state (see parser constants)
	as      int // argument start index into the current buffer
	drop    int // trailing bytes (e.g. '\r') to drop from an argument
	pa      pubArg
	argBuf  []byte // holds a split argument across reads
	msgBuf  []byte // holds a split payload across reads
	scratch [MAX_CONTROL_LINE_SIZE]byte
}
|
||||
|
||||
// Parser constants
// States of the protocol parsing state machine; one OP_* state per
// prefix of each protocol verb, plus argument/payload states.
const (
	OP_START = iota
	// +OK
	OP_PLUS
	OP_PLUS_O
	OP_PLUS_OK
	// -ERR <arg>
	OP_MINUS
	OP_MINUS_E
	OP_MINUS_ER
	OP_MINUS_ERR
	OP_MINUS_ERR_SPC
	MINUS_ERR_ARG
	// CONNECT <arg>
	OP_C
	OP_CO
	OP_CON
	OP_CONN
	OP_CONNE
	OP_CONNEC
	OP_CONNECT
	CONNECT_ARG
	// PUB <arg>
	OP_P
	OP_PU
	OP_PUB
	OP_PUB_SPC
	PUB_ARG
	// PING
	OP_PI
	OP_PIN
	OP_PING
	// PONG
	OP_PO
	OP_PON
	OP_PONG
	// message payload states
	MSG_PAYLOAD
	MSG_END
	// SUB <arg>
	OP_S
	OP_SU
	OP_SUB
	OP_SUB_SPC
	SUB_ARG
	// UNSUB <arg>
	OP_U
	OP_UN
	OP_UNS
	OP_UNSU
	OP_UNSUB
	OP_UNSUB_SPC
	UNSUB_ARG
	// MSG <arg> (route connections only)
	OP_M
	OP_MS
	OP_MSG
	OP_MSG_SPC
	MSG_ARG
	// INFO <arg>
	OP_I
	OP_IN
	OP_INF
	OP_INFO
	INFO_ARG
)
|
||||
|
||||
func (c *client) parse(buf []byte) error {
|
||||
var i int
|
||||
var b byte
|
||||
|
||||
mcl := MAX_CONTROL_LINE_SIZE
|
||||
if c.srv != nil && c.srv.opts != nil {
|
||||
mcl = c.srv.opts.MaxControlLine
|
||||
}
|
||||
|
||||
// snapshot this, and reset when we receive a
|
||||
// proper CONNECT if needed.
|
||||
authSet := c.isAuthTimerSet()
|
||||
|
||||
// Move to loop instead of range syntax to allow jumping of i
|
||||
for i = 0; i < len(buf); i++ {
|
||||
b = buf[i]
|
||||
|
||||
switch c.state {
|
||||
case OP_START:
|
||||
if b != 'C' && b != 'c' && authSet {
|
||||
goto authErr
|
||||
}
|
||||
switch b {
|
||||
case 'P', 'p':
|
||||
c.state = OP_P
|
||||
case 'S', 's':
|
||||
c.state = OP_S
|
||||
case 'U', 'u':
|
||||
c.state = OP_U
|
||||
case 'M', 'm':
|
||||
if c.typ == CLIENT {
|
||||
goto parseErr
|
||||
} else {
|
||||
c.state = OP_M
|
||||
}
|
||||
case 'C', 'c':
|
||||
c.state = OP_C
|
||||
case 'I', 'i':
|
||||
c.state = OP_I
|
||||
case '+':
|
||||
c.state = OP_PLUS
|
||||
case '-':
|
||||
c.state = OP_MINUS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_P:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_PU
|
||||
case 'I', 'i':
|
||||
c.state = OP_PI
|
||||
case 'O', 'o':
|
||||
c.state = OP_PO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_PUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_PUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = PUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case PUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processPub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = OP_START, i+1, MSG_PAYLOAD
|
||||
// If we don't have a saved buffer then jump ahead with
|
||||
// the index. If this overruns what is left we fall out
|
||||
// and process split buffer.
|
||||
if c.msgBuf == nil {
|
||||
i = c.as + c.pa.size - LEN_CR_LF
|
||||
}
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case MSG_PAYLOAD:
|
||||
if c.msgBuf != nil {
|
||||
// copy as much as we can to the buffer and skip ahead.
|
||||
toCopy := c.pa.size - len(c.msgBuf)
|
||||
avail := len(buf) - i
|
||||
if avail < toCopy {
|
||||
toCopy = avail
|
||||
}
|
||||
if toCopy > 0 {
|
||||
start := len(c.msgBuf)
|
||||
// This is needed for copy to work.
|
||||
c.msgBuf = c.msgBuf[:start+toCopy]
|
||||
copy(c.msgBuf[start:], buf[i:i+toCopy])
|
||||
// Update our index
|
||||
i = (i + toCopy) - 1
|
||||
} else {
|
||||
// Fall back to append if needed.
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
if len(c.msgBuf) >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
} else if i-c.as >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
case MSG_END:
|
||||
switch b {
|
||||
case '\n':
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
} else {
|
||||
c.msgBuf = buf[c.as : i+1]
|
||||
}
|
||||
// strict check for proto
|
||||
if len(c.msgBuf) != c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.processMsg(c.msgBuf)
|
||||
c.argBuf, c.msgBuf = nil, nil
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
continue
|
||||
}
|
||||
case OP_S:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_SU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_SUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_SUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = SUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case SUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processSub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_U:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_UN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UN:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_UNS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNS:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_UNSU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_UNSUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_UNSUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = UNSUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case UNSUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processUnsub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PI:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PIN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PIN:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PING
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PING:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPing()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_PO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PON:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PONG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PONG:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPong()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_C:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_CO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CON:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CONN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONN:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_CONNE
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNE:
|
||||
switch b {
|
||||
case 'C', 'c':
|
||||
c.state = OP_CONNEC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNEC:
|
||||
switch b {
|
||||
case 'T', 't':
|
||||
c.state = OP_CONNECT
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNECT:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = CONNECT_ARG
|
||||
c.as = i
|
||||
}
|
||||
case CONNECT_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processConnect(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.state = 0, OP_START
|
||||
// Reset notion on authSet
|
||||
authSet = c.isAuthTimerSet()
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_M:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_MS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MS:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_MSG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MSG_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MSG_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MSG_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processMsgArgs(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
|
||||
|
||||
// jump ahead with the index. If this overruns
|
||||
// what is left we fall out and process split
|
||||
// buffer.
|
||||
i = c.as + c.pa.size - 1
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_I:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_IN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_IN:
|
||||
switch b {
|
||||
case 'F', 'f':
|
||||
c.state = OP_INF
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INF:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_INFO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INFO:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = INFO_ARG
|
||||
c.as = i
|
||||
}
|
||||
case INFO_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processInfo(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PLUS:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_PLUS_O
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_O:
|
||||
switch b {
|
||||
case 'K', 'k':
|
||||
c.state = OP_PLUS_OK
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_OK:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_MINUS:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_MINUS_E
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_E:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ER
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ER:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ERR
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MINUS_ERR_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MINUS_ERR_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MINUS_ERR_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
c.processErr(string(arg))
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
}
|
||||
|
||||
// Check for split buffer scenarios for any ARG state.
|
||||
if c.state == SUB_ARG || c.state == UNSUB_ARG || c.state == PUB_ARG ||
|
||||
c.state == MSG_ARG || c.state == MINUS_ERR_ARG ||
|
||||
c.state == CONNECT_ARG || c.state == INFO_ARG {
|
||||
// Setup a holder buffer to deal with split buffer scenario.
|
||||
if c.argBuf == nil {
|
||||
c.argBuf = c.scratch[:0]
|
||||
c.argBuf = append(c.argBuf, buf[c.as:i-c.drop]...)
|
||||
}
|
||||
// Check for violations of control line length here. Note that this is not
|
||||
// exact at all but the performance hit is too great to be precise, and
|
||||
// catching here should prevent memory exhaustion attacks.
|
||||
if len(c.argBuf) > mcl {
|
||||
c.sendErr("Maximum Control Line Exceeded")
|
||||
c.closeConnection()
|
||||
return ErrMaxControlLine
|
||||
}
|
||||
}
|
||||
|
||||
// Check for split msg
|
||||
if (c.state == MSG_PAYLOAD || c.state == MSG_END) && c.msgBuf == nil {
|
||||
// We need to clone the pubArg if it is still referencing the
|
||||
// read buffer and we are not able to process the msg.
|
||||
if c.argBuf == nil {
|
||||
// Works also for MSG_ARG, when message comes from ROUTE.
|
||||
c.clonePubArg()
|
||||
}
|
||||
|
||||
// If we will overflow the scratch buffer, just create a
|
||||
// new buffer to hold the split message.
|
||||
if c.pa.size > cap(c.scratch)-len(c.argBuf) {
|
||||
lrem := len(buf[c.as:])
|
||||
|
||||
// Consider it a protocol error when the remaining payload
|
||||
// is larger than the reported size for PUB. It can happen
|
||||
// when processing incomplete messages from rogue clients.
|
||||
if lrem > c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.msgBuf = make([]byte, lrem, c.pa.size+LEN_CR_LF)
|
||||
copy(c.msgBuf, buf[c.as:])
|
||||
} else {
|
||||
c.msgBuf = c.scratch[len(c.argBuf):len(c.argBuf)]
|
||||
c.msgBuf = append(c.msgBuf, (buf[c.as:])...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
authErr:
|
||||
c.authViolation()
|
||||
return ErrAuthorization
|
||||
|
||||
parseErr:
|
||||
c.sendErr("Unknown Protocol Operation")
|
||||
snip := protoSnippet(i, buf)
|
||||
err := fmt.Errorf("%s Parser ERROR, state=%d, i=%d: proto='%s...'",
|
||||
c.typeString(), c.state, i, snip)
|
||||
return err
|
||||
}
|
||||
|
||||
func protoSnippet(start int, buf []byte) string {
|
||||
stop := start + PROTO_SNIPPET_SIZE
|
||||
bufSize := len(buf)
|
||||
if start >= bufSize {
|
||||
return `""`
|
||||
}
|
||||
if stop > bufSize {
|
||||
stop = bufSize - 1
|
||||
}
|
||||
return fmt.Sprintf("%q", buf[start:stop])
|
||||
}
|
||||
|
||||
// clonePubArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
// we need to hold onto it into the next read.
//
// It copies subject, reply, sid and szb contiguously into the client's
// scratch space (via argBuf), then re-points each pa field at the
// corresponding sub-slice of that copy so the fields remain valid after
// the read buffer is recycled. Appending a nil slice is a no-op, so nil
// reply/sid stay nil (guarded below). Note the offsets are computed from
// the lengths of the pa fields, which are unchanged by the re-slicing.
func (c *client) clonePubArg() {
	// Rebuild the cloned args at the start of scratch.
	c.argBuf = c.scratch[:0]
	c.argBuf = append(c.argBuf, c.pa.subject...)
	c.argBuf = append(c.argBuf, c.pa.reply...)
	c.argBuf = append(c.argBuf, c.pa.sid...)
	c.argBuf = append(c.argBuf, c.pa.szb...)

	// Subject always starts at offset 0 of the clone.
	c.pa.subject = c.argBuf[:len(c.pa.subject)]

	// Keep reply nil if it was nil; otherwise point into the clone.
	if c.pa.reply != nil {
		c.pa.reply = c.argBuf[len(c.pa.subject) : len(c.pa.subject)+len(c.pa.reply)]
	}

	// Same for sid.
	if c.pa.sid != nil {
		c.pa.sid = c.argBuf[len(c.pa.subject)+len(c.pa.reply) : len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid)]
	}

	// szb occupies the remainder of the clone.
	c.pa.szb = c.argBuf[len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid):]
}
|
23
vendor/github.com/nats-io/gnatsd/server/pse/pse_darwin.go
generated
vendored
Normal file
23
vendor/github.com/nats-io/gnatsd/server/pse/pse_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// ProcUsage reports the percent CPU usage and the resident/virtual
// memory footprint (in bytes) of the current process, obtained by
// shelling out to the ps(1) utility.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	pid := fmt.Sprintf("%d", os.Getpid())
	output, err := exec.Command("ps", "o", "pcpu=,rss=,vsz=", "-p", pid).Output()
	if err != nil {
		*rss, *vss = -1, -1
		// NOTE: errors.New(fmt.Sprintf(...)) kept as-is; fmt.Errorf
		// would be idiomatic but would orphan the errors import.
		return errors.New(fmt.Sprintf("ps call failed:%v", err))
	}
	fmt.Sscanf(string(output), "%f %d %d", pcpu, rss, vss)
	// ps reports rss/vsz in 1 KiB blocks; convert both to bytes.
	*rss *= 1024
	*vss *= 1024
	return nil
}
|
72
vendor/github.com/nats-io/gnatsd/server/pse/pse_freebsd.go
generated
vendored
Normal file
72
vendor/github.com/nats-io/gnatsd/server/pse/pse_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/user.h>
|
||||
#include <stddef.h>
|
||||
#include <unistd.h>
|
||||
|
||||
long pagetok(long size)
|
||||
{
|
||||
int pageshift, pagesize;
|
||||
|
||||
pagesize = getpagesize();
|
||||
pageshift = 0;
|
||||
|
||||
while (pagesize > 1) {
|
||||
pageshift++;
|
||||
pagesize >>= 1;
|
||||
}
|
||||
|
||||
return (size << pageshift);
|
||||
}
|
||||
|
||||
int getusage(double *pcpu, unsigned int *rss, unsigned int *vss)
|
||||
{
|
||||
int mib[4], ret;
|
||||
size_t len;
|
||||
struct kinfo_proc kp;
|
||||
|
||||
len = 4;
|
||||
sysctlnametomib("kern.proc.pid", mib, &len);
|
||||
|
||||
mib[3] = getpid();
|
||||
len = sizeof(kp);
|
||||
|
||||
ret = sysctl(mib, 4, &kp, &len, NULL, 0);
|
||||
if (ret != 0) {
|
||||
return (errno);
|
||||
}
|
||||
|
||||
*rss = pagetok(kp.ki_rssize);
|
||||
*vss = kp.ki_size;
|
||||
*pcpu = kp.ki_pctcpu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ProcUsage reports percent CPU usage and resident/virtual memory sizes
// for the current process, using the cgo getusage() helper above (which
// queries the kern.proc.pid sysctl). A non-zero return from the helper
// is surfaced as a syscall.Errno.
// NOTE(review): the units of rss/vss here come straight from kinfo_proc
// (ki_rssize is pages, converted by pagetok; ki_size is bytes) — confirm
// callers expect bytes for both.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	var r, v C.uint
	var c C.double

	if ret := C.getusage(&c, &r, &v); ret != 0 {
		return syscall.Errno(ret)
	}

	*pcpu = float64(c)
	*rss = int64(r)
	*vss = int64(v)

	return nil
}
|
115
vendor/github.com/nats-io/gnatsd/server/pse/pse_linux.go
generated
vendored
Normal file
115
vendor/github.com/nats-io/gnatsd/server/pse/pse_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
procStatFile string
|
||||
ticks int64
|
||||
lastTotal int64
|
||||
lastSeconds int64
|
||||
ipcpu int64
|
||||
)
|
||||
|
||||
const (
|
||||
utimePos = 13
|
||||
stimePos = 14
|
||||
startPos = 21
|
||||
vssPos = 22
|
||||
rssPos = 23
|
||||
)
|
||||
|
||||
// init seeds the sampling state and kicks off the periodic() timer chain.
func init() {
	// Hard-coded clock tick rate so the package builds without CGO
	// (e.g. for docker images); the commented call is the real query.
	ticks = 100 // int64(C.sysconf(C._SC_CLK_TCK))
	// procStatFile must be set before the first periodic() run reads it.
	procStatFile = fmt.Sprintf("/proc/%d/stat", os.Getpid())
	periodic()
}
|
||||
|
||||
// Sampling function to keep pcpu relevant.
//
// periodic reads /proc/<pid>/stat once per second (re-arming itself via
// time.AfterFunc, so only one invocation is ever in flight), computes the
// CPU time consumed since the previous sample, and stores the result in
// ipcpu via atomic.StoreInt64. The stored value is scaled by 10 (tenths
// of a percent); ProcUsage divides by 10.0 when reading it. Errors are
// silently dropped — a failed sample also stops re-arming the timer.
func periodic() {
	contents, err := ioutil.ReadFile(procStatFile)
	if err != nil {
		return
	}
	fields := bytes.Fields(contents)

	// PCPU: total CPU time (user + system) in clock ticks.
	pstart := parseInt64(fields[startPos])
	utime := parseInt64(fields[utimePos])
	stime := parseInt64(fields[stimePos])
	total := utime + stime

	var sysinfo syscall.Sysinfo_t
	if err := syscall.Sysinfo(&sysinfo); err != nil {
		return
	}

	// Seconds this process has been alive: system uptime minus the
	// process start time (pstart is in ticks since boot).
	seconds := int64(sysinfo.Uptime) - (pstart / ticks)

	// Save off temps
	lt := lastTotal
	ls := lastSeconds

	// Update last sample
	lastTotal = total
	lastSeconds = seconds

	// Adjust to current time window
	total -= lt
	seconds -= ls

	if seconds > 0 {
		// total*1000/ticks = CPU milliseconds used in the window;
		// dividing by elapsed seconds yields tenths of a percent.
		atomic.StoreInt64(&ipcpu, (total*1000/ticks)/seconds)
	}

	// Re-arm for the next sample.
	time.AfterFunc(1*time.Second, periodic)
}
|
||||
|
||||
// ProcUsage reports percent CPU usage and resident/virtual memory sizes
// (in bytes) of the current process, read from /proc/<pid>/stat. CPU is
// not computed here; it is the most recent value sampled by periodic().
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	contents, err := ioutil.ReadFile(procStatFile)
	if err != nil {
		return err
	}
	fields := bytes.Fields(contents)

	// Memory: rss is reported in pages; <<12 multiplies by 4096,
	// which assumes a 4 KiB page size — TODO confirm on platforms
	// with larger pages. vss is already in bytes.
	*rss = (parseInt64(fields[rssPos])) << 12
	*vss = parseInt64(fields[vssPos])

	// PCPU
	// We track this with periodic sampling, so just load and go.
	// ipcpu is stored in tenths of a percent, hence the /10.0.
	*pcpu = float64(atomic.LoadInt64(&ipcpu)) / 10.0

	return nil
}
|
||||
|
||||
// ASCII codes for the decimal digits '0' and '9'.
const (
	asciiZero = 48
	asciiNine = 57
)

// parseInt64 parses a non-negative base-10 integer out of d, returning
// -1 to signal an error (empty input or any non-digit byte). Overflow
// is not detected; inputs here are short /proc fields.
func parseInt64(d []byte) (n int64) {
	if len(d) == 0 {
		return -1
	}
	for _, ch := range d {
		if ch < asciiZero || ch > asciiNine {
			return -1
		}
		n = n*10 + int64(ch-asciiZero)
	}
	return n
}
|
13
vendor/github.com/nats-io/gnatsd/server/pse/pse_rumprun.go
generated
vendored
Normal file
13
vendor/github.com/nats-io/gnatsd/server/pse/pse_rumprun.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
// +build rumprun
|
||||
|
||||
package pse
|
||||
|
||||
// ProcUsage is a stub for rumprun builds: no usage sampling mechanism
// is available, so it always reports zero CPU and memory usage and
// never fails.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	*pcpu, *rss, *vss = 0, 0, 0
	return nil
}
|
12
vendor/github.com/nats-io/gnatsd/server/pse/pse_solaris.go
generated
vendored
Normal file
12
vendor/github.com/nats-io/gnatsd/server/pse/pse_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
// ProcUsage is a placeholder on Solaris: process usage sampling is not
// implemented, so zero CPU and memory values are reported and no error
// is ever returned.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	*pcpu = 0.0
	*rss, *vss = 0, 0
	return nil
}
|
268
vendor/github.com/nats-io/gnatsd/server/pse/pse_windows.go
generated
vendored
Normal file
268
vendor/github.com/nats-io/gnatsd/server/pse/pse_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,268 @@
|
|||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
// +build windows
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
pdh = syscall.NewLazyDLL("pdh.dll")
|
||||
winPdhOpenQuery = pdh.NewProc("PdhOpenQuery")
|
||||
winPdhAddCounter = pdh.NewProc("PdhAddCounterW")
|
||||
winPdhCollectQueryData = pdh.NewProc("PdhCollectQueryData")
|
||||
winPdhGetFormattedCounterValue = pdh.NewProc("PdhGetFormattedCounterValue")
|
||||
winPdhGetFormattedCounterArray = pdh.NewProc("PdhGetFormattedCounterArrayW")
|
||||
)
|
||||
|
||||
// global performance counter query handle and counters
|
||||
var (
|
||||
pcHandle PDH_HQUERY
|
||||
pidCounter, cpuCounter, rssCounter, vssCounter PDH_HCOUNTER
|
||||
prevCPU float64
|
||||
prevRss int64
|
||||
prevVss int64
|
||||
lastSampleTime time.Time
|
||||
processPid int
|
||||
pcQueryLock sync.Mutex
|
||||
initialSample = true
|
||||
)
|
||||
|
||||
// maxQuerySize is the number of values to return from a query.
|
||||
// It represents the maximum # of servers that can be queried
|
||||
// simultaneously running on a machine.
|
||||
const maxQuerySize = 512
|
||||
|
||||
// Keep static memory around to reuse; this works best for passing
|
||||
// into the pdh API.
|
||||
var counterResults [maxQuerySize]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE
|
||||
|
||||
// PDH Types
|
||||
type (
|
||||
PDH_HQUERY syscall.Handle
|
||||
PDH_HCOUNTER syscall.Handle
|
||||
)
|
||||
|
||||
// PDH constants used here
|
||||
const (
|
||||
PDH_FMT_DOUBLE = 0x00000200
|
||||
PDH_INVALID_DATA = 0xC0000BC6
|
||||
PDH_MORE_DATA = 0x800007D2
|
||||
)
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_DOUBLE - double value
|
||||
type PDH_FMT_COUNTERVALUE_DOUBLE struct {
|
||||
CStatus uint32
|
||||
DoubleValue float64
|
||||
}
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_ITEM_DOUBLE is an array
|
||||
// element of a double value
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
|
||||
}
|
||||
|
||||
func pdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) error {
|
||||
ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
|
||||
r0, _, _ := winPdhAddCounter.Call(
|
||||
uintptr(hQuery),
|
||||
uintptr(unsafe.Pointer(ptxt)),
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phCounter)))
|
||||
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhAddCounter failed. %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pdhOpenQuery creates a new PDH query, storing its handle in query.
// NOTE(review): the datasrc parameter is ignored — the first syscall
// argument is hard-coded to 0 (real-time data source); confirm this
// before passing anything other than a placeholder.
func pdhOpenQuery(datasrc *uint16, userdata uint32, query *PDH_HQUERY) error {
	r0, _, _ := syscall.Syscall(winPdhOpenQuery.Addr(), 3, 0, uintptr(userdata), uintptr(unsafe.Pointer(query)))
	if r0 != 0 {
		return fmt.Errorf("pdhOpenQuery failed - %d", r0)
	}
	return nil
}
|
||||
|
||||
func pdhCollectQueryData(hQuery PDH_HQUERY) error {
|
||||
r0, _, _ := winPdhCollectQueryData.Call(uintptr(hQuery))
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhCollectQueryData failed - %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pdhGetFormattedCounterArrayDouble returns the value of return code
// rather than error, to easily check return codes
// (callers branch on PDH_MORE_DATA vs 0 vs anything else).
// On success itemBuffer is filled with lpdwBufferCount formatted
// double values; lpdwBufferSize/lpdwBufferCount are in/out sizing
// parameters per the PDH calling convention.
func pdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
	ret, _, _ := winPdhGetFormattedCounterArray.Call(
		uintptr(hCounter),
		uintptr(PDH_FMT_DOUBLE),
		uintptr(unsafe.Pointer(lpdwBufferSize)),
		uintptr(unsafe.Pointer(lpdwBufferCount)),
		uintptr(unsafe.Pointer(itemBuffer)))

	return uint32(ret)
}
|
||||
|
||||
// getCounterArrayData fetches all formatted double values for counter,
// one per matching instance, using the standard two-call PDH pattern.
// NOTE(review): if the first call returns 0 (success with the single
// provided slot) or the second call returns PDH_MORE_DATA again, this
// falls through to return nil, nil — data is silently dropped; confirm
// whether that path can occur in practice.
func getCounterArrayData(counter PDH_HCOUNTER) ([]float64, error) {
	var bufSize uint32
	var bufCount uint32

	// Retrieving array data requires two calls, the first which
	// requires an addressable empty buffer, and sets size fields.
	// The second call returns the data.
	initialBuf := make([]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, 1)
	ret := pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &initialBuf[0])
	if ret == PDH_MORE_DATA {
		// we'll likely never get here, but be safe.
		if bufCount > maxQuerySize {
			bufCount = maxQuerySize
		}
		// Second call into the package-level static result array.
		ret = pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &counterResults[0])
		if ret == 0 {
			rv := make([]float64, bufCount)
			for i := 0; i < int(bufCount); i++ {
				rv[i] = counterResults[i].FmtValue.DoubleValue
			}
			return rv, nil
		}
	}
	if ret != 0 {
		return nil, fmt.Errorf("getCounterArrayData failed - %d", ret)
	}

	return nil, nil
}
|
||||
|
||||
// getProcessImageName returns the name of the process image, as expected by
|
||||
// the performance counter API.
|
||||
func getProcessImageName() (name string) {
|
||||
name = filepath.Base(os.Args[0])
|
||||
name = strings.TrimRight(name, ".exe")
|
||||
return
|
||||
}
|
||||
|
||||
// initialize our counters
|
||||
func initCounters() (err error) {
|
||||
|
||||
processPid = os.Getpid()
|
||||
// require an addressible nil pointer
|
||||
var source uint16
|
||||
if err := pdhOpenQuery(&source, 0, &pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// setup the performance counters, search for all server instances
|
||||
name := fmt.Sprintf("%s*", getProcessImageName())
|
||||
pidQuery := fmt.Sprintf("\\Process(%s)\\ID Process", name)
|
||||
cpuQuery := fmt.Sprintf("\\Process(%s)\\%% Processor Time", name)
|
||||
rssQuery := fmt.Sprintf("\\Process(%s)\\Working Set - Private", name)
|
||||
vssQuery := fmt.Sprintf("\\Process(%s)\\Virtual Bytes", name)
|
||||
|
||||
if err = pdhAddCounter(pcHandle, pidQuery, 0, &pidCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, cpuQuery, 0, &cpuCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, rssQuery, 0, &rssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, vssQuery, 0, &vssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prime the counters by collecting once, and sleep to get somewhat
|
||||
// useful information the first request. Counters for the CPU require
|
||||
// at least two collect calls.
|
||||
if err = pdhCollectQueryData(pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(50)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcUsage returns process CPU and memory statistics
// sampled from the Windows performance counters: percent CPU, private
// working set and virtual bytes (both appear to be in bytes per the
// counter definitions in initCounters — confirm against PDH docs).
// Results are cached and refreshed at most every two seconds.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	var err error

	// For simplicity, protect the entire call.
	// Most simultaneous requests will immediately return
	// with cached values.
	pcQueryLock.Lock()
	defer pcQueryLock.Unlock()

	// First time through, initialize counters.
	if initialSample {
		if err = initCounters(); err != nil {
			return err
		}
		initialSample = false
	} else if time.Since(lastSampleTime) < (2 * time.Second) {
		// only refresh every two seconds as to minimize impact
		// on the server.
		*pcpu = prevCPU
		*rss = prevRss
		*vss = prevVss
		return nil
	}

	// always save the sample time, even on errors.
	defer func() {
		lastSampleTime = time.Now()
	}()

	// refresh the performance counter data
	if err = pdhCollectQueryData(pcHandle); err != nil {
		return err
	}

	// retrieve the data; each array holds one entry per process
	// instance matching the image-name wildcard.
	var pidAry, cpuAry, rssAry, vssAry []float64
	if pidAry, err = getCounterArrayData(pidCounter); err != nil {
		return err
	}
	if cpuAry, err = getCounterArrayData(cpuCounter); err != nil {
		return err
	}
	if rssAry, err = getCounterArrayData(rssCounter); err != nil {
		return err
	}
	if vssAry, err = getCounterArrayData(vssCounter); err != nil {
		return err
	}
	// find the index of the entry for this process
	idx := int(-1)
	for i := range pidAry {
		if int(pidAry[i]) == processPid {
			idx = i
			break
		}
	}
	// no pid found...
	if idx < 0 {
		return fmt.Errorf("could not find pid in performance counter results")
	}
	// assign values from the performance counters
	*pcpu = cpuAry[idx]
	*rss = int64(rssAry[idx])
	*vss = int64(vssAry[idx])

	// save off cache values
	prevCPU = *pcpu
	prevRss = *rss
	prevVss = *vss

	return nil
}
|
731
vendor/github.com/nats-io/gnatsd/server/route.go
generated
vendored
Normal file
731
vendor/github.com/nats-io/gnatsd/server/route.go
generated
vendored
Normal file
|
@ -0,0 +1,731 @@
|
|||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/util"
|
||||
)
|
||||
|
||||
// RouteType designates the router type
type RouteType int

// Type of Route
const (
	// This route we learned from speaking to other routes.
	Implicit RouteType = iota
	// This route was explicitly configured.
	Explicit
)

// route holds the per-connection state for a cluster route.
type route struct {
	remoteID     string    // ID of the server on the other end
	didSolicit   bool      // true if we initiated this connection
	retry        bool      // reconnect on failure
	routeType    RouteType // Implicit or Explicit
	url          *url.URL  // remote endpoint (may be built from INFO)
	authRequired bool      // copied from the remote's INFO
	tlsRequired  bool      // copied from the remote's INFO
}

// connectInfo is the JSON payload of the CONNECT protocol sent to a route.
type connectInfo struct {
	Verbose  bool   `json:"verbose"`
	Pedantic bool   `json:"pedantic"`
	User     string `json:"user,omitempty"`
	Pass     string `json:"pass,omitempty"`
	TLS      bool   `json:"tls_required"`
	Name     string `json:"name"`
}

// Route protocol constants
const (
	ConProto  = "CONNECT %s" + _CRLF_
	InfoProto = "INFO %s" + _CRLF_
)
|
||||
|
||||
// sendConnect marshals and sends the CONNECT protocol line to the remote
// route, including credentials taken from the route URL if present.
// A marshalling failure closes the connection.
// Lock should be held entering here.
func (c *client) sendConnect(tlsRequired bool) {
	// Credentials, if any, come from the userinfo of the route URL.
	var user, pass string
	if userInfo := c.route.url.User; userInfo != nil {
		user = userInfo.Username()
		pass, _ = userInfo.Password()
	}
	cinfo := connectInfo{
		Verbose:  false,
		Pedantic: false,
		User:     user,
		Pass:     pass,
		TLS:      tlsRequired,
		Name:     c.srv.info.ID,
	}
	b, err := json.Marshal(cinfo)
	if err != nil {
		c.Errorf("Error marshalling CONNECT to route: %v\n", err)
		c.closeConnection()
		return
	}
	c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
|
||||
|
||||
// Process the info message if we are a route.
//
// processRouteInfo handles the INFO protocol received on a route
// connection: it either forwards gossip about a third server
// (processImplicitRoute), detects and drops a route to ourselves or a
// duplicate route, or registers the new route and propagates it to the
// rest of the cluster. Note the careful lock interleaving: c.mu must be
// released before calling into Server methods (addRoute, etc.) to avoid
// lock-order issues.
func (c *client) processRouteInfo(info *Info) {
	c.mu.Lock()
	// Connection can be closed at any time (by auth timeout, etc).
	// Does not make sense to continue here if connection is gone.
	if c.route == nil || c.nc == nil {
		c.mu.Unlock()
		return
	}

	s := c.srv
	remoteID := c.route.remoteID

	// We receive an INFO from a server that informs us about another server,
	// so the info.ID in the INFO protocol does not match the ID of this route.
	if remoteID != "" && remoteID != info.ID {
		c.mu.Unlock()

		// Process this implicit route. We will check that it is not an explicit
		// route and/or that it has not been connected already.
		s.processImplicitRoute(info)
		return
	}

	// Need to set this for the detection of the route to self to work
	// in closeConnection().
	c.route.remoteID = info.ID

	// Detect route to self.
	if c.route.remoteID == s.info.ID {
		c.mu.Unlock()
		c.closeConnection()
		return
	}

	// Copy over important information.
	c.route.authRequired = info.AuthRequired
	c.route.tlsRequired = info.TLSRequired

	// If we do not know this route's URL, construct one on the fly
	// from the information provided.
	if c.route.url == nil {
		// Add in the URL from host and port
		hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
		url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
		if err != nil {
			c.Errorf("Error parsing URL from INFO: %v\n", err)
			c.mu.Unlock()
			c.closeConnection()
			return
		}
		c.route.url = url
	}

	// Check to see if we have this remote already registered.
	// This can happen when both servers have routes to each other.
	c.mu.Unlock()

	if added, sendInfo := s.addRoute(c, info); added {
		c.Debugf("Registering remote route %q", info.ID)
		// Send our local subscriptions to this route.
		s.sendLocalSubsToRoute(c)
		if sendInfo {
			// Need to get the remote IP address.
			c.mu.Lock()
			switch conn := c.nc.(type) {
			case *net.TCPConn, *tls.Conn:
				addr := conn.RemoteAddr().(*net.TCPAddr)
				info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(), strconv.Itoa(info.Port)))
			default:
				// Fall back to the URL we already have.
				info.IP = fmt.Sprintf("%s", c.route.url)
			}
			c.mu.Unlock()
			// Now let the known servers know about this new route
			s.forwardNewRouteInfoToKnownServers(info)
		}
		// If the server Info did not have these URLs, update and send an INFO
		// protocol to all clients that support it (unless the feature is disabled).
		if s.updateServerINFO(info.ClientConnectURLs) {
			s.sendAsyncInfoToClients()
		}
	} else {
		c.Debugf("Detected duplicate remote route %q", info.ID)
		c.closeConnection()
	}
}
|
||||
|
||||
// sendAsyncInfoToClients sends an INFO protocol to all
// connected clients that accept async INFO updates.
// Clients that have not completed CONNECT or not yet received their
// first PONG are instead flagged (infoUpdated) so the update is
// delivered later. The server lock is dropped before iterating clients
// to keep the critical section short.
func (s *Server) sendAsyncInfoToClients() {
	s.mu.Lock()
	// If there are no clients supporting async INFO protocols, we are done.
	if s.cproto == 0 {
		s.mu.Unlock()
		return
	}

	// Capture under lock
	proto := s.infoJSON

	// Make a copy of ALL clients so we can release server lock while
	// sending the protocol to clients. We could check the conditions
	// (proto support, first PONG sent) here and so have potentially
	// a limited number of clients, but that would mean grabbing the
	// client's lock here, which we don't want since we would still
	// need it in the second loop.
	clients := make([]*client, 0, len(s.clients))
	for _, c := range s.clients {
		clients = append(clients, c)
	}
	s.mu.Unlock()

	for _, c := range clients {
		c.mu.Lock()
		// If server did not yet receive the CONNECT protocol, check later
		// when sending the first PONG.
		if !c.flags.isSet(connectReceived) {
			c.flags.set(infoUpdated)
		} else if c.opts.Protocol >= ClientProtoInfo {
			// Send only if first PONG was sent
			if c.flags.isSet(firstPongSent) {
				// sendInfo takes care of checking if the connection is still
				// valid or not, so don't duplicate tests here.
				c.sendInfo(proto)
			} else {
				// Otherwise, notify that INFO has changed and check later.
				c.flags.set(infoUpdated)
			}
		}
		c.mu.Unlock()
	}
}
|
||||
|
||||
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
	remoteID := info.ID

	s.mu.Lock()
	defer s.mu.Unlock()

	// Don't connect to ourself
	if remoteID == s.info.ID {
		return
	}
	// Check if this route already exists
	if _, exists := s.remotes[remoteID]; exists {
		return
	}
	// Check if we have this route as a configured route
	if s.hasThisRouteConfigured(info) {
		return
	}

	// Initiate the connection, using info.IP instead of info.URL here...
	r, err := url.Parse(info.IP)
	if err != nil {
		Debugf("Error parsing URL from INFO: %v\n", err)
		return
	}
	// Attach the cluster credentials when the remote requires auth.
	if info.AuthRequired {
		r.User = url.UserPassword(s.opts.Cluster.Username, s.opts.Cluster.Password)
	}
	// Implicit routes get a single connection attempt (tryForEver=false).
	s.startGoRoutine(func() { s.connectToRoute(r, false) })
}
|
||||
|
||||
// hasThisRouteConfigured returns true if info.Host:info.Port is present
|
||||
// in the server's opts.Routes, false otherwise.
|
||||
// Server lock is assumed to be held by caller.
|
||||
func (s *Server) hasThisRouteConfigured(info *Info) bool {
|
||||
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
|
||||
for _, ri := range s.opts.Routes {
|
||||
if strings.ToLower(ri.Host) == urlToCheckExplicit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
	s.mu.Lock()
	defer s.mu.Unlock()

	b, _ := json.Marshal(info)
	infoJSON := []byte(fmt.Sprintf(InfoProto, b))

	for _, r := range s.routes {
		r.mu.Lock()
		// Skip the route this INFO describes; it already knows itself.
		if r.route.remoteID != info.ID {
			r.sendInfo(infoJSON)
		}
		r.mu.Unlock()
	}
}
|
||||
|
||||
// This will send local subscription state to a new route connection.
// FIXME(dlc) - This could be a DOS or perf issue with many clients
// and large subscription space. Plus buffering in place not a good idea.
func (s *Server) sendLocalSubsToRoute(route *client) {
	b := bytes.Buffer{}
	s.mu.Lock()
	for _, client := range s.clients {
		client.mu.Lock()
		// Snapshot the subs under the client lock; the protocol strings
		// are built after releasing it.
		subs := make([]*subscription, 0, len(client.subs))
		for _, sub := range client.subs {
			subs = append(subs, sub)
		}
		client.mu.Unlock()
		for _, sub := range subs {
			rsid := routeSid(sub)
			proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
			b.WriteString(proto)
		}
	}
	s.mu.Unlock()

	route.mu.Lock()
	defer route.mu.Unlock()
	// Single buffered write of the full subscription state.
	route.sendProto(b.Bytes(), true)

	route.Debugf("Route sent local subscriptions")
}
|
||||
|
||||
// createRoute initializes a route connection. A non-nil rURL means this
// server solicited (initiated) the connection; nil means it was accepted.
// It performs the optional TLS handshake, starts the read loop, and sends
// the initial CONNECT/INFO. Returns nil if setup fails.
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
	didSolicit := rURL != nil
	r := &route{didSolicit: didSolicit}
	// Mark the route Explicit when its host matches a configured route.
	for _, route := range s.opts.Routes {
		if rURL != nil && (strings.ToLower(rURL.Host) == strings.ToLower(route.Host)) {
			r.routeType = Explicit
		}
	}

	c := &client{srv: s, nc: conn, opts: clientOpts{}, typ: ROUTER, route: r}

	// Grab server variables
	s.mu.Lock()
	infoJSON := s.routeInfoJSON
	authRequired := s.routeInfo.AuthRequired
	tlsRequired := s.routeInfo.TLSRequired
	s.mu.Unlock()

	// Grab lock
	c.mu.Lock()

	// Initialize
	c.initClient()

	c.Debugf("Route connection created")

	if didSolicit {
		// Do this before the TLS code, otherwise, in case of failure
		// and if route is explicit, it would try to reconnect to 'nil'...
		r.url = rURL
	}

	// Check for TLS
	if tlsRequired {
		// Copy off the config to add in ServerName if we need to.
		tlsConfig := util.CloneTLSConfig(s.opts.Cluster.TLSConfig)

		// If we solicited, we will act like the client, otherwise the server.
		if didSolicit {
			c.Debugf("Starting TLS route client handshake")
			// Specify the ServerName we are expecting.
			host, _, _ := net.SplitHostPort(rURL.Host)
			tlsConfig.ServerName = host
			c.nc = tls.Client(c.nc, tlsConfig)
		} else {
			c.Debugf("Starting TLS route server handshake")
			c.nc = tls.Server(c.nc, tlsConfig)
		}

		conn := c.nc.(*tls.Conn)

		// Setup the timeout
		ttl := secondsToDuration(s.opts.Cluster.TLSTimeout)
		time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
		conn.SetReadDeadline(time.Now().Add(ttl))

		// Release the client lock while blocking in the handshake.
		c.mu.Unlock()
		if err := conn.Handshake(); err != nil {
			c.Debugf("TLS route handshake error: %v", err)
			c.sendErr("Secure Connection - TLS Required")
			c.closeConnection()
			return nil
		}
		// Reset the read deadline
		conn.SetReadDeadline(time.Time{})

		// Re-Grab lock
		c.mu.Lock()

		// Verify that the connection did not go away while we released the lock.
		if c.nc == nil {
			c.mu.Unlock()
			return nil
		}

		// Rewrap bw: the underlying net.Conn was replaced by the TLS conn.
		c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	}

	// Do final client initialization

	// Set the Ping timer
	c.setPingTimer()

	// For routes, the "client" is added to s.routes only when processing
	// the INFO protocol, that is much later.
	// In the meantime, if the server shuts down, there would be no reference
	// to the client (connection) to be closed, leaving this readLoop
	// uninterrupted, causing the Shutdown() to wait indefinitely.
	// We need to store the client in a special map, under a special lock.
	s.grMu.Lock()
	s.grTmpClients[c.cid] = c
	s.grMu.Unlock()

	// Spin up the read loop.
	s.startGoRoutine(func() { c.readLoop() })

	if tlsRequired {
		c.Debugf("TLS handshake complete")
		cs := c.nc.(*tls.Conn).ConnectionState()
		c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
	}

	// Queue Connect proto if we solicited the connection.
	if didSolicit {
		c.Debugf("Route connect msg sent")
		c.sendConnect(tlsRequired)
	}

	// Send our info to the other side.
	c.sendInfo(infoJSON)

	// Check for Auth required state for incoming connections.
	if authRequired && !didSolicit {
		ttl := secondsToDuration(s.opts.Cluster.AuthTimeout)
		c.setAuthTimer(ttl)
	}

	c.mu.Unlock()

	return c
}
|
||||
|
||||
// Common protocol string fragments.
const (
	_CRLF_  = "\r\n"
	_EMPTY_ = ""
	_SPC_   = " "
)

// Formats used when forwarding client (un)subscriptions over routes.
const (
	subProto   = "SUB %s %s %s" + _CRLF_
	unsubProto = "UNSUB %s%s" + _CRLF_
)

// FIXME(dlc) - Make these reserved and reject if they come in as a sid
// from a client connection.
// Route constants
const (
	RSID  = "RSID"
	QRSID = "QRSID"

	// Submatch indices into qrsidRe results.
	RSID_CID_INDEX   = 1
	RSID_SID_INDEX   = 2
	EXPECTED_MATCHES = 3
)

// qrsidRe matches routed queue sids of the form "QRSID:<cid>:<sid>".
// FIXME(dlc) - This may be too slow, check at later date.
var qrsidRe = regexp.MustCompile(`QRSID:(\d+):([^\s]+)`)
|
||||
|
||||
// routeSidQueueSubscriber resolves a routed sid of the form
// "QRSID:<cid>:<sid>" to the local queue subscription. The boolean is
// false when rsid is not a QRSID at all; it is true (possibly with a nil
// subscription) when it was a QRSID but the client or sub is gone.
func (s *Server) routeSidQueueSubscriber(rsid []byte) (*subscription, bool) {
	if !bytes.HasPrefix(rsid, []byte(QRSID)) {
		return nil, false
	}
	matches := qrsidRe.FindSubmatch(rsid)
	if matches == nil || len(matches) != EXPECTED_MATCHES {
		return nil, false
	}
	cid := uint64(parseInt64(matches[RSID_CID_INDEX]))

	s.mu.Lock()
	client := s.clients[cid]
	s.mu.Unlock()

	// Client may have disconnected in the meantime.
	if client == nil {
		return nil, true
	}
	sid := matches[RSID_SID_INDEX]

	client.mu.Lock()
	sub, ok := client.subs[string(sid)]
	client.mu.Unlock()
	if ok {
		return sub, true
	}
	return nil, true
}
|
||||
|
||||
func routeSid(sub *subscription) string {
|
||||
var qi string
|
||||
if len(sub.queue) > 0 {
|
||||
qi = "Q"
|
||||
}
|
||||
return fmt.Sprintf("%s%s:%d:%s", qi, RSID, sub.client.cid, sub.sid)
|
||||
}
|
||||
|
||||
// addRoute registers a negotiated route under its remote server id.
// It returns (isNew, sendInfo): isNew is false when a route for this
// remote already exists (duplicate); sendInfo tells the caller whether
// this new route should be forwarded to the other known servers.
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
	id := c.route.remoteID
	sendInfo := false

	s.mu.Lock()
	if !s.running {
		s.mu.Unlock()
		return false, false
	}
	remote, exists := s.remotes[id]
	if !exists {
		// Remove from the temporary map (the route is now tracked by
		// s.routes and will be closed from there on shutdown).
		s.grMu.Lock()
		delete(s.grTmpClients, c.cid)
		s.grMu.Unlock()

		s.routes[c.cid] = c
		s.remotes[id] = c

		// If this server's ID is (alpha) less than the peer, then we will
		// make sure that if we are disconnected, we will try to connect once
		// more. This is to mitigate the issue where both sides add the route
		// on the opposite connection, and therefore we end-up with both
		// being dropped.
		if s.info.ID < id {
			c.mu.Lock()
			// Make this as a retry (otherwise, only explicit are retried).
			c.route.retry = true
			c.mu.Unlock()
		}

		// we don't need to send if the only route is the one we just accepted.
		sendInfo = len(s.routes) > 1
	}
	s.mu.Unlock()

	if exists && c.route.didSolicit {
		// upgrade to solicited?
		remote.mu.Lock()
		// the existing route (remote) should keep its 'retry' value, and
		// not be replaced with c.route.retry.
		retry := remote.route.retry
		remote.route = c.route
		remote.route.retry = retry
		remote.mu.Unlock()
	}

	return !exists, sendInfo
}
|
||||
|
||||
// broadcastInterestToRoutes sends the given (un)subscribe protocol line
// to every registered route connection.
func (s *Server) broadcastInterestToRoutes(proto string) {
	var arg []byte
	// Only build the trace argument (proto minus trailing CRLF) when
	// tracing is enabled.
	if atomic.LoadInt32(&trace) == 1 {
		arg = []byte(proto[:len(proto)-LEN_CR_LF])
	}
	protoAsBytes := []byte(proto)
	s.mu.Lock()
	for _, route := range s.routes {
		// FIXME(dlc) - Make same logic as deliverMsg
		route.mu.Lock()
		route.sendProto(protoAsBytes, true)
		route.mu.Unlock()
		route.traceOutOp("", arg)
	}
	s.mu.Unlock()
}
|
||||
|
||||
// broadcastSubscribe will forward a client subscription
|
||||
// to all active routes.
|
||||
func (s *Server) broadcastSubscribe(sub *subscription) {
|
||||
if s.numRoutes() == 0 {
|
||||
return
|
||||
}
|
||||
rsid := routeSid(sub)
|
||||
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
|
||||
s.broadcastInterestToRoutes(proto)
|
||||
}
|
||||
|
||||
// broadcastUnSubscribe will forward a client unsubscribe
// action to all active routes.
func (s *Server) broadcastUnSubscribe(sub *subscription) {
	if s.numRoutes() == 0 {
		return
	}
	rsid := routeSid(sub)
	maxStr := _EMPTY_
	sub.client.mu.Lock()
	// Set max if we have it set and have not tripped auto-unsubscribe
	if sub.max > 0 && sub.nm < sub.max {
		maxStr = fmt.Sprintf(" %d", sub.max)
	}
	sub.client.mu.Unlock()
	proto := fmt.Sprintf(unsubProto, rsid, maxStr)
	s.broadcastInterestToRoutes(proto)
}
|
||||
|
||||
func (s *Server) routeAcceptLoop(ch chan struct{}) {
|
||||
hp := net.JoinHostPort(s.opts.Cluster.Host, strconv.Itoa(s.opts.Cluster.Port))
|
||||
Noticef("Listening for route connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
// We need to close this channel to avoid a deadlock
|
||||
close(ch)
|
||||
Fatalf("Error listening on router port: %d - %v", s.opts.Cluster.Port, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.routeListener = l
|
||||
s.mu.Unlock()
|
||||
|
||||
// Let them know we are up
|
||||
close(ch)
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Route Accept Errorf(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createRoute(conn, nil)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Debugf("Router accept loop exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
func (s *Server) StartRouting(clientListenReady chan struct{}) {
	defer s.grWG.Done()

	// Wait for the client listen port to be opened, and
	// the possible ephemeral port to be selected.
	<-clientListenReady

	// Get all possible URLs (when server listens to 0.0.0.0).
	// This is going to be sent to other Servers, so that they can let their
	// clients know about us.
	clientConnectURLs := s.getClientConnectURLs()

	// Check for TLSConfig
	tlsReq := s.opts.Cluster.TLSConfig != nil
	info := Info{
		ID:                s.info.ID,
		Version:           s.info.Version,
		Host:              s.opts.Cluster.Host,
		Port:              s.opts.Cluster.Port,
		AuthRequired:      false,
		TLSRequired:       tlsReq,
		SSLRequired:       tlsReq,
		TLSVerify:         tlsReq,
		MaxPayload:        s.info.MaxPayload,
		ClientConnectURLs: clientConnectURLs,
	}
	// Check for Auth items
	if s.opts.Cluster.Username != "" {
		info.AuthRequired = true
	}
	s.routeInfo = info
	// Cache the marshaled INFO protocol sent to newly created routes.
	b, _ := json.Marshal(info)
	s.routeInfoJSON = []byte(fmt.Sprintf(InfoProto, b))

	// Spin up the accept loop
	ch := make(chan struct{})
	go s.routeAcceptLoop(ch)
	<-ch

	// Solicit Routes if needed.
	s.solicitRoutes()
}
|
||||
|
||||
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
|
||||
tryForEver := rtype == Explicit
|
||||
if tryForEver {
|
||||
time.Sleep(DEFAULT_ROUTE_RECONNECT)
|
||||
}
|
||||
s.connectToRoute(rURL, tryForEver)
|
||||
}
|
||||
|
||||
// connectToRoute repeatedly attempts to establish a route connection to
// rURL. With tryForEver true (explicit routes), failures are retried
// until the server shuts down; otherwise one failed attempt gives up.
func (s *Server) connectToRoute(rURL *url.URL, tryForEver bool) {
	defer s.grWG.Done()
	for s.isRunning() && rURL != nil {
		Debugf("Trying to connect to route on %s", rURL.Host)
		conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
		if err != nil {
			Debugf("Error trying to connect to route: %v", err)
			select {
			case <-s.rcQuit:
				// Server is shutting down; abandon the retry loop.
				return
			case <-time.After(DEFAULT_ROUTE_CONNECT):
				if !tryForEver {
					return
				}
				continue
			}
		}
		// We have a route connection here.
		// Go ahead and create it and exit this func.
		s.createRoute(conn, rURL)
		return
	}
}
|
||||
|
||||
func (c *client) isSolicitedRoute() bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.typ == ROUTER && c.route != nil && c.route.didSolicit
|
||||
}
|
||||
|
||||
func (s *Server) solicitRoutes() {
|
||||
for _, r := range s.opts.Routes {
|
||||
route := r
|
||||
s.startGoRoutine(func() { s.connectToRoute(route, true) })
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) numRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
923
vendor/github.com/nats-io/gnatsd/server/server.go
generated
vendored
Normal file
923
vendor/github.com/nats-io/gnatsd/server/server.go
generated
vendored
Normal file
|
@ -0,0 +1,923 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Allow dynamic profiling.
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/nats-io/gnatsd/util"
|
||||
)
|
||||
|
||||
// Info is the information sent to clients to help them understand information
// about this server.
type Info struct {
	ID           string `json:"server_id"`
	Version      string `json:"version"`
	GoVersion    string `json:"go"`
	Host         string `json:"host"`
	Port         int    `json:"port"`
	AuthRequired bool   `json:"auth_required"`
	SSLRequired  bool   `json:"ssl_required"` // DEPRECATED: ssl json used for older clients
	TLSRequired  bool   `json:"tls_required"`
	TLSVerify    bool   `json:"tls_verify"`
	MaxPayload   int    `json:"max_payload"`
	// IP is set for route INFOs (nats-route:// URL); empty for clients.
	IP                string   `json:"ip,omitempty"`
	ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.

	// Used internally for quick look-ups.
	clientConnectURLs map[string]struct{}
}
|
||||
|
||||
// Server is our main struct.
type Server struct {
	gcid uint64
	grid uint64
	stats
	mu       sync.Mutex
	info     Info
	infoJSON []byte // cached INFO protocol line sent to new clients
	sl       *Sublist
	opts     *Options
	cAuth    Auth
	rAuth    Auth
	trace    bool
	debug    bool
	running  bool
	listener net.Listener
	clients  map[uint64]*client // keyed by client id
	routes   map[uint64]*client // keyed by client id
	remotes  map[string]*client // keyed by remote server id
	totalClients uint64
	done         chan bool // signaled by each accept/monitor loop on exit
	start        time.Time
	http         net.Listener
	httpReqStats map[string]uint64
	routeListener net.Listener
	routeInfo     Info
	routeInfoJSON []byte // cached INFO protocol line sent to new routes
	rcQuit        chan bool // closed on shutdown to stop route connect loops
	grMu          sync.Mutex
	// grTmpClients holds connections whose readLoop has started but that
	// are not yet registered in s.routes, so Shutdown can close them.
	grTmpClients map[uint64]*client
	grRunning    bool
	grWG         sync.WaitGroup // to wait on various go routines
	cproto       int64          // number of clients supporting async INFO
}
|
||||
|
||||
// stats holds the server's message/byte counters.
// Make sure all are 64bits for atomic use
type stats struct {
	inMsgs        int64
	outMsgs       int64
	inBytes       int64
	outBytes      int64
	slowConsumers int64
}
|
||||
|
||||
// New will setup a new server struct after parsing the options.
func New(opts *Options) *Server {
	processOptions(opts)

	// Process TLS options, including whether we require client certificates.
	tlsReq := opts.TLSConfig != nil
	verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)

	info := Info{
		ID:                genID(),
		Version:           VERSION,
		GoVersion:         runtime.Version(),
		Host:              opts.Host,
		Port:              opts.Port,
		AuthRequired:      false,
		TLSRequired:       tlsReq,
		SSLRequired:       tlsReq,
		TLSVerify:         verify,
		MaxPayload:        opts.MaxPayload,
		clientConnectURLs: make(map[string]struct{}),
	}

	s := &Server{
		info:  info,
		sl:    NewSublist(),
		opts:  opts,
		debug: opts.Debug,
		trace: opts.Trace,
		done:  make(chan bool, 1),
		start: time.Now(),
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// For tracking clients
	s.clients = make(map[uint64]*client)

	// For tracking connections that are not yet registered
	// in s.routes, but for which readLoop has started.
	s.grTmpClients = make(map[uint64]*client)

	// For tracking routes and their remote ids
	s.routes = make(map[uint64]*client)
	s.remotes = make(map[string]*client)

	// Used to kick out all of the route
	// connect Go routines.
	s.rcQuit = make(chan bool)
	// Cache the INFO protocol line and install OS signal handlers.
	s.generateServerInfoJSON()
	s.handleSignals()

	return s
}
|
||||
|
||||
// SetClientAuthMethod sets the authentication method for clients.
// Enabling client auth changes the advertised INFO, so AuthRequired is
// flipped on and the cached INFO JSON regenerated.
func (s *Server) SetClientAuthMethod(authMethod Auth) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.info.AuthRequired = true
	s.cAuth = authMethod

	s.generateServerInfoJSON()
}
|
||||
|
||||
// SetRouteAuthMethod sets the authentication method for routes.
// Note: unlike SetClientAuthMethod, this does not touch AuthRequired in
// the route INFO; that is driven by cluster credentials in StartRouting.
func (s *Server) SetRouteAuthMethod(authMethod Auth) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.rAuth = authMethod
}
|
||||
|
||||
// generateServerInfoJSON re-marshals s.info and caches the complete
// "INFO ..." protocol line in s.infoJSON.
// Server lock is assumed to be held by caller.
func (s *Server) generateServerInfoJSON() {
	// Generate the info json
	b, err := json.Marshal(s.info)
	if err != nil {
		Fatalf("Error marshalling INFO JSON: %+v\n", err)
		return
	}
	s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
}
|
||||
|
||||
// PrintAndDie is exported for access in other packages.
// It writes msg (plus a newline) to stderr and terminates the process
// with exit status 1.
func PrintAndDie(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}
|
||||
|
||||
// PrintServerAndExit will print our version and exit.
|
||||
func PrintServerAndExit() {
|
||||
fmt.Printf("nats-server version %s\n", VERSION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// ProcessCommandLineArgs takes the command line arguments
|
||||
// validating and setting flags for handling in case any
|
||||
// sub command was present.
|
||||
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
|
||||
if len(cmd.Args()) > 0 {
|
||||
arg := cmd.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "version":
|
||||
return true, false, nil
|
||||
case "help":
|
||||
return false, true, nil
|
||||
default:
|
||||
return false, false, fmt.Errorf("Unrecognized command: %q\n", arg)
|
||||
}
|
||||
}
|
||||
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// Protected check on running state
|
||||
func (s *Server) isRunning() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.running
|
||||
}
|
||||
|
||||
func (s *Server) logPid() {
|
||||
pidStr := strconv.Itoa(os.Getpid())
|
||||
err := ioutil.WriteFile(s.opts.PidFile, []byte(pidStr), 0660)
|
||||
if err != nil {
|
||||
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Start up the server, this will block.
// Start via a Go routine if needed.
// It brings up (in order): pid file, HTTP/HTTPS monitoring, routing,
// the profiler, and finally the blocking client accept loop.
func (s *Server) Start() {
	Noticef("Starting nats-server version %s", VERSION)
	Debugf("Go build version %s", s.info.GoVersion)

	// Avoid RACE between Start() and Shutdown()
	s.mu.Lock()
	s.running = true
	s.mu.Unlock()

	s.grMu.Lock()
	s.grRunning = true
	s.grMu.Unlock()

	// Log the pid to a file
	if s.opts.PidFile != _EMPTY_ {
		s.logPid()
	}

	// Start up the http server if needed.
	if s.opts.HTTPPort != 0 {
		s.StartHTTPMonitoring()
	}

	// Start up the https server if needed.
	if s.opts.HTTPSPort != 0 {
		if s.opts.TLSConfig == nil {
			Fatalf("TLS cert and key required for HTTPS")
			return
		}
		s.StartHTTPSMonitoring()
	}

	// The Routing routine needs to wait for the client listen
	// port to be opened and potential ephemeral port selected.
	clientListenReady := make(chan struct{})

	// Start up routing as well if needed.
	if s.opts.Cluster.Port != 0 {
		s.startGoRoutine(func() {
			s.StartRouting(clientListenReady)
		})
	}

	// Pprof http endpoint for the profiler.
	if s.opts.ProfPort != 0 {
		s.StartProfiler()
	}

	// Wait for clients. This blocks until shutdown.
	s.AcceptLoop(clientListenReady)
}
|
||||
|
||||
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
// It blocks until all accept/monitor loops have signaled completion on
// s.done and all tracked go routines have exited.
func (s *Server) Shutdown() {
	s.mu.Lock()

	// Prevent issues with multiple calls.
	if !s.running {
		s.mu.Unlock()
		return
	}

	s.running = false
	s.grMu.Lock()
	s.grRunning = false
	s.grMu.Unlock()

	// Gather every connection to close: clients, not-yet-registered
	// route connections, and registered routes.
	conns := make(map[uint64]*client)

	// Copy off the clients
	for i, c := range s.clients {
		conns[i] = c
	}
	// Copy off the connections that are not yet registered
	// in s.routes, but for which the readLoop has started
	s.grMu.Lock()
	for i, c := range s.grTmpClients {
		conns[i] = c
	}
	s.grMu.Unlock()
	// Copy off the routes
	for i, r := range s.routes {
		conns[i] = r
	}

	// Number of done channel responses we expect.
	doneExpected := 0

	// Kick client AcceptLoop()
	if s.listener != nil {
		doneExpected++
		s.listener.Close()
		s.listener = nil
	}

	// Kick route AcceptLoop()
	if s.routeListener != nil {
		doneExpected++
		s.routeListener.Close()
		s.routeListener = nil
	}

	// Kick HTTP monitoring if its running
	if s.http != nil {
		doneExpected++
		s.http.Close()
		s.http = nil
	}

	// Release the solicited routes connect go routines.
	close(s.rcQuit)

	s.mu.Unlock()

	// Close client and route connections
	for _, c := range conns {
		c.closeConnection()
	}

	// Block until the accept loops exit
	for doneExpected > 0 {
		<-s.done
		doneExpected--
	}

	// Wait for go routines to be done.
	s.grWG.Wait()
}
|
||||
|
||||
// AcceptLoop is exported for easier testing.
|
||||
func (s *Server) AcceptLoop(clr chan struct{}) {
|
||||
// If we were to exit before the listener is setup properly,
|
||||
// make sure we close the channel.
|
||||
defer func() {
|
||||
if clr != nil {
|
||||
close(clr)
|
||||
}
|
||||
}()
|
||||
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.Port))
|
||||
Noticef("Listening for client connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
Fatalf("Error listening on port: %s, %q", hp, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Alert of TLS enabled.
|
||||
if s.opts.TLSConfig != nil {
|
||||
Noticef("TLS required for client connections")
|
||||
}
|
||||
|
||||
Debugf("Server id is %s", s.info.ID)
|
||||
Noticef("Server is ready")
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.listener = l
|
||||
|
||||
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
|
||||
// to 0 at the beginning this function. So we need to get the actual port
|
||||
if s.opts.Port == 0 {
|
||||
// Write resolved port back to options.
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
portNum, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.opts.Port = portNum
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
// Let the caller know that we are ready
|
||||
close(clr)
|
||||
clr = nil
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Client Accept Error(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createClient(conn)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Noticef("Server Exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartProfiler is called to enable dynamic profiling.
|
||||
func (s *Server) StartProfiler() {
|
||||
Noticef("Starting profiling on http port %d", s.opts.ProfPort)
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.ProfPort))
|
||||
go func() {
|
||||
err := http.ListenAndServe(hp, nil)
|
||||
if err != nil {
|
||||
Fatalf("error starting monitor server: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// StartHTTPMonitoring will enable the HTTP monitoring port.
// Thin wrapper over startMonitoring with TLS disabled.
func (s *Server) StartHTTPMonitoring() {
	s.startMonitoring(false)
}
|
||||
|
||||
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// Thin wrapper over startMonitoring with TLS enabled.
func (s *Server) StartHTTPSMonitoring() {
	s.startMonitoring(true)
}
|
||||
|
||||
// HTTP endpoints served by the monitoring listener (see startMonitoring).
const (
	RootPath    = "/"
	VarzPath    = "/varz"
	ConnzPath   = "/connz"
	RoutezPath  = "/routez"
	SubszPath   = "/subsz"
	StackszPath = "/stacksz"
)
|
||||
|
||||
// Start the monitoring server
|
||||
func (s *Server) startMonitoring(secure bool) {
|
||||
|
||||
// Used to track HTTP requests
|
||||
s.httpReqStats = map[string]uint64{
|
||||
RootPath: 0,
|
||||
VarzPath: 0,
|
||||
ConnzPath: 0,
|
||||
RoutezPath: 0,
|
||||
SubszPath: 0,
|
||||
}
|
||||
|
||||
var hp string
|
||||
var err error
|
||||
|
||||
if secure {
|
||||
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPSPort))
|
||||
Noticef("Starting https monitor on %s", hp)
|
||||
config := util.CloneTLSConfig(s.opts.TLSConfig)
|
||||
config.ClientAuth = tls.NoClientCert
|
||||
s.http, err = tls.Listen("tcp", hp, config)
|
||||
|
||||
} else {
|
||||
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPPort))
|
||||
Noticef("Starting http monitor on %s", hp)
|
||||
s.http, err = net.Listen("tcp", hp)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
Fatalf("Can't listen to the monitor port: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Root
|
||||
mux.HandleFunc(RootPath, s.HandleRoot)
|
||||
// Varz
|
||||
mux.HandleFunc(VarzPath, s.HandleVarz)
|
||||
// Connz
|
||||
mux.HandleFunc(ConnzPath, s.HandleConnz)
|
||||
// Routez
|
||||
mux.HandleFunc(RoutezPath, s.HandleRoutez)
|
||||
// Subz
|
||||
mux.HandleFunc(SubszPath, s.HandleSubsz)
|
||||
// Subz alias for backwards compatibility
|
||||
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
|
||||
// Stacksz
|
||||
mux.HandleFunc(StackszPath, s.HandleStacksz)
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: hp,
|
||||
Handler: mux,
|
||||
ReadTimeout: 2 * time.Second,
|
||||
WriteTimeout: 2 * time.Second,
|
||||
MaxHeaderBytes: 1 << 20,
|
||||
}
|
||||
|
||||
go func() {
|
||||
srv.Serve(s.http)
|
||||
srv.Handler = nil
|
||||
s.done <- true
|
||||
}()
|
||||
}
|
||||
|
||||
// createClient wires up a freshly accepted net.Conn as a NATS client:
// it sends the INFO protocol line, optionally upgrades to TLS, registers
// the client with the server, and spins up its read loop. It returns the
// client (possibly already closed), or nil when the connection was
// rejected (max connections exceeded, TLS handshake failure).
// Note the careful interleaving of s.mu and c.mu: they are never held
// at the same time.
func (s *Server) createClient(conn net.Conn) *client {
	c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: s.info.MaxPayload, start: time.Now()}

	// Grab JSON info string and auth/TLS requirements under the server
	// lock; totalClients counts every accepted connection, even ones
	// rejected later in this function.
	s.mu.Lock()
	info := s.infoJSON
	authRequired := s.info.AuthRequired
	tlsRequired := s.info.TLSRequired
	s.totalClients++
	s.mu.Unlock()

	// Grab lock
	c.mu.Lock()

	// Initialize
	c.initClient()

	c.Debugf("Client connection created")

	// Check for Auth: arm a timer that will close the connection if the
	// client does not authenticate in time.
	if authRequired {
		c.setAuthTimer(secondsToDuration(s.opts.AuthTimeout))
	}

	// Send our information.
	c.sendInfo(info)

	// Unlock to register
	c.mu.Unlock()

	// Register with the server.
	s.mu.Lock()
	// If server is not running, Shutdown() may have already gathered the
	// list of connections to close. It won't contain this one, so we need
	// to bail out now otherwise the readLoop started down there would not
	// be interrupted.
	if !s.running {
		s.mu.Unlock()
		return c
	}
	// If there is a max connections specified, check that adding
	// this new client would not push us over the max
	if s.opts.MaxConn > 0 && len(s.clients) >= s.opts.MaxConn {
		s.mu.Unlock()
		c.maxConnExceeded()
		return nil
	}
	s.clients[c.cid] = c
	s.mu.Unlock()

	// Re-Grab lock
	c.mu.Lock()

	// Check for TLS
	if tlsRequired {
		c.Debugf("Starting TLS client connection handshake")
		c.nc = tls.Server(c.nc, s.opts.TLSConfig)
		conn := c.nc.(*tls.Conn)

		// Setup the timeout: tlsTimeout will tear the connection down if
		// the handshake has not completed when it fires.
		ttl := secondsToDuration(s.opts.TLSTimeout)
		time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
		conn.SetReadDeadline(time.Now().Add(ttl))

		// Force handshake. Must release c.mu first: the handshake does
		// network I/O and other goroutines (e.g. tlsTimeout) need the lock.
		c.mu.Unlock()
		if err := conn.Handshake(); err != nil {
			c.Debugf("TLS handshake error: %v", err)
			c.sendErr("Secure Connection - TLS Required")
			c.closeConnection()
			return nil
		}
		// Reset the read deadline
		conn.SetReadDeadline(time.Time{})

		// Re-Grab lock
		c.mu.Lock()
	}

	// The connection may have been closed while c.mu was released above.
	if c.nc == nil {
		c.mu.Unlock()
		return c
	}

	if tlsRequired {
		// Rewrap bw: the buffered writer must now point at the TLS
		// connection, not the raw one captured in initClient.
		c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	}

	// Do final client initialization

	// Set the Ping timer
	c.setPingTimer()

	// Spin up the read loop.
	s.startGoRoutine(func() { c.readLoop() })

	if tlsRequired {
		c.Debugf("TLS handshake complete")
		cs := c.nc.(*tls.Conn).ConnectionState()
		c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
	}

	c.mu.Unlock()

	return c
}
|
||||
|
||||
// updateServerINFO updates the server's Info object with the given
|
||||
// array of URLs and re-generate the infoJSON byte array, only if the
|
||||
// given URLs were not already recorded and if the feature is not
|
||||
// disabled.
|
||||
// Returns a boolean indicating if server's Info was updated.
|
||||
func (s *Server) updateServerINFO(urls []string) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Feature disabled, do not update.
|
||||
if s.opts.Cluster.NoAdvertise {
|
||||
return false
|
||||
}
|
||||
|
||||
// Will be set to true if we alter the server's Info object.
|
||||
wasUpdated := false
|
||||
for _, url := range urls {
|
||||
if _, present := s.info.clientConnectURLs[url]; !present {
|
||||
|
||||
s.info.clientConnectURLs[url] = struct{}{}
|
||||
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
|
||||
wasUpdated = true
|
||||
}
|
||||
}
|
||||
if wasUpdated {
|
||||
s.generateServerInfoJSON()
|
||||
}
|
||||
return wasUpdated
|
||||
}
|
||||
|
||||
// tlsTimeout handles closing down a connection when the TLS handshake
// has timed out. It is armed via time.AfterFunc in createClient and
// fires unconditionally; it is a no-op when the connection is already
// closed or the handshake completed in time.
func tlsTimeout(c *client, conn *tls.Conn) {
	c.mu.Lock()
	nc := c.nc
	c.mu.Unlock()
	// Check if already closed
	if nc == nil {
		return
	}
	cs := conn.ConnectionState()
	if !cs.HandshakeComplete {
		c.Debugf("TLS handshake timeout")
		c.sendErr("Secure Connection - TLS Required")
		c.closeConnection()
	}
}
|
||||
|
||||
// Seems silly we have to write these
|
||||
func tlsVersion(ver uint16) string {
|
||||
switch ver {
|
||||
case tls.VersionTLS10:
|
||||
return "1.0"
|
||||
case tls.VersionTLS11:
|
||||
return "1.1"
|
||||
case tls.VersionTLS12:
|
||||
return "1.2"
|
||||
}
|
||||
return fmt.Sprintf("Unknown [%x]", ver)
|
||||
}
|
||||
|
||||
// cipherSuiteNames maps cipher suite IDs to their standard names. Raw
// hex values are used (instead of crypto/tls constants) so a single
// table works across Go releases that expose different constant sets.
var cipherSuiteNames = map[uint16]string{
	0x0005: "TLS_RSA_WITH_RC4_128_SHA",
	0x000a: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
	0x002f: "TLS_RSA_WITH_AES_128_CBC_SHA",
	0x0035: "TLS_RSA_WITH_AES_256_CBC_SHA",
	0xc007: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
	0xc009: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
	0xc00a: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
	0xc011: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
	0xc012: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
	0xc013: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
	0xc014: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
	0xc02b: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
	0xc02c: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
	0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
	0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
}

// tlsCipher renders a cipher suite ID as its standard name for logging.
func tlsCipher(cs uint16) string {
	if name, ok := cipherSuiteNames[cs]; ok {
		return name
	}
	return fmt.Sprintf("Unknown [%x]", cs)
}
|
||||
|
||||
func (s *Server) checkClientAuth(c *client) bool {
|
||||
if s.cAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.cAuth.Check(c)
|
||||
}
|
||||
|
||||
func (s *Server) checkRouterAuth(c *client) bool {
|
||||
if s.rAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.rAuth.Check(c)
|
||||
}
|
||||
|
||||
// Check auth and return boolean indicating if client is ok
|
||||
func (s *Server) checkAuth(c *client) bool {
|
||||
switch c.typ {
|
||||
case CLIENT:
|
||||
return s.checkClientAuth(c)
|
||||
case ROUTER:
|
||||
return s.checkRouterAuth(c)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// removeClient removes a client or route from our internal accounting.
// It snapshots everything it needs under c.mu first, then updates the
// server maps under s.mu — the two locks are never held together.
func (s *Server) removeClient(c *client) {
	var rID string
	c.mu.Lock()
	cid := c.cid
	typ := c.typ
	r := c.route
	if r != nil {
		rID = r.remoteID
	}
	// Clients that spoke at least the INFO-capable protocol were counted
	// in s.cproto; remember to decrement below.
	updateProtoInfoCount := false
	if typ == CLIENT && c.opts.Protocol >= ClientProtoInfo {
		updateProtoInfoCount = true
	}
	c.mu.Unlock()

	s.mu.Lock()
	switch typ {
	case CLIENT:
		delete(s.clients, cid)
		if updateProtoInfoCount {
			s.cproto--
		}
	case ROUTER:
		delete(s.routes, cid)
		if r != nil {
			rc, ok := s.remotes[rID]
			// Only delete it if it is us..
			if ok && c == rc {
				delete(s.remotes, rID)
			}
		}
	}
	s.mu.Unlock()
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// These are some helpers for accounting in functional tests.
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
// NumRoutes will report the number of registered routes.
|
||||
func (s *Server) NumRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
||||
|
||||
// NumRemotes will report number of registered remotes.
|
||||
func (s *Server) NumRemotes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.remotes)
|
||||
}
|
||||
|
||||
// NumClients will report the number of registered clients.
|
||||
func (s *Server) NumClients() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.clients)
|
||||
}
|
||||
|
||||
// NumSubscriptions will report how many subscriptions are active.
|
||||
func (s *Server) NumSubscriptions() uint32 {
|
||||
s.mu.Lock()
|
||||
subs := s.sl.Count()
|
||||
s.mu.Unlock()
|
||||
return subs
|
||||
}
|
||||
|
||||
// Addr will return the net.Addr object for the current listener.
|
||||
func (s *Server) Addr() net.Addr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.listener == nil {
|
||||
return nil
|
||||
}
|
||||
return s.listener.Addr()
|
||||
}
|
||||
|
||||
// ReadyForConnections returns `true` if the server is ready to accept client
|
||||
// and, if routing is enabled, route connections. If after the duration
|
||||
// `dur` the server is still not ready, returns `false`.
|
||||
func (s *Server) ReadyForConnections(dur time.Duration) bool {
|
||||
end := time.Now().Add(dur)
|
||||
for time.Now().Before(end) {
|
||||
s.mu.Lock()
|
||||
ok := s.listener != nil && (s.opts.Cluster.Port == 0 || s.routeListener != nil)
|
||||
s.mu.Unlock()
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ID returns the server's ID
|
||||
func (s *Server) ID() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.info.ID
|
||||
}
|
||||
|
||||
func (s *Server) startGoRoutine(f func()) {
|
||||
s.grMu.Lock()
|
||||
if s.grRunning {
|
||||
s.grWG.Add(1)
|
||||
go f()
|
||||
}
|
||||
s.grMu.Unlock()
|
||||
}
|
||||
|
||||
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
func (s *Server) getClientConnectURLs() []string {
	s.mu.Lock()
	defer s.mu.Unlock()

	sPort := strconv.Itoa(s.opts.Port)
	urls := make([]string, 0, 1)

	ipAddr, err := net.ResolveIPAddr("ip", s.opts.Host)
	// If the host is "any" (0.0.0.0 or ::), get specific IPs from available
	// interfaces.
	if err == nil && ipAddr.IP.IsUnspecified() {
		var ip net.IP
		ifaces, _ := net.Interfaces()
		for _, i := range ifaces {
			addrs, _ := i.Addrs()
			for _, addr := range addrs {
				switch v := addr.(type) {
				case *net.IPNet:
					ip = v.IP
				case *net.IPAddr:
					ip = v.IP
				}
				// NOTE(review): if addr is neither *net.IPNet nor *net.IPAddr,
				// ip keeps its value from the previous iteration — presumably
				// both types cover all practical cases; verify upstream.
				// Skip non global unicast addresses
				if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
					ip = nil
					continue
				}
				urls = append(urls, net.JoinHostPort(ip.String(), sPort))
			}
		}
	}
	if err != nil || len(urls) == 0 {
		// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
		// reason we could not add any URL in the loop above.
		// We had a case where a Windows VM was hosed and would have err == nil
		// and not add any address in the array in the loop above, and we
		// ended-up returning 0.0.0.0, which is problematic for Windows clients.
		// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
		if s.opts.Host == "0.0.0.0" || s.opts.Host == "::" {
			Errorf("Address %q can not be resolved properly", s.opts.Host)
		} else {
			urls = append(urls, net.JoinHostPort(s.opts.Host, sPort))
		}
	}
	return urls
}
|
34
vendor/github.com/nats-io/gnatsd/server/signal.go
generated
vendored
Normal file
34
vendor/github.com/nats-io/gnatsd/server/signal.go
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
// +build !windows
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Signal Handling
|
||||
func (s *Server) handleSignals() {
|
||||
if s.opts.NoSigs {
|
||||
return
|
||||
}
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
signal.Notify(c, syscall.SIGINT, syscall.SIGUSR1)
|
||||
|
||||
go func() {
|
||||
for sig := range c {
|
||||
Debugf("Trapped %q signal", sig)
|
||||
switch sig {
|
||||
case syscall.SIGINT:
|
||||
Noticef("Server Exiting..")
|
||||
os.Exit(0)
|
||||
case syscall.SIGUSR1:
|
||||
// File log re-open for rotating file logs.
|
||||
s.ReOpenLogFile()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
26
vendor/github.com/nats-io/gnatsd/server/signal_windows.go
generated
vendored
Normal file
26
vendor/github.com/nats-io/gnatsd/server/signal_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
)
|
||||
|
||||
// Signal Handling
|
||||
func (s *Server) handleSignals() {
|
||||
if s.opts.NoSigs {
|
||||
return
|
||||
}
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
signal.Notify(c, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
for sig := range c {
|
||||
Debugf("Trapped %q signal", sig)
|
||||
Noticef("Server Exiting..")
|
||||
os.Exit(0)
|
||||
}
|
||||
}()
|
||||
}
|
643
vendor/github.com/nats-io/gnatsd/server/sublist.go
generated
vendored
Normal file
643
vendor/github.com/nats-io/gnatsd/server/sublist.go
generated
vendored
Normal file
|
@ -0,0 +1,643 @@
|
|||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package sublist is a routing mechanism to handle subject distribution
|
||||
// and provides a facility to match subjects from published messages to
|
||||
// interested subscribers. Subscribers can have wildcard subjects to match
|
||||
// multiple published subjects.
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Common byte variables for wildcards and token separator.
const (
	pwc   = '*' // partial wildcard: matches exactly one token
	fwc   = '>' // full wildcard: matches the rest of the subject
	tsep  = "." // token separator (string form, for strings.Split)
	btsep = '.' // token separator (byte form, for hot-path scanning)
)

// Sublist related errors
var (
	ErrInvalidSubject = errors.New("sublist: Invalid Subject")
	ErrNotFound       = errors.New("sublist: No Matches Found")
)

// cacheMax is used to bound limit the frontend cache
const slCacheMax = 1024

// A result structure better optimized for queue subs.
type SublistResult struct {
	psubs []*subscription   // plain subscriptions
	qsubs [][]*subscription // don't make this a map, too expensive to iterate
}

// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
	sync.RWMutex
	genid     uint64 // generation id, bumped atomically on every insert/remove
	matches   uint64 // total Match calls (updated atomically)
	cacheHits uint64 // Match calls served from the cache (updated atomically)
	inserts   uint64 // total successful Insert calls
	removes   uint64 // total successful Remove calls
	cache     map[string]*SublistResult // subject -> result frontend cache, bounded by slCacheMax
	root      *level // root of the subject token trie
	count     uint32 // number of subscriptions currently stored
}

// A node contains subscriptions and a pointer to the next level.
type node struct {
	next  *level
	psubs []*subscription
	qsubs [][]*subscription
}

// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
	nodes    map[string]*node // literal token -> node
	pwc, fwc *node            // dedicated slots for '*' and '>' children
}
|
||||
|
||||
// Create a new default node.
|
||||
func newNode() *node {
|
||||
return &node{psubs: make([]*subscription, 0, 4)}
|
||||
}
|
||||
|
||||
// Create a new default level. We use FNV1A as the hash
|
||||
// algortihm for the tokens, which should be short.
|
||||
func newLevel() *level {
|
||||
return &level{nodes: make(map[string]*node)}
|
||||
}
|
||||
|
||||
// New will create a default sublist
|
||||
func NewSublist() *Sublist {
|
||||
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
|
||||
}
|
||||
|
||||
// Insert adds a subscription into the sublist. It tokenizes the subject,
// walks/creates the trie path for it, appends the subscription to the
// terminal node (plain list or matching queue-group slice), updates the
// matching cache entries, and bumps the generation id.
// Returns ErrInvalidSubject for empty tokens or tokens after a '>'.
func (s *Sublist) Insert(sub *subscription) error {
	// copy the subject since we hold this and this might be part of a large byte slice.
	subject := string(sub.subject)
	// Tokenize on '.' into a stack-backed slice to avoid allocation for
	// subjects with up to 32 tokens.
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])

	s.Lock()

	sfwc := false
	l := s.root
	var n *node

	for _, t := range tokens {
		// Empty token, or any token after a full wildcard, is invalid.
		if len(t) == 0 || sfwc {
			s.Unlock()
			return ErrInvalidSubject
		}

		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		// Create the node on first use of this path.
		if n == nil {
			n = newNode()
			switch t[0] {
			case pwc:
				l.pwc = n
			case fwc:
				l.fwc = n
			default:
				l.nodes[t] = n
			}
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}
	// n is now the terminal node for the subject.
	if sub.queue == nil {
		n.psubs = append(n.psubs, sub)
	} else {
		// This is a queue subscription
		if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
			n.qsubs[i] = append(n.qsubs[i], sub)
		} else {
			n.qsubs = append(n.qsubs, []*subscription{sub})
		}
	}

	s.count++
	s.inserts++

	s.addToCache(subject, sub)
	atomic.AddUint64(&s.genid, 1)

	s.Unlock()
	return nil
}
|
||||
|
||||
// Deep copy
|
||||
func copyResult(r *SublistResult) *SublistResult {
|
||||
nr := &SublistResult{}
|
||||
nr.psubs = append([]*subscription(nil), r.psubs...)
|
||||
for _, qr := range r.qsubs {
|
||||
nqr := append([]*subscription(nil), qr...)
|
||||
nr.qsubs = append(nr.qsubs, nqr)
|
||||
}
|
||||
return nr
|
||||
}
|
||||
|
||||
// addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held.
// Cached results are never mutated in place (readers may hold a
// reference), so a matching entry is deep-copied, amended, and swapped.
func (s *Sublist) addToCache(subject string, sub *subscription) {
	for k, r := range s.cache {
		if matchLiteral(k, subject) {
			// Copy since others may have a reference.
			nr := copyResult(r)
			if sub.queue == nil {
				nr.psubs = append(nr.psubs, sub)
			} else {
				// Queue subscription: append into its group, or start a new one.
				if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
					nr.qsubs[i] = append(nr.qsubs[i], sub)
				} else {
					nr.qsubs = append(nr.qsubs, []*subscription{sub})
				}
			}
			s.cache[k] = nr
		}
	}
}
|
||||
|
||||
// removeFromCache will remove the sub from any active cache entries.
|
||||
// Assumes write lock is held.
|
||||
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
|
||||
for k := range s.cache {
|
||||
if !matchLiteral(k, subject) {
|
||||
continue
|
||||
}
|
||||
// Since someone else may be referecing, can't modify the list
|
||||
// safely, just let it re-populate.
|
||||
delete(s.cache, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Match will match all entries to the literal subject.
// It will return a set of results for both normal and queue subscribers.
// Fast path: a cached result under the read lock. Slow path: tokenize,
// walk the trie under the write lock, and cache the computed result.
func (s *Sublist) Match(subject string) *SublistResult {
	s.RLock()
	atomic.AddUint64(&s.matches, 1)
	rc, ok := s.cache[subject]
	s.RUnlock()
	if ok {
		atomic.AddUint64(&s.cacheHits, 1)
		return rc
	}

	// Tokenize on '.' into a stack-backed slice (no allocation for up to
	// 32 tokens).
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])

	// FIXME(dlc) - Make shared pool between sublist and client readLoop?
	result := &SublistResult{}

	// Write lock: we are about to mutate the cache below.
	s.Lock()
	matchLevel(s.root, tokens, result)

	// Add to our cache
	s.cache[subject] = result
	// Bound the number of entries to sublistMaxCache: evict one arbitrary
	// entry (map iteration order is random) when over the limit.
	if len(s.cache) > slCacheMax {
		for k := range s.cache {
			delete(s.cache, k)
			break
		}
	}
	s.Unlock()

	return result
}
|
||||
|
||||
// addNodeToResults merges a node's subscriptions into the accumulated
// results: plain subs are appended directly; each queue group is merged
// into the results' group with the same queue name, or appended as a
// new group.
func addNodeToResults(n *node, results *SublistResult) {
	results.psubs = append(results.psubs, n.psubs...)
	for _, qr := range n.qsubs {
		if len(qr) == 0 {
			continue
		}
		// Need to find matching list in results
		if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
			results.qsubs[i] = append(results.qsubs[i], qr...)
		} else {
			results.qsubs = append(results.qsubs, qr)
		}
	}
}
|
||||
|
||||
// We do not use a map here since we want iteration to be past when
|
||||
// processing publishes in L1 on client. So we need to walk sequentially
|
||||
// for now. Keep an eye on this in case we start getting large number of
|
||||
// different queue subscribers for the same subject.
|
||||
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
|
||||
if sub.queue == nil {
|
||||
return -1
|
||||
}
|
||||
for i, qr := range qsl {
|
||||
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// matchLevel is used to recursively descend into the trie.
// At each level: a '>' child matches everything remaining, a '*' child
// recurses over the remaining tokens, and the literal child continues
// the walk. The terminal literal and pwc nodes (from the last token) are
// collected after the loop.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var pwc, n *node
	for i, t := range toks {
		if l == nil {
			return
		}
		// Full wildcard at this level matches the rest of the subject.
		if l.fwc != nil {
			addNodeToResults(l.fwc, results)
		}
		// Partial wildcard consumes this token; recurse on the rest.
		if pwc = l.pwc; pwc != nil {
			matchLevel(pwc.next, toks[i+1:], results)
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if pwc != nil {
		addNodeToResults(pwc, results)
	}
}
|
||||
|
||||
// lnt is used to track descent into levels for a removal for pruning.
type lnt struct {
	l *level  // the level visited
	n *node   // the node chosen at that level
	t string  // the token that selected n
}

// Remove will remove a subscription. It walks the trie path for the
// subject, removes the subscription from the terminal node, prunes any
// levels left empty (bottom-up), invalidates matching cache entries,
// and bumps the generation id.
func (s *Sublist) Remove(sub *subscription) error {
	subject := string(sub.subject)
	// Tokenize on '.' into a stack-backed slice.
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])

	s.Lock()
	defer s.Unlock()

	sfwc := false
	l := s.root
	var n *node

	// Track levels for pruning
	var lnts [32]lnt
	levels := lnts[:0]

	for _, t := range tokens {
		if len(t) == 0 || sfwc {
			return ErrInvalidSubject
		}
		if l == nil {
			return ErrNotFound
		}
		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		if n != nil {
			levels = append(levels, lnt{l, n, t})
			l = n.next
		} else {
			l = nil
		}
	}
	if !s.removeFromNode(n, sub) {
		return ErrNotFound
	}

	s.count--
	s.removes++

	// Prune now-empty nodes from the deepest level upward.
	for i := len(levels) - 1; i >= 0; i-- {
		l, n, t := levels[i].l, levels[i].n, levels[i].t
		if n.isEmpty() {
			l.pruneNode(n, t)
		}
	}
	s.removeFromCache(subject, sub)
	atomic.AddUint64(&s.genid, 1)

	return nil
}
|
||||
|
||||
// pruneNode is used to prune an empty node from the tree.
|
||||
func (l *level) pruneNode(n *node, t string) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
if n == l.fwc {
|
||||
l.fwc = nil
|
||||
} else if n == l.pwc {
|
||||
l.pwc = nil
|
||||
} else {
|
||||
delete(l.nodes, t)
|
||||
}
|
||||
}
|
||||
|
||||
// isEmpty will test if the node has any entries. Used
|
||||
// in pruning.
|
||||
func (n *node) isEmpty() bool {
|
||||
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
|
||||
if n.next == nil || n.next.numNodes() == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Return the number of nodes for the given level.
|
||||
func (l *level) numNodes() int {
|
||||
num := len(l.nodes)
|
||||
if l.pwc != nil {
|
||||
num++
|
||||
}
|
||||
if l.fwc != nil {
|
||||
num++
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
// removeSubFromList removes a sub from a list, returning the (possibly
// reallocated) list and whether the sub was found.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
	for i := 0; i < len(sl); i++ {
		if sl[i] == sub {
			// Swap-delete: order is not significant here, so move the last
			// element into the hole and truncate.
			last := len(sl) - 1
			sl[i] = sl[last]
			sl[last] = nil // clear the tail slot so the GC can reclaim it
			sl = sl[:last]
			return shrinkAsNeeded(sl), true
		}
	}
	return sl, false
}
|
||||
|
||||
// removeFromNode removes the sub from the given node: from the plain
// list for normal subscriptions, or from its queue-group slice for
// queue subscriptions. An emptied queue group is swap-deleted from
// the node's group list.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	if sub.queue == nil {
		n.psubs, found = removeSubFromList(sub, n.psubs)
		return found
	}

	// We have a queue group subscription here
	if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
		n.qsubs[i], found = removeSubFromList(sub, n.qsubs[i])
		if len(n.qsubs[i]) == 0 {
			// Group is now empty: swap-delete it from the group list.
			last := len(n.qsubs) - 1
			n.qsubs[i] = n.qsubs[last]
			n.qsubs[last] = nil
			n.qsubs = n.qsubs[:last]
			if len(n.qsubs) == 0 {
				n.qsubs = nil
			}
		}
		return found
	}
	return false
}
|
||||
|
||||
// Checks if we need to do a resize. This is for very large growth then
|
||||
// subsequent return to a more normal size from unsubscribe.
|
||||
func shrinkAsNeeded(sl []*subscription) []*subscription {
|
||||
lsl := len(sl)
|
||||
csl := cap(sl)
|
||||
// Don't bother if list not too big
|
||||
if csl <= 8 {
|
||||
return sl
|
||||
}
|
||||
pFree := float32(csl-lsl) / float32(csl)
|
||||
if pFree > 0.50 {
|
||||
return append([]*subscription(nil), sl...)
|
||||
}
|
||||
return sl
|
||||
}
|
||||
|
||||
// Count returns the number of subscriptions.
|
||||
func (s *Sublist) Count() uint32 {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.count
|
||||
}
|
||||
|
||||
// CacheCount returns the number of result sets in the cache.
|
||||
func (s *Sublist) CacheCount() int {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return len(s.cache)
|
||||
}
|
||||
|
||||
// Public stats for the sublist
type SublistStats struct {
	NumSubs      uint32  `json:"num_subscriptions"`
	NumCache     uint32  `json:"num_cache"`
	NumInserts   uint64  `json:"num_inserts"`
	NumRemoves   uint64  `json:"num_removes"`
	NumMatches   uint64  `json:"num_matches"`
	CacheHitRate float64 `json:"cache_hit_rate"`
	MaxFanout    uint32  `json:"max_fanout"`
	AvgFanout    float64 `json:"avg_fanout"`
}

// Stats will return a stats structure for the current state.
// Fanout figures are derived only from the entries currently in the
// result cache, not from the full trie.
func (s *Sublist) Stats() *SublistStats {
	s.Lock()
	defer s.Unlock()

	st := &SublistStats{}
	st.NumSubs = s.count
	st.NumCache = uint32(len(s.cache))
	st.NumInserts = s.inserts
	st.NumRemoves = s.removes
	st.NumMatches = s.matches
	if s.matches > 0 {
		st.CacheHitRate = float64(s.cacheHits) / float64(s.matches)
	}
	// whip through cache for fanout stats
	tot, max := 0, 0
	for _, r := range s.cache {
		l := len(r.psubs) + len(r.qsubs)
		tot += l
		if l > max {
			max = l
		}
	}
	st.MaxFanout = uint32(max)
	if tot > 0 {
		st.AvgFanout = float64(tot) / float64(len(s.cache))
	}
	return st
}
|
||||
|
||||
// numLevels will return the maximum number of levels
|
||||
// contained in the Sublist tree.
|
||||
func (s *Sublist) numLevels() int {
|
||||
return visitLevel(s.root, 0)
|
||||
}
|
||||
|
||||
// visitLevel is used to descend the Sublist tree structure
|
||||
// recursively.
|
||||
func visitLevel(l *level, depth int) int {
|
||||
if l == nil || l.numNodes() == 0 {
|
||||
return depth
|
||||
}
|
||||
|
||||
depth++
|
||||
maxDepth := depth
|
||||
|
||||
for _, n := range l.nodes {
|
||||
if n == nil {
|
||||
continue
|
||||
}
|
||||
newDepth := visitLevel(n.next, depth)
|
||||
if newDepth > maxDepth {
|
||||
maxDepth = newDepth
|
||||
}
|
||||
}
|
||||
if l.pwc != nil {
|
||||
pwcDepth := visitLevel(l.pwc.next, depth)
|
||||
if pwcDepth > maxDepth {
|
||||
maxDepth = pwcDepth
|
||||
}
|
||||
}
|
||||
if l.fwc != nil {
|
||||
fwcDepth := visitLevel(l.fwc.next, depth)
|
||||
if fwcDepth > maxDepth {
|
||||
maxDepth = fwcDepth
|
||||
}
|
||||
}
|
||||
return maxDepth
|
||||
}
|
||||
|
||||
// IsValidSubject returns true if a subject is valid, false otherwise
|
||||
func IsValidSubject(subject string) bool {
|
||||
if subject == "" {
|
||||
return false
|
||||
}
|
||||
sfwc := false
|
||||
tokens := strings.Split(string(subject), tsep)
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 || sfwc {
|
||||
return false
|
||||
}
|
||||
if len(t) > 1 {
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case fwc:
|
||||
sfwc = true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsValidLiteralSubject returns true if a subject is valid and literal (no wildcards), false otherwise
|
||||
func IsValidLiteralSubject(subject string) bool {
|
||||
tokens := strings.Split(string(subject), tsep)
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(t) > 1 {
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case pwc, fwc:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchLiteral is used to test literal subjects, those that do not have any
|
||||
// wildcards, with a target subject. This is used in the cache layer.
|
||||
func matchLiteral(literal, subject string) bool {
|
||||
li := 0
|
||||
ll := len(literal)
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if li >= ll {
|
||||
return false
|
||||
}
|
||||
b := subject[i]
|
||||
switch b {
|
||||
case pwc:
|
||||
// Skip token in literal
|
||||
ll := len(literal)
|
||||
for {
|
||||
if li >= ll || literal[li] == btsep {
|
||||
li--
|
||||
break
|
||||
}
|
||||
li++
|
||||
}
|
||||
case fwc:
|
||||
return true
|
||||
default:
|
||||
if b != literal[li] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
li++
|
||||
}
|
||||
// Make sure we have processed all of the literal's chars..
|
||||
if li < ll {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
Normal file
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
Normal file
|
@ -0,0 +1,56 @@
|
|||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nuid"
|
||||
)
|
||||
|
||||
// genID returns a new unique identifier string. Generation is delegated
// to the nuid package.
func genID() string {
	return nuid.Next()
}
|
||||
|
||||
// ASCII codes for the decimal digits '0' and '9'.
const (
	asciiZero = 48
	asciiNine = 57
)

// parseSize parses a non-negative decimal number from d into an int.
// Empty input or any non-digit byte yields -1.
func parseSize(d []byte) (n int) {
	if len(d) == 0 {
		return -1
	}
	for _, c := range d {
		if c < asciiZero || c > asciiNine {
			return -1
		}
		n = n*10 + int(c-asciiZero)
	}
	return n
}

// parseInt64 parses a non-negative decimal number from d into an int64.
// Empty input or any non-digit byte yields -1.
func parseInt64(d []byte) (n int64) {
	if len(d) == 0 {
		return -1
	}
	for _, c := range d {
		if c < asciiZero || c > asciiNine {
			return -1
		}
		n = n*10 + int64(c-asciiZero)
	}
	return n
}
|
||||
|
||||
// Helper to move from float seconds to time.Duration
|
||||
func secondsToDuration(seconds float64) time.Duration {
|
||||
ttl := seconds * float64(time.Second)
|
||||
return time.Duration(ttl)
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue