Add vendoring to containerd master

Initial vendor list validated with empty $GOPATH
and only master checked out; followed by `make`
and verified that all binaries build properly.
Updates require github.com/LK4D4/vndr tool.

Signed-off-by: Phil Estes <estesp@linux.vnet.ibm.com>
Phil Estes 2016-12-16 12:03:35 -05:00
parent 286ea04591
commit dd9309c15e
407 changed files with 113562 additions and 0 deletions


@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Apcera Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -0,0 +1,115 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package server
import (
"github.com/nats-io/nats-streaming-server/stores"
"sync"
"time"
)
// This is a proxy to the store interface.
type clientStore struct {
store stores.Store
}
// client has information needed by the server. A client is also
// stored in a stores.Client object (which contains ID and HbInbox).
type client struct {
sync.RWMutex
unregistered bool
hbt *time.Timer
fhb int
subs []*subState
}
// Register registers a client if new; otherwise it returns the client already
// registered and `false` to indicate that the client is not new.
func (cs *clientStore) Register(ID, hbInbox string) (*stores.Client, bool, error) {
// Will be gc'ed if we fail to register, that's ok.
c := &client{subs: make([]*subState, 0, 4)}
sc, isNew, err := cs.store.AddClient(ID, hbInbox, c)
if err != nil {
return nil, false, err
}
return sc, isNew, nil
}
// Unregister a client.
func (cs *clientStore) Unregister(ID string) *stores.Client {
sc := cs.store.DeleteClient(ID)
if sc != nil {
c := sc.UserData.(*client)
c.Lock()
c.unregistered = true
c.Unlock()
}
return sc
}
// IsValid returns true if the client is registered, false otherwise.
func (cs *clientStore) IsValid(ID string) bool {
return cs.store.GetClient(ID) != nil
}
// Lookup a client
func (cs *clientStore) Lookup(ID string) *client {
sc := cs.store.GetClient(ID)
if sc != nil {
return sc.UserData.(*client)
}
return nil
}
// GetSubs returns the list of subscriptions for the client identified by ID,
// or nil if no such client is found.
func (cs *clientStore) GetSubs(ID string) []*subState {
c := cs.Lookup(ID)
if c == nil {
return nil
}
c.RLock()
subs := make([]*subState, len(c.subs))
copy(subs, c.subs)
c.RUnlock()
return subs
}
// AddSub adds the subscription to the client identified by clientID
// and returns true only if the client has not been unregistered,
// otherwise returns false.
func (cs *clientStore) AddSub(ID string, sub *subState) bool {
sc := cs.store.GetClient(ID)
if sc == nil {
return false
}
c := sc.UserData.(*client)
c.Lock()
if c.unregistered {
c.Unlock()
return false
}
c.subs = append(c.subs, sub)
c.Unlock()
return true
}
// RemoveSub removes the subscription from the client identified by clientID
// and returns true only if the client has not been unregistered and the
// subscription was found; otherwise it returns false.
func (cs *clientStore) RemoveSub(ID string, sub *subState) bool {
sc := cs.store.GetClient(ID)
if sc == nil {
return false
}
c := sc.UserData.(*client)
c.Lock()
if c.unregistered {
c.Unlock()
return false
}
removed := false
c.subs, removed = sub.deleteFromList(c.subs)
c.Unlock()
return removed
}
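A hypothetical sketch of how the server code might drive this proxy, written as if inside the server package (clientStore and subState are unexported); the store and subscription values are assumed to come from elsewhere, and the function name is invented for illustration.

// Hypothetical in-package usage of clientStore (not part of this commit).
func exampleClientLifecycle(s stores.Store, sub *subState) {
	cs := &clientStore{store: s}

	// Register is idempotent: isNew reports whether the ID was unknown.
	if _, isNew, err := cs.Register("client-1", "_INBOX.hb.client-1"); err != nil {
		Errorf("register failed: %v", err)
		return
	} else if !isNew {
		Noticef("client-1 was already registered")
	}

	// AddSub returns false once the client has been unregistered.
	if !cs.AddSub("client-1", sub) {
		Errorf("client-1 already unregistered")
		return
	}

	// GetSubs returns a copy of the slice, safe to iterate without the client lock.
	Debugf("client-1 has %d subscription(s)", len(cs.GetSubs("client-1")))

	// Unregister flags the client so later AddSub/RemoveSub calls return false.
	cs.Unregister("client-1")
}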


@@ -0,0 +1,291 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package server
import (
"fmt"
"io/ioutil"
"reflect"
"strings"
"time"
"github.com/nats-io/gnatsd/conf"
"github.com/nats-io/nats-streaming-server/stores"
)
// ProcessConfigFile parses the configuration file `configFile` and updates
// the given Streaming options `opts`.
func ProcessConfigFile(configFile string, opts *Options) error {
data, err := ioutil.ReadFile(configFile)
if err != nil {
return err
}
m, err := conf.Parse(string(data))
if err != nil {
return err
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "id", "cid", "cluster_id":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ID = v.(string)
case "discover_prefix":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.DiscoverPrefix = v.(string)
case "st", "store_type", "store", "storetype":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
switch strings.ToUpper(v.(string)) {
case stores.TypeFile:
opts.StoreType = stores.TypeFile
case stores.TypeMemory:
opts.StoreType = stores.TypeMemory
default:
return fmt.Errorf("Unknown store type: %v", v.(string))
}
case "dir", "datastore":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.FilestoreDir = v.(string)
case "sd", "stan_debug":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.Debug = v.(bool)
case "sv", "stan_trace":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.Trace = v.(bool)
case "ns", "nats_server", "nats_server_url":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.NATSServerURL = v.(string)
case "secure":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.Secure = v.(bool)
case "tls":
if err := parseTLS(v, opts); err != nil {
return err
}
case "limits", "store_limits", "storelimits":
if err := parseStoreLimits(v, opts); err != nil {
return err
}
case "file", "file_options":
if err := parseFileOptions(v, opts); err != nil {
return err
}
}
}
return nil
}
// checkType returns a formatted error if `v` is not of the expected kind.
func checkType(name string, kind reflect.Kind, v interface{}) error {
actualKind := reflect.TypeOf(v).Kind()
if actualKind != kind {
return fmt.Errorf("Parameter %q value is expected to be %v, got %v",
name, kind.String(), actualKind.String())
}
return nil
}
// parseTLS updates `opts` with TLS config
func parseTLS(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected TLS to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "client_cert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCert = v.(string)
case "client_key":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientKey = v.(string)
case "client_ca", "client_cacert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCA = v.(string)
}
}
return nil
}
// parseStoreLimits updates `opts` with store limits
func parseStoreLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected store limits to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "mc", "max_channels", "maxchannels":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.MaxChannels = int(v.(int64))
case "channels", "channels_limits", "channelslimits", "per_channel", "per_channel_limits":
if err := parsePerChannelLimits(v, opts); err != nil {
return err
}
default:
// Check for the global limits (MaxMsgs, MaxBytes, etc..)
if err := parseChannelLimits(&opts.ChannelLimits, k, name, v); err != nil {
return err
}
}
}
return nil
}
// parseChannelLimits updates `cl` with channel limits.
func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}) error {
switch name {
case "msu", "max_subs", "max_subscriptions", "maxsubscriptions":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxSubscriptions = int(v.(int64))
case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxMsgs = int(v.(int64))
case "mb", "max_bytes", "maxbytes":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxBytes = v.(int64)
case "ma", "max_age", "maxage":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxAge = dur
}
return nil
}
// parsePerChannelLimits updates `opts` with per channel limits.
func parsePerChannelLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected per channel limits to be a map/struct, got %v", itf)
}
for channelName, limits := range m {
limitsMap, ok := limits.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected channel limits to be a map/struct, got %v", limits)
}
cl := &stores.ChannelLimits{}
for k, v := range limitsMap {
name := strings.ToLower(k)
if err := parseChannelLimits(cl, k, name, v); err != nil {
return err
}
}
sl := &opts.StoreLimits
sl.AddPerChannel(channelName, cl)
}
return nil
}
func parseFileOptions(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("Expected file options to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "compact", "compact_enabled":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.FileStoreOpts.CompactEnabled = v.(bool)
case "compact_frag", "compact_fragmentation":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.CompactFragmentation = int(v.(int64))
case "compact_interval":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.CompactInterval = int(v.(int64))
case "compact_min_size":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.CompactMinFileSize = v.(int64)
case "buffer_size":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.BufferSize = int(v.(int64))
case "crc", "do_crc":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.FileStoreOpts.DoCRC = v.(bool)
case "crc_poly":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.CRCPolynomial = v.(int64)
case "sync", "do_sync", "sync_on_flush":
if err := checkType(k, reflect.Bool, v); err != nil {
return err
}
opts.FileStoreOpts.DoSync = v.(bool)
case "slice_max_msgs", "slice_max_count", "slice_msgs", "slice_count":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.SliceMaxMsgs = int(v.(int64))
case "slice_max_bytes", "slice_max_size", "slice_bytes", "slice_size":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.FileStoreOpts.SliceMaxBytes = v.(int64)
case "slice_max_age", "slice_age", "slice_max_time", "slice_time_limit":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
opts.FileStoreOpts.SliceMaxAge = dur
case "slice_archive_script", "slice_archive", "slice_script":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.FileStoreOpts.SliceArchiveScript = v.(string)
}
}
return nil
}
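A sketch of driving ProcessConfigFile with a configuration that exercises the keys handled above. It is written as if inside the server package and assumes the os package is imported; the exact nesting syntax accepted by the gnatsd conf parser, and the temp-file handling, are assumptions rather than something taken from this commit.

// Hypothetical in-package sketch (not part of this commit).
func exampleLoadConfig() (*Options, error) {
	content := []byte(`
cluster_id: "my-cluster"
store_type: "FILE"
dir: "/tmp/stan-data"
store_limits: {
    max_channels: 200
    max_msgs: 50000
    max_age: "24h"
}
file_options: {
    compact_enabled: true
    buffer_size: 2097152
}
`)
	tmp, err := ioutil.TempFile("", "stan-conf")
	if err != nil {
		return nil, err
	}
	defer os.Remove(tmp.Name())
	if _, err := tmp.Write(content); err != nil {
		return nil, err
	}
	tmp.Close()

	opts := GetDefaultOptions()
	if err := ProcessConfigFile(tmp.Name(), opts); err != nil {
		return nil, err
	}
	return opts, nil
}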


@@ -0,0 +1,149 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package server
import (
"github.com/nats-io/gnatsd/logger"
natsd "github.com/nats-io/gnatsd/server"
"os"
"sync"
"sync/atomic"
)
// Logging in STAN
//
// The STAN logger is an instance of a NATS logger (basically duplicated
// from the NATS server code), and is passed into the NATS server.
//
// A note on Debugf and Tracef: These will be enabled within the log if
// either STAN or the NATS server enables them. However, STAN will only
// trace/debug if the local STAN debug/trace flags are set. NATS will do
// the same with its logger flags. This enables us to use the same logger,
// but differentiate between STAN and NATS debug/trace.
//
// All logging functions are fully implemented (versus calling into the NATS
// server) in case STAN is decoupled from the NATS server.
// Package globals for performance checks
var trace int32
var debug int32
// The STAN logger, encapsulates a NATS logger
var stanLog = struct {
sync.Mutex
logger natsd.Logger
}{}
// ConfigureLogger configures logging for STAN and the embedded NATS server
// based on options passed.
func ConfigureLogger(stanOpts *Options, natsOpts *natsd.Options) {
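// Note: s stays nil here and in RemoveLogger below; the SetLogger calls rely
// on the vendored gnatsd server updating package-level logging state rather
// than fields on the receiver.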
var s *natsd.Server
var newLogger natsd.Logger
sOpts := stanOpts
nOpts := natsOpts
if sOpts == nil {
sOpts = GetDefaultOptions()
}
if nOpts == nil {
nOpts = &natsd.Options{}
}
enableDebug := nOpts.Debug || sOpts.Debug
enableTrace := nOpts.Trace || sOpts.Trace
if nOpts.LogFile != "" {
newLogger = logger.NewFileLogger(nOpts.LogFile, nOpts.Logtime, enableDebug, sOpts.Trace, true)
} else if nOpts.RemoteSyslog != "" {
newLogger = logger.NewRemoteSysLogger(nOpts.RemoteSyslog, sOpts.Debug, sOpts.Trace)
} else if nOpts.Syslog {
newLogger = logger.NewSysLogger(sOpts.Debug, sOpts.Trace)
} else {
colors := true
// Check to see if stderr is being redirected and, if so, turn off colors.
// Also turn off colors if we're running on Windows, where os.Stderr.Stat() returns an "invalid handle" error.
stat, err := os.Stderr.Stat()
if err != nil || (stat.Mode()&os.ModeCharDevice) == 0 {
colors = false
}
newLogger = logger.NewStdLogger(nOpts.Logtime, enableDebug, enableTrace, colors, true)
}
if sOpts.Debug {
atomic.StoreInt32(&debug, 1)
}
if sOpts.Trace {
atomic.StoreInt32(&trace, 1)
}
// The NATS server will use the STAN logger
s.SetLogger(newLogger, nOpts.Debug, nOpts.Trace)
stanLog.Lock()
stanLog.logger = newLogger
stanLog.Unlock()
}
// RemoveLogger clears the logger instance and debug/trace flags.
// Used for testing.
func RemoveLogger() {
var s *natsd.Server
atomic.StoreInt32(&trace, 0)
atomic.StoreInt32(&debug, 0)
stanLog.Lock()
stanLog.logger = nil
stanLog.Unlock()
s.SetLogger(nil, false, false)
}
// Noticef logs a notice statement
func Noticef(format string, v ...interface{}) {
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
log.Noticef(format, v...)
}, format, v...)
}
// Errorf logs an error
func Errorf(format string, v ...interface{}) {
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
log.Errorf(format, v...)
}, format, v...)
}
// Fatalf logs a fatal error
func Fatalf(format string, v ...interface{}) {
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
log.Fatalf(format, v...)
}, format, v...)
}
// Debugf logs a debug statement
func Debugf(format string, v ...interface{}) {
if atomic.LoadInt32(&debug) != 0 {
executeLogCall(func(log natsd.Logger, format string, v ...interface{}) {
log.Debugf(format, v...)
}, format, v...)
}
}
// Tracef logs a trace statement
func Tracef(format string, v ...interface{}) {
if atomic.LoadInt32(&trace) != 0 {
executeLogCall(func(logger natsd.Logger, format string, v ...interface{}) {
logger.Tracef(format, v...)
}, format, v...)
}
}
func executeLogCall(f func(logger natsd.Logger, format string, v ...interface{}), format string, args ...interface{}) {
stanLog.Lock()
defer stanLog.Unlock()
if stanLog.logger == nil {
return
}
f(stanLog.logger, format, args...)
}
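A small sketch, written as if inside the server package, of installing the shared logger and then using the package-level helpers; only option fields that appear elsewhere in this commit are touched, and the function name is illustrative.

// Hypothetical in-package usage of the logging helpers.
func exampleLogging() {
	sOpts := GetDefaultOptions()
	sOpts.Debug = true // turn on STAN debug output

	nOpts := &natsd.Options{} // embedded NATS server options, defaults only

	// Install a single logger shared by STAN and the embedded NATS server.
	ConfigureLogger(sOpts, nOpts)

	Noticef("starting streaming server for cluster %q", sOpts.ID)
	Debugf("debug logging is enabled")          // printed: sOpts.Debug is true
	Tracef("this is only printed when tracing") // skipped: trace flag not set
}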

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,71 @@
// Copyright 2016 Apcera Inc. All rights reserved.
//
// Uses https://github.com/gogo/protobuf
// compiled via `protoc -I=. -I=$GOPATH/src --gogofaster_out=. protocol.proto`
syntax = "proto3";
package spb;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
// SubState represents the state of a Subscription
message SubState {
uint64 ID = 1; // Subscription ID assigned by the SubStore interface
string clientID = 2; // ClientID
string qGroup = 3; // Optional queue group
string inbox = 4; // Inbox subject to deliver messages on
string ackInbox = 5; // Inbox for acks
int32 maxInFlight = 6; // Maximum inflight messages without an ack allowed
int32 ackWaitInSecs = 7; // Timeout for receiving an ack from the client
string durableName = 8; // Optional durable name which survives client restarts
uint64 lastSent = 9; // Start position
bool isDurable = 10; // Indicate durability for this subscriber
}
// SubStateDelete marks a Subscription as deleted
message SubStateDelete {
uint64 ID = 1; // Subscription ID being deleted
}
// SubStateUpdate represents a subscription update (either Msg or Ack)
message SubStateUpdate {
uint64 ID = 1; // Subscription ID
uint64 seqno = 2; // Sequence of the message (pending or ack'ed)
}
// ServerInfo contains basic information regarding the Server
message ServerInfo {
string ClusterID = 1; // Cluster ID
string Discovery = 2; // Subject server receives connect requests on.
string Publish = 3; // Subject prefix server receives published messages on.
string Subscribe = 4; // Subject server receives subscription requests on.
string Unsubscribe = 5; // Subject server receives unsubscribe requests on.
string Close = 6; // Subject server receives close requests on.
string SubClose = 7; // Subject server receives subscription close requests on.
}
// ClientInfo contains information related to a Client
message ClientInfo {
string ID = 1; // Client ID
string HbInbox = 2; // The inbox heartbeats are sent to
}
message ClientDelete {
string ID = 1; // ID of the client being unregistered
}
message CtrlMsg {
enum Type {
SubUnsubscribe = 0; // Subscription Unsubscribe request.
SubClose = 1; // Subscription Close request.
ConnClose = 2; // Connection Close request.
}
Type MsgType = 1; // Type of the control message.
string ServerID = 2; // Allows a server to detect if it is the intended recipient.
bytes Data = 3; // Optional bytes that carries context information.
}
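As an illustration of how these records are used from Go, here is a sketch that round-trips a SubState through the gogo-generated Marshal/Unmarshal/Size methods (enabled by the options above); the Go field names are assumed to follow the usual protoc-gen-gogo CamelCasing of the proto field names.

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats-streaming-server/spb"
)

func main() {
	in := spb.SubState{
		ID:            42,
		ClientID:      "client-1",
		Inbox:         "_INBOX.deliver.client-1",
		AckInbox:      "_INBOX.acks.client-1",
		MaxInFlight:   1024,
		AckWaitInSecs: 30,
		IsDurable:     true,
	}
	buf, err := in.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	var out spb.SubState
	if err := out.Unmarshal(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decoded sub %d for client %s (%d bytes on the wire)\n",
		out.ID, out.ClientID, in.Size())
}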


@@ -0,0 +1,400 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package stores
import (
"fmt"
"sync"
"github.com/nats-io/go-nats-streaming/pb"
"github.com/nats-io/nats-streaming-server/spb"
)
// format string used to report that limit is reached when storing
// messages.
var droppingMsgsFmt = "WARNING: Reached limits for store %q (msgs=%v/%v bytes=%v/%v), " +
"dropping old messages to make room for new ones."
// commonStore contains everything that is common to any type of store
type commonStore struct {
sync.RWMutex
closed bool
}
// genericStore is the generic store implementation with a map of channels.
type genericStore struct {
commonStore
limits StoreLimits
name string
channels map[string]*ChannelStore
clients map[string]*Client
}
// genericSubStore is the generic store implementation that manages subscriptions
// for a given channel.
type genericSubStore struct {
commonStore
limits SubStoreLimits
subject string // Can't be wildcard
subsCount int
maxSubID uint64
}
// genericMsgStore is the generic store implementation that manages messages
// for a given channel.
type genericMsgStore struct {
commonStore
limits MsgStoreLimits
subject string // Can't be wildcard
first uint64
last uint64
totalCount int
totalBytes uint64
hitLimit bool // indicates if store had to drop messages due to limit
}
////////////////////////////////////////////////////////////////////////////
// genericStore methods
////////////////////////////////////////////////////////////////////////////
// init initializes the structure of a generic store
func (gs *genericStore) init(name string, limits *StoreLimits) {
gs.name = name
if limits == nil {
limits = &DefaultStoreLimits
}
gs.setLimits(limits)
// Do not use limits values to create the map.
gs.channels = make(map[string]*ChannelStore)
gs.clients = make(map[string]*Client)
}
// Init can be used to initialize the store with server's information.
func (gs *genericStore) Init(info *spb.ServerInfo) error {
return nil
}
// Name returns the type name of this store
func (gs *genericStore) Name() string {
return gs.name
}
// setLimits makes a copy of the given StoreLimits,
// validates the limits and if ok, applies the inheritance.
func (gs *genericStore) setLimits(limits *StoreLimits) error {
// Make a copy
gs.limits = *limits
// of the map too
if len(limits.PerChannel) > 0 {
gs.limits.PerChannel = make(map[string]*ChannelLimits, len(limits.PerChannel))
for key, val := range limits.PerChannel {
// Make a copy of the values. We want ownership
// of those structures
clone := *val
gs.limits.PerChannel[key] = &clone
}
}
// Build will validate and apply inheritance if no error.
sl := &gs.limits
return sl.Build()
}
// SetLimits sets limits for this store
func (gs *genericStore) SetLimits(limits *StoreLimits) error {
gs.Lock()
err := gs.setLimits(limits)
gs.Unlock()
return err
}
// CreateChannel creates a ChannelStore for the given channel, and returns
// `true` to indicate that the channel is new, false if it already exists.
func (gs *genericStore) CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error) {
// no-op
return nil, false, fmt.Errorf("Generic store, feature not implemented")
}
// LookupChannel returns a ChannelStore for the given channel.
func (gs *genericStore) LookupChannel(channel string) *ChannelStore {
gs.RLock()
cs := gs.channels[channel]
gs.RUnlock()
return cs
}
// HasChannel returns true if this store has any channel
func (gs *genericStore) HasChannel() bool {
gs.RLock()
l := len(gs.channels)
gs.RUnlock()
return l > 0
}
// MsgsState returns message store statistics for a given channel ('*' for all)
func (gs *genericStore) MsgsState(channel string) (numMessages int, byteSize uint64, err error) {
numMessages = 0
byteSize = 0
err = nil
if channel == AllChannels {
gs.RLock()
cs := gs.channels
gs.RUnlock()
for _, c := range cs {
n, b, lerr := c.Msgs.State()
if lerr != nil {
err = lerr
return
}
numMessages += n
byteSize += b
}
} else {
cs := gs.LookupChannel(channel)
if cs != nil {
numMessages, byteSize, err = cs.Msgs.State()
}
}
return
}
// canAddChannel returns true if the current number of channels is below the limit.
// Store lock is assumed to be locked.
func (gs *genericStore) canAddChannel() error {
if gs.limits.MaxChannels > 0 && len(gs.channels) >= gs.limits.MaxChannels {
return ErrTooManyChannels
}
return nil
}
// AddClient stores information about the client identified by `clientID`.
func (gs *genericStore) AddClient(clientID, hbInbox string, userData interface{}) (*Client, bool, error) {
c := &Client{spb.ClientInfo{ID: clientID, HbInbox: hbInbox}, userData}
gs.Lock()
oldClient := gs.clients[clientID]
if oldClient != nil {
gs.Unlock()
return oldClient, false, nil
}
gs.clients[c.ID] = c
gs.Unlock()
return c, true, nil
}
// GetClient returns the stored Client, or nil if it does not exist.
func (gs *genericStore) GetClient(clientID string) *Client {
gs.RLock()
c := gs.clients[clientID]
gs.RUnlock()
return c
}
// GetClients returns all stored Client objects, as a map keyed by client IDs.
func (gs *genericStore) GetClients() map[string]*Client {
gs.RLock()
clients := make(map[string]*Client, len(gs.clients))
for k, v := range gs.clients {
clients[k] = v
}
gs.RUnlock()
return clients
}
// GetClientsCount returns the number of registered clients
func (gs *genericStore) GetClientsCount() int {
gs.RLock()
count := len(gs.clients)
gs.RUnlock()
return count
}
// DeleteClient deletes the client identified by `clientID`.
func (gs *genericStore) DeleteClient(clientID string) *Client {
gs.Lock()
c := gs.clients[clientID]
if c != nil {
delete(gs.clients, clientID)
}
gs.Unlock()
return c
}
// Close closes all stores
func (gs *genericStore) Close() error {
gs.Lock()
defer gs.Unlock()
if gs.closed {
return nil
}
gs.closed = true
return gs.close()
}
// close closes all stores. Store lock is assumed held on entry
func (gs *genericStore) close() error {
var err error
var lerr error
for _, cs := range gs.channels {
lerr = cs.Subs.Close()
if lerr != nil && err == nil {
err = lerr
}
lerr = cs.Msgs.Close()
if lerr != nil && err == nil {
err = lerr
}
}
return err
}
////////////////////////////////////////////////////////////////////////////
// genericMsgStore methods
////////////////////////////////////////////////////////////////////////////
// init initializes this generic message store
func (gms *genericMsgStore) init(subject string, limits *MsgStoreLimits) {
gms.subject = subject
gms.limits = *limits
}
// State returns some statistics related to this store
func (gms *genericMsgStore) State() (numMessages int, byteSize uint64, err error) {
gms.RLock()
c, b := gms.totalCount, gms.totalBytes
gms.RUnlock()
return c, b, nil
}
// FirstSequence returns sequence for first message stored.
func (gms *genericMsgStore) FirstSequence() uint64 {
gms.RLock()
first := gms.first
gms.RUnlock()
return first
}
// LastSequence returns sequence for last message stored.
func (gms *genericMsgStore) LastSequence() uint64 {
gms.RLock()
last := gms.last
gms.RUnlock()
return last
}
// FirstAndLastSequence returns sequences for the first and last messages stored.
func (gms *genericMsgStore) FirstAndLastSequence() (uint64, uint64) {
gms.RLock()
first, last := gms.first, gms.last
gms.RUnlock()
return first, last
}
// Lookup returns the stored message with given sequence number.
func (gms *genericMsgStore) Lookup(seq uint64) *pb.MsgProto {
// no-op
return nil
}
// FirstMsg returns the first message stored.
func (gms *genericMsgStore) FirstMsg() *pb.MsgProto {
// no-op
return nil
}
// LastMsg returns the last message stored.
func (gms *genericMsgStore) LastMsg() *pb.MsgProto {
// no-op
return nil
}
func (gms *genericMsgStore) Flush() error {
// no-op
return nil
}
// GetSequenceFromTimestamp returns the sequence of the first message whose
// timestamp is greater or equal to given timestamp.
func (gms *genericMsgStore) GetSequenceFromTimestamp(timestamp int64) uint64 {
// no-op
return 0
}
// Close closes this store.
func (gms *genericMsgStore) Close() error {
return nil
}
////////////////////////////////////////////////////////////////////////////
// genericSubStore methods
////////////////////////////////////////////////////////////////////////////
// init initializes the structure of a generic sub store
func (gss *genericSubStore) init(channel string, limits *SubStoreLimits) {
gss.subject = channel
gss.limits = *limits
}
// CreateSub records a new subscription represented by SubState. On success,
// it records the subscription's ID in SubState.ID. This ID is to be used
// by the other SubStore methods.
func (gss *genericSubStore) CreateSub(sub *spb.SubState) error {
gss.Lock()
err := gss.createSub(sub)
gss.Unlock()
return err
}
// UpdateSub updates a given subscription represented by SubState.
func (gss *genericSubStore) UpdateSub(sub *spb.SubState) error {
return nil
}
// createSub is the unlocked version of CreateSub that can be used by
// non-generic implementations.
func (gss *genericSubStore) createSub(sub *spb.SubState) error {
if gss.limits.MaxSubscriptions > 0 && gss.subsCount >= gss.limits.MaxSubscriptions {
return ErrTooManySubs
}
// Bump the max value before assigning it to the new subscription.
gss.maxSubID++
gss.subsCount++
// This new subscription has the max value.
sub.ID = gss.maxSubID
return nil
}
// DeleteSub invalidates this subscription.
func (gss *genericSubStore) DeleteSub(subid uint64) {
gss.Lock()
gss.subsCount--
gss.Unlock()
}
// AddSeqPending adds the given message seqno to the given subscription.
func (gss *genericSubStore) AddSeqPending(subid, seqno uint64) error {
// no-op
return nil
}
// AckSeqPending records that the given message seqno has been acknowledged
// by the given subscription.
func (gss *genericSubStore) AckSeqPending(subid, seqno uint64) error {
// no-op
return nil
}
// Flush is for stores that may buffer operations and need them to be persisted.
func (gss *genericSubStore) Flush() error {
// no-op
return nil
}
// Close closes this store
func (gss *genericSubStore) Close() error {
// no-op
return nil
}
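For illustration, a hypothetical minimal store built on top of these generic types, written as if inside the stores package; the noopStore/noopMsgStore names are invented here, and the MemoryStore later in this commit follows the same pattern for real.

// Hypothetical sketch: a store that discards message payloads.
type noopMsgStore struct {
	genericMsgStore
}

// Store satisfies the MsgStore interface, which the generic type leaves out on purpose.
func (ms *noopMsgStore) Store(data []byte) (uint64, error) {
	ms.Lock()
	defer ms.Unlock()
	if ms.first == 0 {
		ms.first = 1
	}
	ms.last++
	return ms.last, nil
}

type noopStore struct {
	genericStore
}

func newNoopStore(limits *StoreLimits) *noopStore {
	ns := &noopStore{}
	ns.init("NOOP", limits) // nil limits fall back to DefaultStoreLimits
	return ns
}

func (ns *noopStore) CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error) {
	ns.Lock()
	defer ns.Unlock()
	if cs := ns.channels[channel]; cs != nil {
		return cs, false, nil
	}
	if err := ns.canAddChannel(); err != nil {
		return nil, false, err
	}
	msgs := &noopMsgStore{}
	msgs.init(channel, &ns.limits.MsgStoreLimits)
	subs := &genericSubStore{}
	subs.init(channel, &ns.limits.SubStoreLimits)
	cs := &ChannelStore{Msgs: msgs, Subs: subs, UserData: userData}
	ns.channels[channel] = cs
	return cs, true, nil
}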

File diff suppressed because it is too large


@@ -0,0 +1,109 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package stores
import (
"fmt"
)
// AddPerChannel stores limits for the given channel `name` in the StoreLimits.
// Inheritance (that is, specifying 0 for a limit means that the global limit
// should be used) is not applied in this call. This is done in StoreLimits.Build
// along with some validation.
func (sl *StoreLimits) AddPerChannel(name string, cl *ChannelLimits) {
if sl.PerChannel == nil {
sl.PerChannel = make(map[string]*ChannelLimits)
}
sl.PerChannel[name] = cl
}
// Build applies the global limits to any per-channel limit that is set
// to zero. This call also validates the limits. An error is returned if:
// * any limit is set to a negative value.
// * the number of per-channel limits is higher than StoreLimits.MaxChannels.
// * any per-channel limit is higher than the corresponding global limit.
func (sl *StoreLimits) Build() error {
// Check that there is no negative value
if sl.MaxChannels < 0 {
return fmt.Errorf("Max channels limit cannot be negative")
}
if err := sl.checkChannelLimits(&sl.ChannelLimits, ""); err != nil {
return err
}
// If there is no per-channel, we are done.
if len(sl.PerChannel) == 0 {
return nil
}
if len(sl.PerChannel) > sl.MaxChannels {
return fmt.Errorf("Too many channels defined (%v). The max channels limit is set to %v",
len(sl.PerChannel), sl.MaxChannels)
}
for cn, cl := range sl.PerChannel {
if err := sl.checkChannelLimits(cl, cn); err != nil {
return err
}
}
// If we are here, it means that there was no error,
// so we now apply inheritance.
for _, cl := range sl.PerChannel {
if cl.MaxSubscriptions == 0 {
cl.MaxSubscriptions = sl.MaxSubscriptions
}
if cl.MaxMsgs == 0 {
cl.MaxMsgs = sl.MaxMsgs
}
if cl.MaxBytes == 0 {
cl.MaxBytes = sl.MaxBytes
}
if cl.MaxAge == 0 {
cl.MaxAge = sl.MaxAge
}
}
return nil
}
func (sl *StoreLimits) checkChannelLimits(cl *ChannelLimits, channelName string) error {
// Check that limits are not negative and that per-channel limits
// do not exceed the corresponding global limits.
if err := verifyLimit("subscriptions", channelName,
int64(cl.MaxSubscriptions), int64(sl.MaxSubscriptions)); err != nil {
return err
}
if err := verifyLimit("messages", channelName,
int64(cl.MaxMsgs), int64(sl.MaxMsgs)); err != nil {
return err
}
if err := verifyLimit("bytes", channelName,
cl.MaxBytes, sl.MaxBytes); err != nil {
return err
}
if err := verifyLimit("age", channelName,
int64(cl.MaxAge), int64(sl.MaxAge)); err != nil {
return err
}
return nil
}
func verifyLimit(errText, channelName string, limit, globalLimit int64) error {
// No limit can be negative. If channelName is "" we are
// verifying the global limit (in this case limit == globalLimit).
// Otherwise, we verify a given per-channel limit. Make
// sure that the value is not greater than the corresponding
// global limit.
if channelName == "" {
if limit < 0 {
return fmt.Errorf("Max %s for global limit cannot be negative", errText)
}
return nil
}
// Per-channel limit specific here.
if limit < 0 {
return fmt.Errorf("Max %s for channel %q cannot be negative. "+
"Set it to 0 to be equal to the global limit of %v", errText, channelName, globalLimit)
}
if limit > globalLimit {
return fmt.Errorf("Max %s for channel %q cannot be higher than global limit "+
"of %v", errText, channelName, globalLimit)
}
return nil
}
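A short sketch showing how these calls compose: start from the package defaults, add a per-channel override, and validate with Build. The channel name here is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	limits := stores.DefaultStoreLimits // copy the package defaults

	// Tighten the message count for one channel; leaving a field at 0 means
	// "inherit the corresponding global limit" once Build() has run.
	perChannel := &stores.ChannelLimits{}
	perChannel.MaxMsgs = 10000
	limits.AddPerChannel("orders", perChannel)

	// Build validates (no negative values, per-channel <= global, channel
	// count <= MaxChannels) and then applies the inheritance.
	if err := limits.Build(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("orders: max msgs=%d, max subs=%d (inherited)\n",
		limits.PerChannel["orders"].MaxMsgs,
		limits.PerChannel["orders"].MaxSubscriptions)
}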


@@ -0,0 +1,248 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package stores
import (
"sort"
"sync"
"time"
"github.com/nats-io/go-nats-streaming/pb"
)
// MemoryStore is a factory for message and subscription stores.
type MemoryStore struct {
genericStore
}
// MemorySubStore is a subscription store in memory
type MemorySubStore struct {
genericSubStore
}
// MemoryMsgStore is a per channel message store in memory
type MemoryMsgStore struct {
genericMsgStore
msgs map[uint64]*pb.MsgProto
ageTimer *time.Timer
wg sync.WaitGroup
}
////////////////////////////////////////////////////////////////////////////
// MemoryStore methods
////////////////////////////////////////////////////////////////////////////
// NewMemoryStore returns a factory for stores held in memory.
// If no limits are provided, the store will be created with
// DefaultStoreLimits.
func NewMemoryStore(limits *StoreLimits) (*MemoryStore, error) {
ms := &MemoryStore{}
ms.init(TypeMemory, limits)
return ms, nil
}
// CreateChannel creates a ChannelStore for the given channel, and returns
// `true` to indicate that the channel is new, false if it already exists.
func (ms *MemoryStore) CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error) {
ms.Lock()
defer ms.Unlock()
channelStore := ms.channels[channel]
if channelStore != nil {
return channelStore, false, nil
}
if err := ms.canAddChannel(); err != nil {
return nil, false, err
}
// Defaults to the global limits
msgStoreLimits := ms.limits.MsgStoreLimits
subStoreLimits := ms.limits.SubStoreLimits
// See if there is an override
thisChannelLimits, exists := ms.limits.PerChannel[channel]
if exists {
// Use this channel specific limits
msgStoreLimits = thisChannelLimits.MsgStoreLimits
subStoreLimits = thisChannelLimits.SubStoreLimits
}
msgStore := &MemoryMsgStore{msgs: make(map[uint64]*pb.MsgProto, 64)}
msgStore.init(channel, &msgStoreLimits)
subStore := &MemorySubStore{}
subStore.init(channel, &subStoreLimits)
channelStore = &ChannelStore{
Subs: subStore,
Msgs: msgStore,
UserData: userData,
}
ms.channels[channel] = channelStore
return channelStore, true, nil
}
////////////////////////////////////////////////////////////////////////////
// MemoryMsgStore methods
////////////////////////////////////////////////////////////////////////////
// Store a given message.
func (ms *MemoryMsgStore) Store(data []byte) (uint64, error) {
ms.Lock()
defer ms.Unlock()
if ms.first == 0 {
ms.first = 1
}
ms.last++
m := &pb.MsgProto{
Sequence: ms.last,
Subject: ms.subject,
Data: data,
Timestamp: time.Now().UnixNano(),
}
ms.msgs[ms.last] = m
ms.totalCount++
ms.totalBytes += uint64(m.Size())
// If there is an age limit and no timer yet created, do so now
if ms.limits.MaxAge > time.Duration(0) && ms.ageTimer == nil {
ms.wg.Add(1)
ms.ageTimer = time.AfterFunc(ms.limits.MaxAge, ms.expireMsgs)
}
// Check if we need to remove any (but leave at least the last added)
maxMsgs := ms.limits.MaxMsgs
maxBytes := ms.limits.MaxBytes
if maxMsgs > 0 || maxBytes > 0 {
for ms.totalCount > 1 &&
((maxMsgs > 0 && ms.totalCount > maxMsgs) ||
(maxBytes > 0 && (ms.totalBytes > uint64(maxBytes)))) {
ms.removeFirstMsg()
if !ms.hitLimit {
ms.hitLimit = true
Noticef(droppingMsgsFmt, ms.subject, ms.totalCount, ms.limits.MaxMsgs, ms.totalBytes, ms.limits.MaxBytes)
}
}
}
return ms.last, nil
}
// Lookup returns the stored message with given sequence number.
func (ms *MemoryMsgStore) Lookup(seq uint64) *pb.MsgProto {
ms.RLock()
m := ms.msgs[seq]
ms.RUnlock()
return m
}
// FirstMsg returns the first message stored.
func (ms *MemoryMsgStore) FirstMsg() *pb.MsgProto {
ms.RLock()
m := ms.msgs[ms.first]
ms.RUnlock()
return m
}
// LastMsg returns the last message stored.
func (ms *MemoryMsgStore) LastMsg() *pb.MsgProto {
ms.RLock()
m := ms.msgs[ms.last]
ms.RUnlock()
return m
}
// GetSequenceFromTimestamp returns the sequence of the first message whose
// timestamp is greater or equal to given timestamp.
func (ms *MemoryMsgStore) GetSequenceFromTimestamp(timestamp int64) uint64 {
ms.RLock()
defer ms.RUnlock()
index := sort.Search(len(ms.msgs), func(i int) bool {
m := ms.msgs[uint64(i)+ms.first]
if m.Timestamp >= timestamp {
return true
}
return false
})
return uint64(index) + ms.first
}
// expireMsgs ensures that messages don't stay in the log longer than the
// limit's MaxAge.
func (ms *MemoryMsgStore) expireMsgs() {
ms.Lock()
if ms.closed {
ms.Unlock()
ms.wg.Done()
return
}
defer ms.Unlock()
now := time.Now().UnixNano()
maxAge := int64(ms.limits.MaxAge)
for {
m, ok := ms.msgs[ms.first]
if !ok {
ms.ageTimer = nil
ms.wg.Done()
return
}
elapsed := now - m.Timestamp
if elapsed >= maxAge {
ms.removeFirstMsg()
} else {
ms.ageTimer.Reset(time.Duration(maxAge - elapsed))
return
}
}
}
// removeFirstMsg removes the first message and updates totals.
func (ms *MemoryMsgStore) removeFirstMsg() {
firstMsg := ms.msgs[ms.first]
ms.totalBytes -= uint64(firstMsg.Size())
ms.totalCount--
delete(ms.msgs, ms.first)
ms.first++
}
// Close implements the MsgStore interface
func (ms *MemoryMsgStore) Close() error {
ms.Lock()
if ms.closed {
ms.Unlock()
return nil
}
ms.closed = true
if ms.ageTimer != nil {
if ms.ageTimer.Stop() {
ms.wg.Done()
}
}
ms.Unlock()
ms.wg.Wait()
return nil
}
////////////////////////////////////////////////////////////////////////////
// MemorySubStore methods
////////////////////////////////////////////////////////////////////////////
// AddSeqPending adds the given message seqno to the given subscription.
func (*MemorySubStore) AddSeqPending(subid, seqno uint64) error {
// Overrides in case genericSubStore does something. For the memory
// based store, we want to keep the cost of this call to a minimum.
return nil
}
// AckSeqPending records that the given message seqno has been acknowledged
// by the given subscription.
func (*MemorySubStore) AckSeqPending(subid, seqno uint64) error {
// Overrides in case genericSubStore does something. For the memory
// based store, we want to keep the cost of this call to a minimum.
return nil
}
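A minimal end-to-end sketch of the memory store from outside the package: create it, add a channel, store a message, and look it up. Channel and payload values are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats-streaming-server/stores"
)

func main() {
	// nil limits means the store uses stores.DefaultStoreLimits.
	ms, err := stores.NewMemoryStore(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer ms.Close()

	cs, isNew, err := ms.CreateChannel("foo", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("channel foo created:", isNew)

	seq, err := cs.Msgs.Store([]byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	if msg := cs.Msgs.Lookup(seq); msg != nil {
		fmt.Printf("stored seq %d: %q\n", msg.Sequence, msg.Data)
	}
}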


@@ -0,0 +1,261 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package stores
import (
"errors"
"time"
"github.com/nats-io/gnatsd/server"
"github.com/nats-io/go-nats-streaming/pb"
"github.com/nats-io/nats-streaming-server/spb"
)
const (
// TypeMemory is the store type name for memory based stores
TypeMemory = "MEMORY"
// TypeFile is the store type name for file based stores
TypeFile = "FILE"
)
const (
// AllChannels is the channel name used to request state for all channels.
AllChannels = "*"
)
// Errors.
var (
ErrTooManyChannels = errors.New("too many channels")
ErrTooManySubs = errors.New("too many subscriptions per channel")
)
// Noticef logs a notice statement
func Noticef(format string, v ...interface{}) {
server.Noticef(format, v...)
}
// StoreLimits define limits for a store.
type StoreLimits struct {
// How many channels are allowed.
MaxChannels int
// Global limits. Any 0 value means that the limit is ignored (unlimited).
ChannelLimits
// Per-channel limits. If a limit for a channel in this map is 0,
// the corresponding global limit (specified above) is used.
PerChannel map[string]*ChannelLimits
}
// ChannelLimits defines limits for a given channel
type ChannelLimits struct {
// Limits for message stores
MsgStoreLimits
// Limits for subscriptions stores
SubStoreLimits
}
// MsgStoreLimits defines limits for a MsgStore.
// For global limits, a value of 0 means "unlimited".
// For per-channel limits, it means that the corresponding global
// limit is used.
type MsgStoreLimits struct {
// How many messages are allowed.
MaxMsgs int
// How many bytes are allowed.
MaxBytes int64
// How long messages are kept in the log (expressed as a time.Duration)
MaxAge time.Duration
}
// SubStoreLimits defines limits for a SubStore
type SubStoreLimits struct {
// How many subscriptions are allowed.
MaxSubscriptions int
}
// DefaultStoreLimits are the limits that a Store must
// use when none are specified to the Store constructor.
// Store limits can be changed with the Store.SetLimits() method.
var DefaultStoreLimits = StoreLimits{
100,
ChannelLimits{
MsgStoreLimits{
MaxMsgs: 1000000,
MaxBytes: 1000000 * 1024,
},
SubStoreLimits{
MaxSubscriptions: 1000,
},
},
nil,
}
// RecoveredState allows the server to reconstruct its state after a restart.
type RecoveredState struct {
Info *spb.ServerInfo
Clients []*Client
Subs RecoveredSubscriptions
}
// Client represents a client with ID, Heartbeat Inbox and user data sets
// when adding it to the store.
type Client struct {
spb.ClientInfo
UserData interface{}
}
// RecoveredSubscriptions is a map of recovered subscriptions, keyed by channel name.
type RecoveredSubscriptions map[string][]*RecoveredSubState
// PendingAcks is a set of message sequences waiting to be acknowledged.
type PendingAcks map[uint64]struct{}
// RecoveredSubState represents a recovered Subscription with a map
// of pending messages.
type RecoveredSubState struct {
Sub *spb.SubState
Pending PendingAcks
}
// ChannelStore contains a reference to both Subscription and Message stores.
type ChannelStore struct {
// UserData is set when the channel is created.
UserData interface{}
// Subs is the Subscriptions Store.
Subs SubStore
// Msgs is the Messages Store.
Msgs MsgStore
}
// Store is the storage interface for NATS Streaming servers.
//
// For Store implementations that support recovery, note that the limits
// passed to the Store constructor do not apply to any state being recovered.
//
type Store interface {
// Init can be used to initialize the store with server's information.
Init(info *spb.ServerInfo) error
// Name returns the name type of this store (e.g: MEMORY, FILESTORE, etc...).
Name() string
// SetLimits sets limits for this store. The action is not expected
// to be retroactive.
// The store implementation should make a deep copy as to not change
// the content of the structure passed by the caller.
// This call may return an error due to limits validation errors.
SetLimits(limits *StoreLimits) error
// CreateChannel creates a ChannelStore for the given channel, and returns
// `true` to indicate that the channel is new, false if it already exists.
// Limits defined for this channel in the StoreLimits.PerChannel map, if present,
// will apply. Otherwise, the global limits in StoreLimits will apply.
CreateChannel(channel string, userData interface{}) (*ChannelStore, bool, error)
// LookupChannel returns a ChannelStore for the given channel, nil if channel
// does not exist.
LookupChannel(channel string) *ChannelStore
// HasChannel returns true if this store has any channel.
HasChannel() bool
// MsgsState returns message store statistics for a given channel, or all
// if 'channel' is AllChannels.
MsgsState(channel string) (numMessages int, byteSize uint64, err error)
// AddClient stores information about the client identified by `clientID`.
// If a Client is already registered, this call returns the currently
// registered Client object, and the boolean set to false to indicate
// that the client is not new.
AddClient(clientID, hbInbox string, userData interface{}) (*Client, bool, error)
// GetClient returns the stored Client, or nil if it does not exist.
GetClient(clientID string) *Client
// GetClients returns a map of all stored Client objects, keyed by client IDs.
// The returned map is a copy of the state maintained by the store so that
// it is safe for the caller to walk through the map while clients may be
// added/deleted from the store.
GetClients() map[string]*Client
// GetClientsCount returns the number of registered clients.
GetClientsCount() int
// DeleteClient removes the client identified by `clientID` from the store
// and returns it to the caller.
DeleteClient(clientID string) *Client
// Close closes all stores.
Close() error
}
// SubStore is the interface for storage of Subscriptions on a given channel.
//
// Implementations of this interface should not attempt to validate that
// a subscription is valid (that is, has not been deleted) when processing
// updates.
type SubStore interface {
// CreateSub records a new subscription represented by SubState. On success,
// it records the subscription's ID in SubState.ID. This ID is to be used
// by the other SubStore methods.
CreateSub(*spb.SubState) error
// UpdateSub updates a given subscription represented by SubState.
UpdateSub(*spb.SubState) error
// DeleteSub invalidates the subscription 'subid'.
DeleteSub(subid uint64)
// AddSeqPending adds the given message 'seqno' to the subscription 'subid'.
AddSeqPending(subid, seqno uint64) error
// AckSeqPending records that the given message 'seqno' has been acknowledged
// by the subscription 'subid'.
AckSeqPending(subid, seqno uint64) error
// Flush is for stores that may buffer operations and need them to be persisted.
Flush() error
// Close closes the subscriptions store.
Close() error
}
// MsgStore is the interface for storage of Messages on a given channel.
type MsgStore interface {
// State returns some statistics related to this store.
State() (numMessages int, byteSize uint64, err error)
// Store stores a message and returns the message sequence.
Store(data []byte) (uint64, error)
// Lookup returns the stored message with given sequence number.
Lookup(seq uint64) *pb.MsgProto
// FirstSequence returns sequence for first message stored, 0 if no
// message is stored.
FirstSequence() uint64
// LastSequence returns sequence for last message stored, 0 if no
// message is stored.
LastSequence() uint64
// FirstAndLastSequence returns sequences for the first and last messages stored,
// 0 if no message is stored.
FirstAndLastSequence() (uint64, uint64)
// GetSequenceFromTimestamp returns the sequence of the first message whose
// timestamp is greater or equal to given timestamp.
GetSequenceFromTimestamp(timestamp int64) uint64
// FirstMsg returns the first message stored.
FirstMsg() *pb.MsgProto
// LastMsg returns the last message stored.
LastMsg() *pb.MsgProto
// Flush is for stores that may buffer operations and need them to be persisted.
Flush() error
// Close closes the store.
Close() error
}
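These interfaces let server code stay independent of the backing store; as a sketch, a helper written against stores.Store works unchanged for the memory and file implementations (the function name and output below are illustrative).

package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats-streaming-server/stores"
)

// reportChannel works with any stores.Store implementation.
func reportChannel(s stores.Store, channel string) error {
	cs := s.LookupChannel(channel)
	if cs == nil {
		return fmt.Errorf("channel %q does not exist", channel)
	}
	first, last := cs.Msgs.FirstAndLastSequence()
	n, b, err := cs.Msgs.State()
	if err != nil {
		return err
	}
	fmt.Printf("channel %q: %d msgs, %d bytes, seq %d..%d\n", channel, n, b, first, last)
	return nil
}

func main() {
	s, err := stores.NewMemoryStore(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	if _, _, err := s.CreateChannel("foo", nil); err != nil {
		log.Fatal(err)
	}
	if err := reportChannel(s, "foo"); err != nil {
		log.Fatal(err)
	}
}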


@@ -0,0 +1,53 @@
// Copyright 2016 Apcera Inc. All rights reserved.
package util
import (
"encoding/binary"
"io"
)
// ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit
// unsigned integers.
var ByteOrder binary.ByteOrder
func init() {
ByteOrder = binary.LittleEndian
}
// EnsureBufBigEnough checks that the given buffer is big enough to hold 'needed'
// bytes; if it is not, it returns a new buffer of at least 'needed' bytes.
func EnsureBufBigEnough(buf []byte, needed int) []byte {
if buf == nil {
return make([]byte, needed)
} else if needed > len(buf) {
return make([]byte, int(float32(needed)*1.1))
}
return buf
}
// WriteInt writes an int (4 bytes) to the given writer using ByteOrder.
func WriteInt(w io.Writer, v int) error {
var b [4]byte
var bs []byte
bs = b[:4]
ByteOrder.PutUint32(bs, uint32(v))
_, err := w.Write(bs)
return err
}
// ReadInt reads an int (4 bytes) from the reader using ByteOrder.
func ReadInt(r io.Reader) (int, error) {
var b [4]byte
var bs []byte
bs = b[:4]
_, err := io.ReadFull(r, bs)
if err != nil {
return 0, err
}
return int(ByteOrder.Uint32(bs)), nil
}
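A round-trip sketch for these helpers using an in-memory buffer; the import path is assumed from this repository's layout.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/nats-io/nats-streaming-server/util"
)

func main() {
	// Write a 4-byte little-endian integer, then read it back.
	var buf bytes.Buffer
	if err := util.WriteInt(&buf, 1234); err != nil {
		log.Fatal(err)
	}
	n, err := util.ReadInt(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round-tripped:", n) // 1234

	// Grow a scratch buffer only when it is too small (new size ~= 1.1 * needed).
	scratch := make([]byte, 16)
	scratch = util.EnsureBufBigEnough(scratch, 64)
	fmt.Println("scratch length now:", len(scratch))
}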