/*
	Copyright 2019 Stellar Project

	Permission is hereby granted, free of charge, to any person obtaining a copy of
	this software and associated documentation files (the "Software"), to deal in the
	Software without restriction, including without limitation the rights to use, copy,
	modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
	and to permit persons to whom the Software is furnished to do so, subject to the
	following conditions:

	The above copyright notice and this permission notice shall be included in all copies
	or substantial portions of the Software.

	THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
	INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
	PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
	FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
	TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
	USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

package server

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/url"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"time"

	"github.com/gogo/protobuf/proto"
	ptypes "github.com/gogo/protobuf/types"
	"github.com/gomodule/redigo/redis"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/stellarproject/heimdall"
	v1 "github.com/stellarproject/heimdall/api/v1"
	"github.com/stellarproject/heimdall/client"
	"github.com/stellarproject/heimdall/wg"
	"google.golang.org/grpc"
)

const (
	masterKey          = "heimdall:master"
	clusterKey         = "heimdall:key"
	keypairsKey        = "heimdall:keypairs"
	nodesKey           = "heimdall:nodes"
	nodeJoinKey        = "heimdall:join"
	peersKey           = "heimdall:peers"
	routesKey          = "heimdall:routes"
	peerIPsKey         = "heimdall:peerips"
	nodeIPsKey         = "heimdall:nodeips"
	nodeNetworksKey    = "heimdall:nodenetworks"
	authorizedPeersKey = "heimdall:authorized"

	wireguardConfigDir = "/etc/wireguard"
)

var (
	empty                    = &ptypes.Empty{}
	masterHeartbeatInterval  = time.Second * 5
	nodeHeartbeatInterval    = time.Second * 15
	nodeHeartbeatExpiry      = 86400
	peerConfigUpdateInterval = time.Second * 10

	// ErrRouteExists is returned when a requested route is already reserved
	ErrRouteExists = errors.New("route already reserved")
	// ErrNodeDoesNotExist is returned when an invalid node is requested
	ErrNodeDoesNotExist = errors.New("node does not exist")
)

// Server represents the Heimdall server
type Server struct {
	cfg       *heimdall.Config
	rpool     *redis.Pool
	wpool     *redis.Pool
	replicaCh chan struct{}
}

// NewServer returns a new Heimdall server
func NewServer(cfg *heimdall.Config) (*Server, error) {
	pool, err := getPool(cfg.RedisURL)
	if err != nil {
		return nil, err
	}
	return &Server{
		cfg:       cfg,
		rpool:     pool,
		wpool:     pool,
		replicaCh: make(chan struct{}, 1),
	}, nil
}
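
// A minimal usage sketch (hedged: it assumes a populated heimdall.Config and
// standard grpc wiring, neither of which is defined in this file):
//
//	srv, err := server.NewServer(cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	grpcServer := grpc.NewServer()
//	if err := srv.Register(grpcServer); err != nil {
//		log.Fatal(err)
//	}
//	if err := srv.Run(); err != nil {
//		log.Fatal(err)
//	}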

// Register enables callers to register this service with an existing GRPC server
func (s *Server) Register(server *grpc.Server) error {
	v1.RegisterHeimdallServer(server, s)
	return nil
}

// GenerateProfile generates a new Go heap profile and returns its path
func (s *Server) GenerateProfile() (string, error) {
	tmpfile, err := ioutil.TempFile("", "heimdall-profile-")
	if err != nil {
		return "", err
	}
	// close the file on all paths, including the error return below
	defer tmpfile.Close()
	runtime.GC()
	if err := pprof.WriteHeapProfile(tmpfile); err != nil {
		return "", err
	}
	return tmpfile.Name(), nil
}
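
// As a usage note (hedged: this is stock Go tooling, not anything
// heimdall-specific), the heap profile written above can be inspected with
// the standard pprof tool; the path below is illustrative:
//
//	go tool pprof /tmp/heimdall-profile-123456789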

// Run starts the server: it joins an existing cluster when GRPCPeerAddress is
// set (otherwise it configures this node directly), ensures the node keypair,
// network subnet, and peer config, and then blocks on the pub/sub listener.
func (s *Server) Run() error {
	ctx := context.Background()
	// check peer address and make a grpc request for master info if present
	if s.cfg.GRPCPeerAddress != "" {
		logrus.Debugf("joining %s", s.cfg.GRPCPeerAddress)
		c, err := s.getClient(s.cfg.GRPCPeerAddress)
		if err != nil {
			return err
		}
		defer c.Close()

		r, err := c.Join(&v1.JoinRequest{
			ID:            s.cfg.ID,
			ClusterKey:    s.cfg.ClusterKey,
			GRPCAddress:   s.cfg.GRPCAddress,
			EndpointIP:    s.cfg.EndpointIP,
			EndpointPort:  uint64(s.cfg.EndpointPort),
			InterfaceName: s.cfg.InterfaceName,
		})
		if err != nil {
			return err
		}

		logrus.Debugf("join response: %+v", r)
		// start tunnel
		if err := s.updatePeerConfig(ctx, r.Node, r.Peers); err != nil {
			return errors.Wrap(err, "error updating peer config")
		}
		// TODO: wait for tunnel to come up
		time.Sleep(time.Second * 20)

		logrus.Debugf("master info received: %+v", r)
		if err := s.joinMaster(r.Master); err != nil {
			return err
		}

		go s.replicaMonitor()
	} else {
		if err := s.configureNode(); err != nil {
			return err
		}
	}

	// ensure keypair
	if _, err := s.getOrCreateKeyPair(ctx, s.cfg.ID); err != nil {
		return err
	}

	// ensure node network subnet
	if err := s.ensureNetworkSubnet(ctx, s.cfg.ID); err != nil {
		return err
	}

	// initial node update
	if err := s.updateLocalNodeInfo(ctx); err != nil {
		return err
	}

	// start node heartbeat to update in redis
	go s.updateNodeInfo(ctx)

	// initial peer info update
	if err := s.updatePeerInfo(ctx, s.cfg.ID); err != nil {
		return err
	}

	// initial config update
	node, err := s.getNode(ctx, s.cfg.ID)
	if err != nil {
		return err
	}
	peers, err := s.getPeers(ctx)
	if err != nil {
		return err
	}
	if err := s.updatePeerConfig(ctx, node, peers); err != nil {
		return err
	}

	// start peer config updater to configure wireguard as peers join
	go s.peerUpdater(ctx)

	// start listener for pub/sub
	errCh := make(chan error, 1)
	go func() {
		c := s.rpool.Get()
		defer c.Close()

		psc := redis.PubSubConn{Conn: c}
		if err := psc.Subscribe(nodeJoinKey); err != nil {
			errCh <- err
			return
		}
		for {
			switch v := psc.Receive().(type) {
			case redis.Message:
				// TODO: handle join notify
				logrus.Debug("join notify")
			case redis.Subscription:
			case error:
				// surface receive errors instead of spinning on them
				errCh <- v
				return
			default:
				logrus.Debugf("unknown message type %T", v)
			}
		}
	}()

	err = <-errCh
	return err
}

// Stop closes the server's redis connection pools
func (s *Server) Stop() error {
	s.rpool.Close()
	s.wpool.Close()
	return nil
}

// getPool builds a redis connection pool for the given URL; when the URL
// carries a password, each new connection also sets MASTERAUTH so the
// instance can authenticate against its master during replication
func getPool(redisURL string) (*redis.Pool, error) {
	pool := redis.NewPool(func() (redis.Conn, error) {
		conn, err := redis.DialURL(redisURL)
		if err != nil {
			return nil, errors.Wrap(err, "unable to connect to redis")
		}

		u, err := url.Parse(redisURL)
		if err != nil {
			return nil, err
		}

		auth, ok := u.User.Password()
		if ok {
			logrus.Debug("setting masterauth for redis")
			if _, err := conn.Do("CONFIG", "SET", "MASTERAUTH", auth); err != nil {
				return nil, errors.Wrap(err, "error authenticating to redis")
			}
		}
		return conn, nil
	}, 10)

	return pool, nil
}
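
// A hedged example of the URL shape getPool expects: redis.DialURL follows
// the standard redis:// scheme, and a password in the URL userinfo triggers
// the MASTERAUTH setup above (host, password, and db here are illustrative):
//
//	pool, err := getPool("redis://:mypassword@127.0.0.1:6379/0")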

// ensureNetworkSubnet ensures the node has an allocated subnet, carving the
// first free one out of the configured node network range when none is stored
func (s *Server) ensureNetworkSubnet(ctx context.Context, id string) error {
	network, err := redis.String(s.local(ctx, "GET", s.getNodeNetworkKey(id)))
	if err != nil {
		if err != redis.ErrNil {
			return err
		}
		// allocate initial node subnet
		r, err := parseSubnetRange(s.cfg.NodeNetwork)
		if err != nil {
			return err
		}
		// iterate node networks to find first free
		nodeNetworkKeys, err := redis.Strings(s.local(ctx, "KEYS", s.getNodeNetworkKey("*")))
		if err != nil {
			return err
		}
		lookup := map[string]struct{}{}
		for _, netKey := range nodeNetworkKeys {
			n, err := redis.String(s.local(ctx, "GET", netKey))
			if err != nil {
				return err
			}
			lookup[n] = struct{}{}
		}

		subnet := r.Subnet
		size, _ := subnet.Mask.Size()

		for {
			n, ok := nextSubnet(subnet, size)
			if !ok {
				return fmt.Errorf("error getting next subnet")
			}
			if _, exists := lookup[n.String()]; exists {
				subnet = n
				continue
			}
			logrus.Debugf("allocated network %s for %s", n.String(), id)
			if err := s.updateNodeNetwork(ctx, id, n.String()); err != nil {
				return err
			}
			break
		}

		return nil
	}
	logrus.Debugf("node network for %s: %s", id, network)
	return nil
}
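
// A worked sketch of the allocation above (values are illustrative;
// parseSubnetRange and nextSubnet live elsewhere in this package): with
// NodeNetwork set to "10.10.0.0/16", the allocator collects every network
// already recorded under heimdall:nodenetworks:*, then steps through
// same-sized subnets after the start of the range until it reaches one
// absent from that set and records it for the joining node.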

// getOrCreateKeyPair returns the wireguard keypair stored for the id,
// generating and persisting a new one if none exists
func (s *Server) getOrCreateKeyPair(ctx context.Context, id string) (*v1.KeyPair, error) {
	key := s.getKeyPairKey(id)
	keyData, err := redis.Bytes(s.master(ctx, "GET", key))
	if err != nil {
		if err != redis.ErrNil {
			return nil, err
		}
		logrus.Debugf("generating new keypair for %s", id)
		privateKey, publicKey, err := wg.GenerateWireguardKeys(ctx)
		if err != nil {
			return nil, err
		}
		keyPair := &v1.KeyPair{
			PrivateKey: privateKey,
			PublicKey:  publicKey,
		}
		data, err := proto.Marshal(keyPair)
		if err != nil {
			return nil, err
		}
		if _, err := s.master(ctx, "SET", key, data); err != nil {
			return nil, err
		}
		return keyPair, nil
	}

	var keyPair v1.KeyPair
	if err := proto.Unmarshal(keyData, &keyPair); err != nil {
		return nil, err
	}
	return &keyPair, nil
}

func (s *Server) getNodeKey(id string) string {
	return fmt.Sprintf("%s:%s", nodesKey, id)
}

func (s *Server) getRouteKey(network string) string {
	return fmt.Sprintf("%s:%s", routesKey, network)
}

func (s *Server) getPeerKey(id string) string {
	return fmt.Sprintf("%s:%s", peersKey, id)
}

func (s *Server) getKeyPairKey(id string) string {
	return fmt.Sprintf("%s:%s", keypairsKey, id)
}

func (s *Server) getNodeNetworkKey(id string) string {
	return fmt.Sprintf("%s:%s", nodeNetworksKey, id)
}
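
// The helpers above produce redis keys of the form shown below, derived from
// the constants at the top of this file (the id and network are illustrative):
//
//	heimdall:nodes:node-1
//	heimdall:routes:10.100.0.0/24
//	heimdall:peers:node-1
//	heimdall:keypairs:node-1
//	heimdall:nodenetworks:node-1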

func (s *Server) getClient(addr string) (*client.Client, error) {
	return client.NewClient(s.cfg.ID, addr)
}

func (s *Server) getClusterKey(ctx context.Context) (string, error) {
	return redis.String(s.local(ctx, "GET", clusterKey))
}

func (s *Server) getWireguardConfigPath() string {
	return filepath.Join(wireguardConfigDir, s.cfg.InterfaceName+".conf")
}
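
// For example, with an illustrative InterfaceName of "wg0", the path above
// resolves to:
//
//	/etc/wireguard/wg0.conf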

func (s *Server) getTunnelName() string {
	return s.cfg.InterfaceName
}

// local runs a redis command against the local (replica) pool
func (s *Server) local(ctx context.Context, cmd string, args ...interface{}) (interface{}, error) {
	return s.do(ctx, s.rpool, cmd, args...)
}

// master runs a redis command against the master (writable) pool
func (s *Server) master(ctx context.Context, cmd string, args ...interface{}) (interface{}, error) {
	return s.do(ctx, s.wpool, cmd, args...)
}

func (s *Server) do(ctx context.Context, pool *redis.Pool, cmd string, args ...interface{}) (interface{}, error) {
	conn, err := pool.GetContext(ctx)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	return conn.Do(cmd, args...)
}
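
// A hedged usage sketch of the read/write split above (key and value are
// illustrative; rpool and wpool start out as the same pool in NewServer and
// presumably diverge once a node joins a master as a replica):
//
//	// read via the local pool
//	data, err := redis.Bytes(s.local(ctx, "GET", s.getNodeKey("node-1")))
//	// write via the master pool
//	if _, err := s.master(ctx, "SET", s.getNodeKey("node-1"), data); err != nil {
//		return err
//	}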