JJ Łakis 2017-04-22 10:54:13 +00:00 committed by GitHub
commit 4c0122ddd4
1237 changed files with 632648 additions and 12 deletions


@ -3,12 +3,17 @@ package main
import (
"fmt"
"log"
"net/url"
"os"
"strings"
"time"
"github.com/urfave/cli"
"golang.org/x/net/context"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
)
var containerCommand = cli.Command{
@ -22,6 +27,7 @@ var containerCommand = cli.Command{
containerStatusCommand,
listContainersCommand,
execSyncCommand,
execCommand,
},
}
@ -236,6 +242,45 @@ var execSyncCommand = cli.Command{
},
}
var execCommand = cli.Command{
Name: "exec",
Usage: "prepare a streaming endpoint to execute a command in the container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Value: "",
Usage: "id of the container",
},
cli.BoolFlag{
Name: "tty",
Usage: "wheter to use tty",
},
cli.BoolFlag{
Name: "stdin",
Usage: "wheter to stream to stdin",
},
cli.BoolFlag{
Name: "url",
Usage: "do not exec command, just prepare streaming endpoint",
},
},
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection(context)
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewRuntimeServiceClient(conn)
err = Exec(client, context.String("id"), context.Bool("tty"), context.Bool("stdin"), context.Bool("url"), context.Args())
if err != nil {
return fmt.Errorf("execing command in container failed: %v", err)
}
return nil
},
}
type listOptions struct {
// id of the container
id string
@ -441,6 +486,52 @@ func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout i
return nil
}
// Exec sends an ExecRequest to the server, and parses
// the returned ExecResponse.
func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOnly bool, cmd []string) error {
if ID == "" {
return fmt.Errorf("ID cannot be empty")
}
r, err := client.Exec(context.Background(), &pb.ExecRequest{
ContainerId: ID,
Cmd: cmd,
Tty: tty,
Stdin: stdin,
})
if err != nil {
return err
}
if urlOnly {
fmt.Println("URL:")
fmt.Println(r.Url)
return nil
}
execURL, err := url.Parse(r.Url)
if err != nil {
return err
}
streamExec, err := remotecommand.NewExecutor(&restclient.Config{}, "GET", execURL)
if err != nil {
return err
}
options := remotecommand.StreamOptions{
SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
Stdout: os.Stdout,
Stderr: os.Stderr,
Tty: tty,
}
if stdin {
options.Stdin = os.Stdin
}
return streamExec.Stream(options)
}
// ListContainers sends a ListContainerRequest to the server, and parses
// the returned ListContainerResponse.
func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error {

lock.json

@ -1,6 +1,15 @@
{
"memo": "0d3077faf280e4e13e18e56f085053d4ced593c2fcfcb09d7df1aea8f0bba403",
"projects": [
{
"name": "github.com/Azure/go-ansiterm",
"branch": "master",
"revision": "fa152c58bc15761d0200cb75fe958b89a9d4888e",
"packages": [
".",
"winterm"
]
},
{
"name": "github.com/BurntSushi/toml",
"version": "v0.2.0",
@ -27,6 +36,22 @@
"."
]
},
{
"name": "github.com/PuerkitoBio/purell",
"version": "v1.1.0",
"revision": "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4",
"packages": [
"."
]
},
{
"name": "github.com/PuerkitoBio/urlesc",
"branch": "master",
"revision": "5bd2802263f21d8788851d5305584c82a5c75d7e",
"packages": [
"."
]
},
{
"name": "github.com/Sirupsen/logrus",
"branch": "master",
@ -128,6 +153,14 @@
"dbus"
]
},
{
"name": "github.com/davecgh/go-spew",
"version": "v1.1.0",
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
"packages": [
"spew"
]
},
{
"name": "github.com/docker/distribution",
"branch": "master",
@ -175,6 +208,7 @@
"pkg/symlink",
"pkg/system",
"pkg/tlsconfig",
"pkg/term",
"pkg/truncindex",
"utils/templates"
]
@ -204,6 +238,21 @@
"packages": [
"."
]
},
{
"name": "github.com/docker/spdystream",
"branch": "master",
"revision": "ed496381df8283605c435b86d4fdd6f4f20b8c6e",
"packages": [
"."
]
},
{
"name": "github.com/emicklei/go-restful",
"version": "2.0.0",
"revision": "24f362968e4ac4f9f0a7f6c74ac4d66369716e16",
"packages": [
"."
]
},
{
"name": "github.com/fsnotify/fsnotify",
@ -228,6 +277,37 @@
"packages": [
"."
]
},
{
"name": "github.com/go-openapi/jsonpointer",
"branch": "master",
"revision": "779f45308c19820f1a69e9a4cd965f496e0da10f",
"packages": [
"."
]
},
{
"name": "github.com/go-openapi/jsonreference",
"branch": "master",
"revision": "36d33bfe519efae5632669801b180bf1a245da3b",
"packages": [
"."
]
},
{
"name": "github.com/go-openapi/spec",
"branch": "master",
"revision": "02fb9cd3430ed0581e0ceb4804d5d4b3cc702694",
"packages": [
"."
]
},
{
"name": "github.com/go-openapi/swag",
"branch": "master",
"revision": "d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e",
"packages": [
"."
]
},
{
"name": "github.com/gogo/protobuf",
@ -248,6 +328,14 @@
"proto"
]
},
{
"name": "github.com/google/gofuzz",
"branch": "master",
"revision": "44d81051d367757e1c7c6a5a86423ece9afcf63c",
"packages": [
"."
]
},
{
"name": "github.com/gorilla/context",
"version": "v1.1",
@ -272,6 +360,15 @@
"."
]
},
{
"name": "github.com/mailru/easyjson",
"branch": "master",
"revision": "99e922cf9de1bc0ab38310c277cff32c2147e747",
"packages": [
"jlexer",
"jwriter"
]
},
{
"name": "github.com/mattn/go-runewidth",
"version": "v0.0.1",
@ -296,6 +393,14 @@
"."
]
},
{
"name": "github.com/mitchellh/go-wordwrap",
"branch": "master",
"revision": "ad45545899c7b13c020ea92b2072220eefad42b8",
"packages": [
"."
]
},
{
"name": "github.com/mtrmac/gpgme",
"branch": "master",
@ -374,6 +479,14 @@
"."
]
},
{
"name": "github.com/spf13/pflag",
"branch": "master",
"revision": "9ff6c6923cfffbcd502984b8e0c80539a94968b7",
"packages": [
"."
]
},
{
"name": "github.com/syndtr/gocapability",
"branch": "master",
@ -390,6 +503,14 @@
"patricia"
]
},
{
"name": "github.com/ugorji/go",
"branch": "master",
"revision": "d23841a297e5489e787e72fceffabf9d2994b52a",
"packages": [
"codec"
]
},
{
"name": "github.com/urfave/cli",
"version": "v1.19.1",
@ -435,7 +556,8 @@
"internal/timeseries",
"lex/httplex",
"proxy",
"trace",
"websocket"
]
},
{
@ -447,6 +569,15 @@
"windows"
]
},
{
"name": "golang.org/x/text",
"branch": "master",
"revision": "dafb3384ad25363d928a9e97ce4ad3a2f0667e34",
"packages": [
"unicode/norm",
"width"
]
},
{
"name": "google.golang.org/grpc",
"version": "v1.0.1-GA",
@ -471,6 +602,14 @@
"."
]
},
{
"name": "gopkg.in/inf.v0",
"version": "v0.9.0",
"revision": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4",
"packages": [
"."
]
},
{
"name": "gopkg.in/yaml.v2",
"branch": "v2",
@ -484,8 +623,31 @@
"branch": "master",
"revision": "21807b270ec15d19215659a5caa08b17f66d6f44",
"packages": [
"pkg/api/errors",
"pkg/api/meta",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/meta/v1",
"pkg/conversion",
"pkg/fields", "pkg/fields",
"pkg/selection" "pkg/labels",
"pkg/openapi",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/selection",
"pkg/types",
"pkg/util/runtime",
"pkg/util/sets"
]
},
{
"name": "k8s.io/apiserver",
"branch": "master",
"revision": "18254ddaaab8024609bdf570493103036d72f86d",
"packages": [
"pkg/server/httplog",
"pkg/util/wsstream"
]
},
{
@ -493,6 +655,7 @@
"branch": "master",
"revision": "b766ef93a46ce6dc863462254658ca2861a53314",
"packages": [
"util/clock",
"util/homedir"
]
},
@ -501,7 +664,9 @@
"branch": "master",
"revision": "760d8e98e8f6ad27aaf50b1a030cb9e7b6859aab",
"packages": [
"pkg/kubelet/api/v1alpha1/runtime",
"pkg/kubelet/server/streaming",
"staging/src/k8s.io/apimachinery/pkg/fields"
]
}
]


@ -1,11 +1,103 @@
package server
import (
"fmt"
"io"
"os"
"os/exec"
"github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/cri-o/oci"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/term"
)
// Exec prepares a streaming endpoint to execute a command in the container.
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
logrus.Debugf("ExecRequest %+v", req)
resp, err := s.GetExec(req)
if err != nil {
return nil, fmt.Errorf("unable to prepare exec endpoint")
}
return resp, nil
}
// Exec endpoint for streaming.Runtime
func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
fmt.Println(containerID, cmd, stdin, stdout, stderr, tty, resize)
c := ss.runtimeServer.state.containers.Get(containerID)
if err := ss.runtimeServer.runtime.UpdateStatus(c); err != nil {
return err
}
cState := ss.runtimeServer.runtime.ContainerStatus(c)
if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
return fmt.Errorf("container is not created or running")
}
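// Build the exec invocation for the configured OCI runtime binary (runtime.Path(c)): "exec [-t] <container name> <command...>".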
args := []string{"exec"}
if tty {
args = append(args, "-t")
}
args = append(args, c.Name())
args = append(args, cmd...)
execCmd := exec.Command(ss.runtimeServer.runtime.Path(c), args...)
var cmdErr error
if tty {
p, err := kubecontainer.StartPty(execCmd)
if err != nil {
return err
}
defer p.Close()
// make sure to close the stdout stream
defer stdout.Close()
kubecontainer.HandleResizing(resize, func(size term.Size) {
term.SetSize(p.Fd(), size)
})
if stdin != nil {
go io.Copy(p, stdin)
}
if stdout != nil {
go io.Copy(stdout, p)
}
cmdErr = execCmd.Wait()
} else {
if stdin != nil {
// Use an os.Pipe here as it returns true *os.File objects.
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
// the call below to execCmd.Run() can unblock because its Stdin is the read half
// of the pipe.
r, w, err := os.Pipe()
if err != nil {
return err
}
go io.Copy(w, stdin)
execCmd.Stdin = r
}
if stdout != nil {
execCmd.Stdout = stdout
}
if stderr != nil {
execCmd.Stderr = stderr
}
cmdErr = execCmd.Run()
}
if exitErr, ok := cmdErr.(*exec.ExitError); ok {
return &utilexec.ExitErrorWrapper{ExitError: exitErr}
}
return cmdErr
}


@ -21,6 +21,7 @@ import (
rspec "github.com/opencontainers/runtime-spec/specs-go" rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label" "github.com/opencontainers/selinux/go-selinux/label"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
) )
const ( const (
@ -28,6 +29,13 @@ const (
shutdownFile = "/var/lib/ocid/ocid.shutdown" shutdownFile = "/var/lib/ocid/ocid.shutdown"
) )
// streamService implements streaming.Runtime.
type streamService struct {
runtimeServer *Server // needed by Exec() endpoint
streamServer streaming.Server
streaming.Runtime
}
// Server implements the RuntimeService and ImageService // Server implements the RuntimeService and ImageService
type Server struct { type Server struct {
config Config config Config
@ -50,6 +58,23 @@ type Server struct {
appArmorEnabled bool appArmorEnabled bool
appArmorProfile string appArmorProfile string
stream streamService
}
// GetExec returns exec stream request
func (s *Server) GetExec(req *pb.ExecRequest) (*pb.ExecResponse, error) {
return s.stream.streamServer.GetExec(req)
}
// GetAttach returns attach stream request
func (s *Server) GetAttach(req *pb.AttachRequest) (*pb.AttachResponse, error) {
return s.stream.streamServer.GetAttach(req)
}
// GetPortForward returns port forward stream request
func (s *Server) GetPortForward(req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
return s.stream.streamServer.GetPortForward(req)
}
func (s *Server) loadContainer(id string) error {
@ -501,20 +526,20 @@ func New(config *Config) (*Server, error) {
appArmorProfile: config.ApparmorProfile,
}
if s.seccompEnabled {
seccompProfile, fileErr := ioutil.ReadFile(config.SeccompProfile)
if fileErr != nil {
return nil, fmt.Errorf("opening seccomp profile (%s) failed: %v", config.SeccompProfile, fileErr)
}
var seccompConfig seccomp.Seccomp
if jsonErr := json.Unmarshal(seccompProfile, &seccompConfig); jsonErr != nil {
return nil, fmt.Errorf("decoding seccomp profile failed: %v", jsonErr)
}
s.seccompProfile = seccompConfig
}
if s.appArmorEnabled && s.appArmorProfile == apparmor.DefaultApparmorProfile {
if apparmorErr := apparmor.EnsureDefaultApparmorProfile(); apparmorErr != nil {
return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", apparmorErr)
}
}
@ -529,6 +554,20 @@ func New(config *Config) (*Server, error) {
s.restore()
s.cleanupSandboxesOnShutdown()
// Prepare streaming server
streamServerConfig := streaming.DefaultConfig
streamServerConfig.Addr = "0.0.0.0:10101"
s.stream.runtimeServer = s
s.stream.streamServer, err = streaming.NewServer(streamServerConfig, s.stream)
if err != nil {
return nil, fmt.Errorf("unable to create streaming server")
}
// TODO: Should this be started somewhere else?
go func() {
s.stream.streamServer.Start(true)
}()
logrus.Debugf("sandboxes: %v", s.state.sandboxes) logrus.Debugf("sandboxes: %v", s.state.sandboxes)
logrus.Debugf("containers: %v", s.state.containers) logrus.Debugf("containers: %v", s.state.containers)
return s, nil return s, nil

vendor/github.com/Azure/go-ansiterm/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/Azure/go-ansiterm/README.md generated vendored Normal file

@ -0,0 +1,12 @@
# go-ansiterm
This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.
For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.
The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
See parser_test.go for examples exercising the state machine and generating appropriate function calls.
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
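To make the flow above concrete, here is a hypothetical, in-package sketch (not part of this commit) that drives the parser with the "ESC, [, A" Cursor Up sequence from the README example; CreateParser, CreateTestAnsiEventHandler and the ANSI_* constants are all defined in the vendored files added below.
package ansiterm
import "fmt"
// exampleCursorUp feeds the three-byte CUU sequence through a parser that
// starts in the Ground state and prints the calls recorded by the test
// event handler.
func exampleCursorUp() {
	handler := CreateTestAnsiEventHandler()
	parser := CreateParser("Ground", handler)
	parser.Parse([]byte{ANSI_ESCAPE_PRIMARY, ANSI_ESCAPE_SECONDARY, 'A'}) // ESC [ A
	fmt.Println(handler.FunctionCalls) // expected output: [CUU([1])]
}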

vendor/github.com/Azure/go-ansiterm/constants.go generated vendored Normal file

@ -0,0 +1,188 @@
package ansiterm
const LogEnv = "DEBUG_TERMINAL"
// ANSI constants
// References:
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
// -- http://vt100.net/emu/dec_ansi_parser
// -- http://vt100.net/emu/vt500_parser.svg
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
// -- http://www.inwap.com/pdp10/ansicode.txt
const (
// ECMA-48 Set Graphics Rendition
// Note:
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
ANSI_SGR_RESET = 0
ANSI_SGR_BOLD = 1
ANSI_SGR_DIM = 2
_ANSI_SGR_ITALIC = 3
ANSI_SGR_UNDERLINE = 4
_ANSI_SGR_BLINKSLOW = 5
_ANSI_SGR_BLINKFAST = 6
ANSI_SGR_REVERSE = 7
_ANSI_SGR_INVISIBLE = 8
_ANSI_SGR_LINETHROUGH = 9
_ANSI_SGR_FONT_00 = 10
_ANSI_SGR_FONT_01 = 11
_ANSI_SGR_FONT_02 = 12
_ANSI_SGR_FONT_03 = 13
_ANSI_SGR_FONT_04 = 14
_ANSI_SGR_FONT_05 = 15
_ANSI_SGR_FONT_06 = 16
_ANSI_SGR_FONT_07 = 17
_ANSI_SGR_FONT_08 = 18
_ANSI_SGR_FONT_09 = 19
_ANSI_SGR_FONT_10 = 20
_ANSI_SGR_DOUBLEUNDERLINE = 21
ANSI_SGR_BOLD_DIM_OFF = 22
_ANSI_SGR_ITALIC_OFF = 23
ANSI_SGR_UNDERLINE_OFF = 24
_ANSI_SGR_BLINK_OFF = 25
_ANSI_SGR_RESERVED_00 = 26
ANSI_SGR_REVERSE_OFF = 27
_ANSI_SGR_INVISIBLE_OFF = 28
_ANSI_SGR_LINETHROUGH_OFF = 29
ANSI_SGR_FOREGROUND_BLACK = 30
ANSI_SGR_FOREGROUND_RED = 31
ANSI_SGR_FOREGROUND_GREEN = 32
ANSI_SGR_FOREGROUND_YELLOW = 33
ANSI_SGR_FOREGROUND_BLUE = 34
ANSI_SGR_FOREGROUND_MAGENTA = 35
ANSI_SGR_FOREGROUND_CYAN = 36
ANSI_SGR_FOREGROUND_WHITE = 37
_ANSI_SGR_RESERVED_01 = 38
ANSI_SGR_FOREGROUND_DEFAULT = 39
ANSI_SGR_BACKGROUND_BLACK = 40
ANSI_SGR_BACKGROUND_RED = 41
ANSI_SGR_BACKGROUND_GREEN = 42
ANSI_SGR_BACKGROUND_YELLOW = 43
ANSI_SGR_BACKGROUND_BLUE = 44
ANSI_SGR_BACKGROUND_MAGENTA = 45
ANSI_SGR_BACKGROUND_CYAN = 46
ANSI_SGR_BACKGROUND_WHITE = 47
_ANSI_SGR_RESERVED_02 = 48
ANSI_SGR_BACKGROUND_DEFAULT = 49
// 50 - 65: Unsupported
ANSI_MAX_CMD_LENGTH = 4096
MAX_INPUT_EVENTS = 128
DEFAULT_WIDTH = 80
DEFAULT_HEIGHT = 24
ANSI_BEL = 0x07
ANSI_BACKSPACE = 0x08
ANSI_TAB = 0x09
ANSI_LINE_FEED = 0x0A
ANSI_VERTICAL_TAB = 0x0B
ANSI_FORM_FEED = 0x0C
ANSI_CARRIAGE_RETURN = 0x0D
ANSI_ESCAPE_PRIMARY = 0x1B
ANSI_ESCAPE_SECONDARY = 0x5B
ANSI_OSC_STRING_ENTRY = 0x5D
ANSI_COMMAND_FIRST = 0x40
ANSI_COMMAND_LAST = 0x7E
DCS_ENTRY = 0x90
CSI_ENTRY = 0x9B
OSC_STRING = 0x9D
ANSI_PARAMETER_SEP = ";"
ANSI_CMD_G0 = '('
ANSI_CMD_G1 = ')'
ANSI_CMD_G2 = '*'
ANSI_CMD_G3 = '+'
ANSI_CMD_DECPNM = '>'
ANSI_CMD_DECPAM = '='
ANSI_CMD_OSC = ']'
ANSI_CMD_STR_TERM = '\\'
KEY_CONTROL_PARAM_2 = ";2"
KEY_CONTROL_PARAM_3 = ";3"
KEY_CONTROL_PARAM_4 = ";4"
KEY_CONTROL_PARAM_5 = ";5"
KEY_CONTROL_PARAM_6 = ";6"
KEY_CONTROL_PARAM_7 = ";7"
KEY_CONTROL_PARAM_8 = ";8"
KEY_ESC_CSI = "\x1B["
KEY_ESC_N = "\x1BN"
KEY_ESC_O = "\x1BO"
FILL_CHARACTER = ' '
)
func getByteRange(start byte, end byte) []byte {
bytes := make([]byte, 0, 32)
for i := start; i <= end; i++ {
bytes = append(bytes, byte(i))
}
return bytes
}
var toGroundBytes = getToGroundBytes()
var executors = getExecuteBytes()
// SPACE 20+A0 hex Always and everywhere a blank space
// Intermediate 20-2F hex !"#$%&'()*+,-./
var intermeds = getByteRange(0x20, 0x2F)
// Parameters 30-3F hex 0123456789:;<=>?
// CSI Parameters 30-39, 3B hex 0123456789;
var csiParams = getByteRange(0x30, 0x3F)
var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
var upperCase = getByteRange(0x40, 0x5F)
// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
var lowerCase = getByteRange(0x60, 0x7E)
// Alphabetics 40-7E hex (all of upper and lower case)
var alphabetics = append(upperCase, lowerCase...)
var printables = getByteRange(0x20, 0x7F)
var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
var escapeToGroundBytes = getEscapeToGroundBytes()
// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
// byte ranges below
func getEscapeToGroundBytes() []byte {
escapeToGroundBytes := getByteRange(0x30, 0x4F)
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
return escapeToGroundBytes
}
func getExecuteBytes() []byte {
executeBytes := getByteRange(0x00, 0x17)
executeBytes = append(executeBytes, 0x19)
executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
return executeBytes
}
func getToGroundBytes() []byte {
groundBytes := []byte{0x18}
groundBytes = append(groundBytes, 0x1A)
groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
groundBytes = append(groundBytes, 0x99)
groundBytes = append(groundBytes, 0x9A)
groundBytes = append(groundBytes, 0x9C)
return groundBytes
}
// Delete 7F hex Always and everywhere ignored
// C1 Control 80-9F hex 32 additional control characters
// G1 Displayable A1-FE hex 94 additional displayable characters
// Special A0+FF hex Same as SPACE and DELETE

vendor/github.com/Azure/go-ansiterm/context.go generated vendored Normal file

@ -0,0 +1,7 @@
package ansiterm
type ansiContext struct {
currentChar byte
paramBuffer []byte
interBuffer []byte
}

vendor/github.com/Azure/go-ansiterm/csi_entry_state.go generated vendored Normal file

@ -0,0 +1,49 @@
package ansiterm
type csiEntryState struct {
baseState
}
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
logger.Infof("CsiEntry::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(alphabetics, b):
return csiState.parser.ground, nil
case sliceContains(csiCollectables, b):
return csiState.parser.csiParam, nil
case sliceContains(executors, b):
return csiState, csiState.parser.execute()
}
return csiState, nil
}
func (csiState csiEntryState) Transition(s state) error {
logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {
case csiState.parser.ground:
return csiState.parser.csiDispatch()
case csiState.parser.csiParam:
switch {
case sliceContains(csiParams, csiState.parser.context.currentChar):
csiState.parser.collectParam()
case sliceContains(intermeds, csiState.parser.context.currentChar):
csiState.parser.collectInter()
}
}
return nil
}
func (csiState csiEntryState) Enter() error {
csiState.parser.clear()
return nil
}

vendor/github.com/Azure/go-ansiterm/csi_param_state.go generated vendored Normal file

@ -0,0 +1,38 @@
package ansiterm
type csiParamState struct {
baseState
}
func (csiState csiParamState) Handle(b byte) (s state, e error) {
logger.Infof("CsiParam::Handle %#x", b)
nextState, err := csiState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(alphabetics, b):
return csiState.parser.ground, nil
case sliceContains(csiCollectables, b):
csiState.parser.collectParam()
return csiState, nil
case sliceContains(executors, b):
return csiState, csiState.parser.execute()
}
return csiState, nil
}
func (csiState csiParamState) Transition(s state) error {
logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
csiState.baseState.Transition(s)
switch s {
case csiState.parser.ground:
return csiState.parser.csiDispatch()
}
return nil
}


@ -0,0 +1,36 @@
package ansiterm
type escapeIntermediateState struct {
baseState
}
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
logger.Infof("escapeIntermediateState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(intermeds, b):
return escState, escState.parser.collectInter()
case sliceContains(executors, b):
return escState, escState.parser.execute()
case sliceContains(escapeIntermediateToGroundBytes, b):
return escState.parser.ground, nil
}
return escState, nil
}
func (escState escapeIntermediateState) Transition(s state) error {
logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {
case escState.parser.ground:
return escState.parser.escDispatch()
}
return nil
}

vendor/github.com/Azure/go-ansiterm/escape_state.go generated vendored Normal file

@ -0,0 +1,47 @@
package ansiterm
type escapeState struct {
baseState
}
func (escState escapeState) Handle(b byte) (s state, e error) {
logger.Infof("escapeState::Handle %#x", b)
nextState, err := escState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case b == ANSI_ESCAPE_SECONDARY:
return escState.parser.csiEntry, nil
case b == ANSI_OSC_STRING_ENTRY:
return escState.parser.oscString, nil
case sliceContains(executors, b):
return escState, escState.parser.execute()
case sliceContains(escapeToGroundBytes, b):
return escState.parser.ground, nil
case sliceContains(intermeds, b):
return escState.parser.escapeIntermediate, nil
}
return escState, nil
}
func (escState escapeState) Transition(s state) error {
logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
escState.baseState.Transition(s)
switch s {
case escState.parser.ground:
return escState.parser.escDispatch()
case escState.parser.escapeIntermediate:
return escState.parser.collectInter()
}
return nil
}
func (escState escapeState) Enter() error {
escState.parser.clear()
return nil
}

vendor/github.com/Azure/go-ansiterm/event_handler.go generated vendored Normal file

@ -0,0 +1,90 @@
package ansiterm
type AnsiEventHandler interface {
// Print
Print(b byte) error
// Execute C0 commands
Execute(b byte) error
// CUrsor Up
CUU(int) error
// CUrsor Down
CUD(int) error
// CUrsor Forward
CUF(int) error
// CUrsor Backward
CUB(int) error
// Cursor to Next Line
CNL(int) error
// Cursor to Previous Line
CPL(int) error
// Cursor Horizontal position Absolute
CHA(int) error
// Vertical line Position Absolute
VPA(int) error
// CUrsor Position
CUP(int, int) error
// Horizontal and Vertical Position (depends on PUM)
HVP(int, int) error
// Text Cursor Enable Mode
DECTCEM(bool) error
// Origin Mode
DECOM(bool) error
// 132 Column Mode
DECCOLM(bool) error
// Erase in Display
ED(int) error
// Erase in Line
EL(int) error
// Insert Line
IL(int) error
// Delete Line
DL(int) error
// Insert Character
ICH(int) error
// Delete Character
DCH(int) error
// Set Graphics Rendition
SGR([]int) error
// Pan Down
SU(int) error
// Pan Up
SD(int) error
// Device Attributes
DA([]string) error
// Set Top and Bottom Margins
DECSTBM(int, int) error
// Index
IND() error
// Reverse Index
RI() error
// Flush updates from previous commands
Flush() error
}

vendor/github.com/Azure/go-ansiterm/ground_state.go generated vendored Normal file

@ -0,0 +1,24 @@
package ansiterm
type groundState struct {
baseState
}
func (gs groundState) Handle(b byte) (s state, e error) {
gs.parser.context.currentChar = b
nextState, err := gs.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case sliceContains(printables, b):
return gs, gs.parser.print()
case sliceContains(executors, b):
return gs, gs.parser.execute()
}
return gs, nil
}


@ -0,0 +1,31 @@
package ansiterm
type oscStringState struct {
baseState
}
func (oscState oscStringState) Handle(b byte) (s state, e error) {
logger.Infof("OscString::Handle %#x", b)
nextState, err := oscState.baseState.Handle(b)
if nextState != nil || err != nil {
return nextState, err
}
switch {
case isOscStringTerminator(b):
return oscState.parser.ground, nil
}
return oscState, nil
}
// See below for OSC string terminators for linux
// http://man7.org/linux/man-pages/man4/console_codes.4.html
func isOscStringTerminator(b byte) bool {
if b == ANSI_BEL || b == 0x5C {
return true
}
return false
}

vendor/github.com/Azure/go-ansiterm/parser.go generated vendored Normal file

@ -0,0 +1,136 @@
package ansiterm
import (
"errors"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
type AnsiParser struct {
currState state
eventHandler AnsiEventHandler
context *ansiContext
csiEntry state
csiParam state
dcsEntry state
escape state
escapeIntermediate state
error state
ground state
oscString state
stateMap []state
}
func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("ansiParser.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.InfoLevel,
}
parser := &AnsiParser{
eventHandler: evtHandler,
context: &ansiContext{},
}
parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
parser.error = errorState{baseState{name: "Error", parser: parser}}
parser.ground = groundState{baseState{name: "Ground", parser: parser}}
parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
parser.stateMap = []state{
parser.csiEntry,
parser.csiParam,
parser.dcsEntry,
parser.escape,
parser.escapeIntermediate,
parser.error,
parser.ground,
parser.oscString,
}
parser.currState = getState(initialState, parser.stateMap)
logger.Infof("CreateParser: parser %p", parser)
return parser
}
func getState(name string, states []state) state {
for _, el := range states {
if el.Name() == name {
return el
}
}
return nil
}
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
for i, b := range bytes {
if err := ap.handle(b); err != nil {
return i, err
}
}
return len(bytes), ap.eventHandler.Flush()
}
func (ap *AnsiParser) handle(b byte) error {
ap.context.currentChar = b
newState, err := ap.currState.Handle(b)
if err != nil {
return err
}
if newState == nil {
logger.Warning("newState is nil")
return errors.New("New state of 'nil' is invalid.")
}
if newState != ap.currState {
if err := ap.changeState(newState); err != nil {
return err
}
}
return nil
}
func (ap *AnsiParser) changeState(newState state) error {
logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
// Exit old state
if err := ap.currState.Exit(); err != nil {
logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
return err
}
// Perform transition action
if err := ap.currState.Transition(newState); err != nil {
logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
return err
}
// Enter new state
if err := newState.Enter(); err != nil {
logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
return err
}
ap.currState = newState
return nil
}


@ -0,0 +1,103 @@
package ansiterm
import (
"strconv"
)
func parseParams(bytes []byte) ([]string, error) {
paramBuff := make([]byte, 0, 0)
params := []string{}
for _, v := range bytes {
if v == ';' {
if len(paramBuff) > 0 {
// Completed parameter, append it to the list
s := string(paramBuff)
params = append(params, s)
paramBuff = make([]byte, 0, 0)
}
} else {
paramBuff = append(paramBuff, v)
}
}
// Last parameter may not be terminated with ';'
if len(paramBuff) > 0 {
s := string(paramBuff)
params = append(params, s)
}
logger.Infof("Parsed params: %v with length: %d", params, len(params))
return params, nil
}
func parseCmd(context ansiContext) (string, error) {
return string(context.currentChar), nil
}
func getInt(params []string, dflt int) int {
i := getInts(params, 1, dflt)[0]
logger.Infof("getInt: %v", i)
return i
}
func getInts(params []string, minCount int, dflt int) []int {
ints := []int{}
for _, v := range params {
i, _ := strconv.Atoi(v)
// Zero is mapped to the default value in VT100.
if i == 0 {
i = dflt
}
ints = append(ints, i)
}
if len(ints) < minCount {
remaining := minCount - len(ints)
for i := 0; i < remaining; i++ {
ints = append(ints, dflt)
}
}
logger.Infof("getInts: %v", ints)
return ints
}
func (ap *AnsiParser) modeDispatch(param string, set bool) error {
switch param {
case "?3":
return ap.eventHandler.DECCOLM(set)
case "?6":
return ap.eventHandler.DECOM(set)
case "?25":
return ap.eventHandler.DECTCEM(set)
}
return nil
}
func (ap *AnsiParser) hDispatch(params []string) error {
if len(params) == 1 {
return ap.modeDispatch(params[0], true)
}
return nil
}
func (ap *AnsiParser) lDispatch(params []string) error {
if len(params) == 1 {
return ap.modeDispatch(params[0], false)
}
return nil
}
func getEraseParam(params []string) int {
param := getInt(params, 0)
if param < 0 || 3 < param {
param = 0
}
return param
}

vendor/github.com/Azure/go-ansiterm/parser_actions.go generated vendored Normal file

@ -0,0 +1,122 @@
package ansiterm
import (
"fmt"
)
func (ap *AnsiParser) collectParam() error {
currChar := ap.context.currentChar
logger.Infof("collectParam %#x", currChar)
ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
return nil
}
func (ap *AnsiParser) collectInter() error {
currChar := ap.context.currentChar
logger.Infof("collectInter %#x", currChar)
ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
return nil
}
func (ap *AnsiParser) escDispatch() error {
cmd, _ := parseCmd(*ap.context)
intermeds := ap.context.interBuffer
logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
switch cmd {
case "D": // IND
return ap.eventHandler.IND()
case "E": // NEL, equivalent to CRLF
err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
if err == nil {
err = ap.eventHandler.Execute(ANSI_LINE_FEED)
}
return err
case "M": // RI
return ap.eventHandler.RI()
}
return nil
}
func (ap *AnsiParser) csiDispatch() error {
cmd, _ := parseCmd(*ap.context)
params, _ := parseParams(ap.context.paramBuffer)
logger.Infof("csiDispatch: %v(%v)", cmd, params)
switch cmd {
case "@":
return ap.eventHandler.ICH(getInt(params, 1))
case "A":
return ap.eventHandler.CUU(getInt(params, 1))
case "B":
return ap.eventHandler.CUD(getInt(params, 1))
case "C":
return ap.eventHandler.CUF(getInt(params, 1))
case "D":
return ap.eventHandler.CUB(getInt(params, 1))
case "E":
return ap.eventHandler.CNL(getInt(params, 1))
case "F":
return ap.eventHandler.CPL(getInt(params, 1))
case "G":
return ap.eventHandler.CHA(getInt(params, 1))
case "H":
ints := getInts(params, 2, 1)
x, y := ints[0], ints[1]
return ap.eventHandler.CUP(x, y)
case "J":
param := getEraseParam(params)
return ap.eventHandler.ED(param)
case "K":
param := getEraseParam(params)
return ap.eventHandler.EL(param)
case "L":
return ap.eventHandler.IL(getInt(params, 1))
case "M":
return ap.eventHandler.DL(getInt(params, 1))
case "P":
return ap.eventHandler.DCH(getInt(params, 1))
case "S":
return ap.eventHandler.SU(getInt(params, 1))
case "T":
return ap.eventHandler.SD(getInt(params, 1))
case "c":
return ap.eventHandler.DA(params)
case "d":
return ap.eventHandler.VPA(getInt(params, 1))
case "f":
ints := getInts(params, 2, 1)
x, y := ints[0], ints[1]
return ap.eventHandler.HVP(x, y)
case "h":
return ap.hDispatch(params)
case "l":
return ap.lDispatch(params)
case "m":
return ap.eventHandler.SGR(getInts(params, 1, 0))
case "r":
ints := getInts(params, 2, 1)
top, bottom := ints[0], ints[1]
return ap.eventHandler.DECSTBM(top, bottom)
default:
logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context))
return nil
}
}
func (ap *AnsiParser) print() error {
return ap.eventHandler.Print(ap.context.currentChar)
}
func (ap *AnsiParser) clear() error {
ap.context = &ansiContext{}
return nil
}
func (ap *AnsiParser) execute() error {
return ap.eventHandler.Execute(ap.context.currentChar)
}

vendor/github.com/Azure/go-ansiterm/parser_test.go generated vendored Normal file

@ -0,0 +1,141 @@
package ansiterm
import (
"fmt"
"testing"
)
func TestStateTransitions(t *testing.T) {
stateTransitionHelper(t, "CsiEntry", "Ground", alphabetics)
stateTransitionHelper(t, "CsiEntry", "CsiParam", csiCollectables)
stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY})
stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D})
stateTransitionHelper(t, "Escape", "Ground", escapeToGroundBytes)
stateTransitionHelper(t, "Escape", "EscapeIntermediate", intermeds)
stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", intermeds)
stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", executors)
stateTransitionHelper(t, "EscapeIntermediate", "Ground", escapeIntermediateToGroundBytes)
stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL})
stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C})
stateTransitionHelper(t, "Ground", "Ground", executors)
}
func TestAnyToX(t *testing.T) {
anyToXHelper(t, []byte{ANSI_ESCAPE_PRIMARY}, "Escape")
anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry")
anyToXHelper(t, []byte{OSC_STRING}, "OscString")
anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry")
anyToXHelper(t, toGroundBytes, "Ground")
}
func TestCollectCsiParams(t *testing.T) {
parser, _ := createTestParser("CsiEntry")
parser.Parse(csiCollectables)
buffer := parser.context.paramBuffer
bufferCount := len(buffer)
if bufferCount != len(csiCollectables) {
t.Errorf("Buffer: %v", buffer)
t.Errorf("CsiParams: %v", csiCollectables)
t.Errorf("Buffer count failure: %d != %d", bufferCount, len(csiParams))
return
}
for i, v := range csiCollectables {
if v != buffer[i] {
t.Errorf("Buffer: %v", buffer)
t.Errorf("CsiParams: %v", csiParams)
t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i])
}
}
}
func TestParseParams(t *testing.T) {
parseParamsHelper(t, []byte{}, []string{})
parseParamsHelper(t, []byte{';'}, []string{})
parseParamsHelper(t, []byte{';', ';'}, []string{})
parseParamsHelper(t, []byte{'7'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"})
parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"})
parseParamsHelper(t, []byte{'7', '8'}, []string{"78"})
parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"})
parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"})
}
func TestCursor(t *testing.T) {
cursorSingleParamHelper(t, 'A', "CUU")
cursorSingleParamHelper(t, 'B', "CUD")
cursorSingleParamHelper(t, 'C', "CUF")
cursorSingleParamHelper(t, 'D', "CUB")
cursorSingleParamHelper(t, 'E', "CNL")
cursorSingleParamHelper(t, 'F', "CPL")
cursorSingleParamHelper(t, 'G', "CHA")
cursorTwoParamHelper(t, 'H', "CUP")
cursorTwoParamHelper(t, 'f', "HVP")
funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"})
funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"})
}
func TestErase(t *testing.T) {
// Erase in Display
eraseHelper(t, 'J', "ED")
// Erase in Line
eraseHelper(t, 'K', "EL")
}
func TestSelectGraphicRendition(t *testing.T) {
funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"})
funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"})
funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"})
funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"})
}
func TestScroll(t *testing.T) {
scrollHelper(t, 'S', "SU")
scrollHelper(t, 'T', "SD")
}
func TestPrint(t *testing.T) {
parser, evtHandler := createTestParser("Ground")
parser.Parse(printables)
validateState(t, parser.currState, "Ground")
for i, v := range printables {
expectedCall := fmt.Sprintf("Print([%s])", string(v))
actualCall := evtHandler.FunctionCalls[i]
if actualCall != expectedCall {
t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i)
}
}
}
func TestClear(t *testing.T) {
p, _ := createTestParser("Ground")
fillContext(p.context)
p.clear()
validateEmptyContext(t, p.context)
}
func TestClearOnStateChange(t *testing.T) {
clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY})
clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY})
}
func TestC0(t *testing.T) {
expectedCall := "Execute([" + string(ANSI_LINE_FEED) + "])"
c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall})
expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])"
c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall})
}
func TestEscDispatch(t *testing.T) {
funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"})
}


@ -0,0 +1,114 @@
package ansiterm
import (
"fmt"
"testing"
)
func getStateNames() []string {
parser, _ := createTestParser("Ground")
stateNames := []string{}
for _, state := range parser.stateMap {
stateNames = append(stateNames, state.Name())
}
return stateNames
}
func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) {
for _, b := range bytes {
bytes := []byte{byte(b)}
parser, _ := createTestParser(start)
parser.Parse(bytes)
validateState(t, parser.currState, end)
}
}
func anyToXHelper(t *testing.T, bytes []byte, expectedState string) {
for _, s := range getStateNames() {
stateTransitionHelper(t, s, expectedState, bytes)
}
}
func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) {
parser, evtHandler := createTestParser(start)
parser.Parse(bytes)
validateState(t, parser.currState, expected)
validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}
func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) {
params, err := parseParams(bytes)
if err != nil {
t.Errorf("Parameter parse error: %v", err)
return
}
if len(params) != len(expectedParams) {
t.Errorf("Parsed parameters: %v", params)
t.Errorf("Expected parameters: %v", expectedParams)
t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams))
return
}
for i, v := range expectedParams {
if v != params[i] {
t.Errorf("Parsed parameters: %v", params)
t.Errorf("Expected parameters: %v", expectedParams)
t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i)
}
}
}
func cursorSingleParamHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
}
func cursorTwoParamHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)})
funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
}
func eraseHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)})
funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
}
func scrollHelper(t *testing.T, command byte, funcName string) {
funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)})
funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)})
}
func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) {
p, _ := createTestParser(start)
fillContext(p.context)
p.Parse(bytes)
validateState(t, p.currState, end)
validateEmptyContext(t, p.context)
}
func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) {
parser, evtHandler := createTestParser("Ground")
parser.Parse(bytes)
validateState(t, parser.currState, expectedState)
validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}


@ -0,0 +1,66 @@
package ansiterm
import (
"testing"
)
func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) {
evtHandler := CreateTestAnsiEventHandler()
parser := CreateParser(s, evtHandler)
return parser, evtHandler
}
func validateState(t *testing.T, actualState state, expectedStateName string) {
actualName := "Nil"
if actualState != nil {
actualName = actualState.Name()
}
if actualName != expectedStateName {
t.Errorf("Invalid state: '%s' != '%s'", actualName, expectedStateName)
}
}
func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) {
actualCount := len(actualCalls)
expectedCount := len(expectedCalls)
if actualCount != expectedCount {
t.Errorf("Actual calls: %v", actualCalls)
t.Errorf("Expected calls: %v", expectedCalls)
t.Errorf("Call count error: %d != %d", actualCount, expectedCount)
return
}
for i, v := range actualCalls {
if v != expectedCalls[i] {
t.Errorf("Actual calls: %v", actualCalls)
t.Errorf("Expected calls: %v", expectedCalls)
t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i]))
}
}
}
func fillContext(context *ansiContext) {
context.currentChar = 'A'
context.paramBuffer = []byte{'C', 'D', 'E'}
context.interBuffer = []byte{'F', 'G', 'H'}
}
func validateEmptyContext(t *testing.T, context *ansiContext) {
var expectedCurrChar byte = 0x0
if context.currentChar != expectedCurrChar {
t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar)
}
if len(context.paramBuffer) != 0 {
t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer)
}
if len(context.interBuffer) != 0 {
t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer)
}
}

vendor/github.com/Azure/go-ansiterm/states.go generated vendored Normal file

@ -0,0 +1,71 @@
package ansiterm
type stateID int
type state interface {
Enter() error
Exit() error
Handle(byte) (state, error)
Name() string
Transition(state) error
}
type baseState struct {
name string
parser *AnsiParser
}
func (base baseState) Enter() error {
return nil
}
func (base baseState) Exit() error {
return nil
}
func (base baseState) Handle(b byte) (s state, e error) {
switch {
case b == CSI_ENTRY:
return base.parser.csiEntry, nil
case b == DCS_ENTRY:
return base.parser.dcsEntry, nil
case b == ANSI_ESCAPE_PRIMARY:
return base.parser.escape, nil
case b == OSC_STRING:
return base.parser.oscString, nil
case sliceContains(toGroundBytes, b):
return base.parser.ground, nil
}
return nil, nil
}
func (base baseState) Name() string {
return base.name
}
func (base baseState) Transition(s state) error {
if s == base.parser.ground {
execBytes := []byte{0x18}
execBytes = append(execBytes, 0x1A)
execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
execBytes = append(execBytes, 0x99)
execBytes = append(execBytes, 0x9A)
if sliceContains(execBytes, base.parser.context.currentChar) {
return base.parser.execute()
}
}
return nil
}
type dcsEntryState struct {
baseState
}
type errorState struct {
baseState
}


@ -0,0 +1,173 @@
package ansiterm
import (
"fmt"
"strconv"
)
type TestAnsiEventHandler struct {
FunctionCalls []string
}
func CreateTestAnsiEventHandler() *TestAnsiEventHandler {
evtHandler := TestAnsiEventHandler{}
evtHandler.FunctionCalls = make([]string, 0)
return &evtHandler
}
func (h *TestAnsiEventHandler) recordCall(call string, params []string) {
s := fmt.Sprintf("%s(%v)", call, params)
h.FunctionCalls = append(h.FunctionCalls, s)
}
func (h *TestAnsiEventHandler) Print(b byte) error {
h.recordCall("Print", []string{string(b)})
return nil
}
func (h *TestAnsiEventHandler) Execute(b byte) error {
h.recordCall("Execute", []string{string(b)})
return nil
}
func (h *TestAnsiEventHandler) CUU(param int) error {
h.recordCall("CUU", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUD(param int) error {
h.recordCall("CUD", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUF(param int) error {
h.recordCall("CUF", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUB(param int) error {
h.recordCall("CUB", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CNL(param int) error {
h.recordCall("CNL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CPL(param int) error {
h.recordCall("CPL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CHA(param int) error {
h.recordCall("CHA", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) VPA(param int) error {
h.recordCall("VPA", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) CUP(x int, y int) error {
xS, yS := strconv.Itoa(x), strconv.Itoa(y)
h.recordCall("CUP", []string{xS, yS})
return nil
}
func (h *TestAnsiEventHandler) HVP(x int, y int) error {
xS, yS := strconv.Itoa(x), strconv.Itoa(y)
h.recordCall("HVP", []string{xS, yS})
return nil
}
func (h *TestAnsiEventHandler) DECTCEM(visible bool) error {
h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)})
return nil
}
func (h *TestAnsiEventHandler) DECOM(visible bool) error {
h.recordCall("DECOM", []string{strconv.FormatBool(visible)})
return nil
}
func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error {
h.recordCall("DECOLM", []string{strconv.FormatBool(use132)})
return nil
}
func (h *TestAnsiEventHandler) ED(param int) error {
h.recordCall("ED", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) EL(param int) error {
h.recordCall("EL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) IL(param int) error {
h.recordCall("IL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DL(param int) error {
h.recordCall("DL", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) ICH(param int) error {
h.recordCall("ICH", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DCH(param int) error {
h.recordCall("DCH", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) SGR(params []int) error {
strings := []string{}
for _, v := range params {
strings = append(strings, strconv.Itoa(v))
}
h.recordCall("SGR", strings)
return nil
}
func (h *TestAnsiEventHandler) SU(param int) error {
h.recordCall("SU", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) SD(param int) error {
h.recordCall("SD", []string{strconv.Itoa(param)})
return nil
}
func (h *TestAnsiEventHandler) DA(params []string) error {
h.recordCall("DA", params)
return nil
}
func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error {
topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom)
h.recordCall("DECSTBM", []string{topS, bottomS})
return nil
}
func (h *TestAnsiEventHandler) RI() error {
h.recordCall("RI", nil)
return nil
}
func (h *TestAnsiEventHandler) IND() error {
h.recordCall("IND", nil)
return nil
}
func (h *TestAnsiEventHandler) Flush() error {
return nil
}

vendor/github.com/Azure/go-ansiterm/utilities.go generated vendored Normal file

@ -0,0 +1,21 @@
package ansiterm
import (
"strconv"
)
func sliceContains(bytes []byte, b byte) bool {
for _, v := range bytes {
if v == b {
return true
}
}
return false
}
func convertBytesToInteger(bytes []byte) int {
s := string(bytes)
i, _ := strconv.Atoi(s)
return i
}

182
vendor/github.com/Azure/go-ansiterm/winterm/ansi.go generated vendored Normal file
View file

@ -0,0 +1,182 @@
// +build windows
package winterm
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
"github.com/Azure/go-ansiterm"
)
// Windows keyboard constants
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
const (
VK_PRIOR = 0x21 // PAGE UP key
VK_NEXT = 0x22 // PAGE DOWN key
VK_END = 0x23 // END key
VK_HOME = 0x24 // HOME key
VK_LEFT = 0x25 // LEFT ARROW key
VK_UP = 0x26 // UP ARROW key
VK_RIGHT = 0x27 // RIGHT ARROW key
VK_DOWN = 0x28 // DOWN ARROW key
VK_SELECT = 0x29 // SELECT key
VK_PRINT = 0x2A // PRINT key
VK_EXECUTE = 0x2B // EXECUTE key
VK_SNAPSHOT = 0x2C // PRINT SCREEN key
VK_INSERT = 0x2D // INS key
VK_DELETE = 0x2E // DEL key
VK_HELP = 0x2F // HELP key
VK_F1 = 0x70 // F1 key
VK_F2 = 0x71 // F2 key
VK_F3 = 0x72 // F3 key
VK_F4 = 0x73 // F4 key
VK_F5 = 0x74 // F5 key
VK_F6 = 0x75 // F6 key
VK_F7 = 0x76 // F7 key
VK_F8 = 0x77 // F8 key
VK_F9 = 0x78 // F9 key
VK_F10 = 0x79 // F10 key
VK_F11 = 0x7A // F11 key
VK_F12 = 0x7B // F12 key
RIGHT_ALT_PRESSED = 0x0001
LEFT_ALT_PRESSED = 0x0002
RIGHT_CTRL_PRESSED = 0x0004
LEFT_CTRL_PRESSED = 0x0008
SHIFT_PRESSED = 0x0010
NUMLOCK_ON = 0x0020
SCROLLLOCK_ON = 0x0040
CAPSLOCK_ON = 0x0080
ENHANCED_KEY = 0x0100
)
type ansiCommand struct {
CommandBytes []byte
Command string
Parameters []string
IsSpecial bool
}
func newAnsiCommand(command []byte) *ansiCommand {
if isCharacterSelectionCmdChar(command[1]) {
// This is a Character Set Selection command
return &ansiCommand{
CommandBytes: command,
Command: string(command),
IsSpecial: true,
}
}
// last char is command character
lastCharIndex := len(command) - 1
ac := &ansiCommand{
CommandBytes: command,
Command: string(command[lastCharIndex]),
IsSpecial: false,
}
// more than a single escape
if lastCharIndex != 0 {
start := 1
// skip if double char escape sequence
if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
start++
}
// convert this to GetNextParam method
ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
}
return ac
}
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
if index < 0 || index >= len(ac.Parameters) {
return defaultValue
}
param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
if err != nil {
return defaultValue
}
return int16(param)
}
func (ac *ansiCommand) String() string {
return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
bytesToHex(ac.CommandBytes),
ac.Command,
strings.Join(ac.Parameters, "\",\""))
}
// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
func isAnsiCommandChar(b byte) bool {
switch {
case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
return true
case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
// non-CSI escape sequence terminator
return true
case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
// String escape sequence terminator
return true
}
return false
}
func isXtermOscSequence(command []byte, current byte) bool {
return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
}
func isCharacterSelectionCmdChar(b byte) bool {
return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
}
// bytesToHex converts a slice of bytes to a human-readable string.
func bytesToHex(b []byte) string {
hex := make([]string, len(b))
for i, ch := range b {
hex[i] = fmt.Sprintf("%X", ch)
}
return strings.Join(hex, "")
}
// ensureInRange adjusts the passed value, if necessary, to ensure it is within
// the passed min / max range.
func ensureInRange(n int16, min int16, max int16) int16 {
if n < min {
return min
} else if n > max {
return max
} else {
return n
}
}
// GetStdFile returns the *os.File and Windows handle corresponding to the
// passed standard handle identifier (syscall.STD_INPUT_HANDLE,
// STD_OUTPUT_HANDLE or STD_ERROR_HANDLE).
func GetStdFile(nFile int) (*os.File, uintptr) {
var file *os.File
switch nFile {
case syscall.STD_INPUT_HANDLE:
file = os.Stdin
case syscall.STD_OUTPUT_HANDLE:
file = os.Stdout
case syscall.STD_ERROR_HANDLE:
file = os.Stderr
default:
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
}
fd, err := syscall.GetStdHandle(nFile)
if err != nil {
panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
}
return file, uintptr(fd)
}

322
vendor/github.com/Azure/go-ansiterm/winterm/api.go generated vendored Normal file
View file

@ -0,0 +1,322 @@
// +build windows
package winterm
import (
"fmt"
"syscall"
"unsafe"
)
//===========================================================================================================
// IMPORTANT NOTE:
//
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
// variables) that the pointers reference *before* the API call completes.
//
// As a result, in those cases, the code must hint that the variables remain active by invoking the
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
// require unsafe pointers.
//
// If you add or modify methods, ENSURE protection of local variables through the "use" function to inform
// the garbage collector that the variables remain in use if:
//
// -- The value is not a pointer (e.g., int32, struct)
// -- The value is not referenced by the method after passing the pointer to Windows
//
// See http://golang.org/doc/go1.3.
//===========================================================================================================
var (
kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
)
// Windows Console constants
const (
// Console modes
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_LINE_INPUT = 0x0002
ENABLE_ECHO_INPUT = 0x0004
ENABLE_WINDOW_INPUT = 0x0008
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_INSERT_MODE = 0x0020
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_EXTENDED_FLAGS = 0x0080
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
// Character attributes
// Note:
// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
// Clearing all foreground or background colors results in black; setting all creates white.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
FOREGROUND_BLUE uint16 = 0x0001
FOREGROUND_GREEN uint16 = 0x0002
FOREGROUND_RED uint16 = 0x0004
FOREGROUND_INTENSITY uint16 = 0x0008
FOREGROUND_MASK uint16 = 0x000F
BACKGROUND_BLUE uint16 = 0x0010
BACKGROUND_GREEN uint16 = 0x0020
BACKGROUND_RED uint16 = 0x0040
BACKGROUND_INTENSITY uint16 = 0x0080
BACKGROUND_MASK uint16 = 0x00F0
COMMON_LVB_MASK uint16 = 0xFF00
COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
COMMON_LVB_UNDERSCORE uint16 = 0x8000
// Input event types
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
KEY_EVENT = 0x0001
MOUSE_EVENT = 0x0002
WINDOW_BUFFER_SIZE_EVENT = 0x0004
MENU_EVENT = 0x0008
FOCUS_EVENT = 0x0010
// WaitForSingleObject return codes
WAIT_ABANDONED = 0x00000080
WAIT_FAILED = 0xFFFFFFFF
WAIT_SIGNALED = 0x0000000
WAIT_TIMEOUT = 0x00000102
// WaitForSingleObject wait duration
WAIT_INFINITE = 0xFFFFFFFF
WAIT_ONE_SECOND = 1000
WAIT_HALF_SECOND = 500
WAIT_QUARTER_SECOND = 250
)
// Windows API Console types
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
type (
CHAR_INFO struct {
UnicodeChar uint16
Attributes uint16
}
CONSOLE_CURSOR_INFO struct {
Size uint32
Visible int32
}
CONSOLE_SCREEN_BUFFER_INFO struct {
Size COORD
CursorPosition COORD
Attributes uint16
Window SMALL_RECT
MaximumWindowSize COORD
}
COORD struct {
X int16
Y int16
}
SMALL_RECT struct {
Left int16
Top int16
Right int16
Bottom int16
}
// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case; it is also the largest member.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
INPUT_RECORD struct {
EventType uint16
KeyEvent KEY_EVENT_RECORD
}
KEY_EVENT_RECORD struct {
KeyDown int32
RepeatCount uint16
VirtualKeyCode uint16
VirtualScanCode uint16
UnicodeChar uint16
ControlKeyState uint32
}
WINDOW_BUFFER_SIZE struct {
Size COORD
}
)
// boolToBOOL converts a Go bool into a Windows int32.
func boolToBOOL(f bool) int32 {
if f {
return int32(1)
} else {
return int32(0)
}
}
// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
return checkError(r1, r2, err)
}
// SetConsoleCursorInfo sets the size and visibility of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
return checkError(r1, r2, err)
}
// SetConsoleCursorPosition sets the location of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
use(coord)
return checkError(r1, r2, err)
}
// GetConsoleMode gets the console mode for the given console handle.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
return mode, err
}
// SetConsoleMode sets the console mode for the given console handle.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
func SetConsoleMode(handle uintptr, mode uint32) error {
r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
use(mode)
return checkError(r1, r2, err)
}
// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
info := CONSOLE_SCREEN_BUFFER_INFO{}
err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
if err != nil {
return nil, err
}
return &info, nil
}
// ScrollConsoleScreenBuffer moves a block of character data within the screen buffer,
// filling the vacated region with the provided CHAR_INFO.
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
use(scrollRect)
use(clipRect)
use(destOrigin)
use(char)
return checkError(r1, r2, err)
}
// SetConsoleScreenBufferSize sets the size of the console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
use(coord)
return checkError(r1, r2, err)
}
// SetConsoleTextAttribute sets the attributes of characters written to the
// console screen buffer by the WriteFile or WriteConsole function.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
use(attribute)
return checkError(r1, r2, err)
}
// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
// Note that the size and location must be within and no larger than the backing console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
use(isAbsolute)
use(rect)
return checkError(r1, r2, err)
}
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
use(buffer)
use(bufferSize)
use(bufferCoord)
return checkError(r1, r2, err)
}
// ReadConsoleInput reads (and removes) data from the console input buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
use(buffer)
return checkError(r1, r2, err)
}
// WaitForSingleObject waits for the passed handle to be signaled.
// It returns true if the handle was signaled; false otherwise.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
switch r1 {
case WAIT_ABANDONED, WAIT_TIMEOUT:
return false, nil
case WAIT_SIGNALED:
return true, nil
}
use(msWait)
return false, err
}
// String helpers
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
}
func (coord COORD) String() string {
return fmt.Sprintf("%v,%v", coord.X, coord.Y)
}
func (rect SMALL_RECT) String() string {
return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
}
// checkError evaluates the results of a Windows API call and returns the error if it failed.
func checkError(r1, r2 uintptr, err error) error {
// Windows APIs return non-zero to indicate success
if r1 != 0 {
return nil
}
// Return the error if provided, otherwise default to EINVAL
if err != nil {
return err
}
return syscall.EINVAL
}
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
func coordToPointer(c COORD) uintptr {
// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
return uintptr(*((*uint32)(unsafe.Pointer(&c))))
}
// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
func use(p interface{}) {}

View file

@ -0,0 +1,100 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
const (
FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
)
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
// request represented by the passed ANSI mode.
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
switch ansiMode {
// Mode styles
case ansiterm.ANSI_SGR_BOLD:
windowsMode = windowsMode | FOREGROUND_INTENSITY
case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
windowsMode &^= FOREGROUND_INTENSITY
case ansiterm.ANSI_SGR_UNDERLINE:
windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
case ansiterm.ANSI_SGR_REVERSE:
inverted = true
case ansiterm.ANSI_SGR_REVERSE_OFF:
inverted = false
case ansiterm.ANSI_SGR_UNDERLINE_OFF:
windowsMode &^= COMMON_LVB_UNDERSCORE
// Foreground colors
case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
case ansiterm.ANSI_SGR_FOREGROUND_RED:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
// Background colors
case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
// Restore the background bits from the base (reset) attributes
windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
case ansiterm.ANSI_SGR_BACKGROUND_RED:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
}
return windowsMode, inverted
}
// invertAttributes inverts the foreground and background colors of a Windows attributes value
func invertAttributes(windowsMode uint16) uint16 {
return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
}

View file

@ -0,0 +1,101 @@
// +build windows
package winterm
const (
horizontal = iota
vertical
)
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
if h.originMode {
sr := h.effectiveSr(info.Window)
return SMALL_RECT{
Top: sr.top,
Bottom: sr.bottom,
Left: 0,
Right: info.Size.X - 1,
}
} else {
return SMALL_RECT{
Top: info.Window.Top,
Bottom: info.Window.Bottom,
Left: 0,
Right: info.Size.X - 1,
}
}
}
// setCursorPosition sets the cursor to the specified position, bounded to the screen size
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
position.X = ensureInRange(position.X, window.Left, window.Right)
position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
err := SetConsoleCursorPosition(h.fd, position)
if err != nil {
return err
}
logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
return err
}
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
return h.moveCursor(vertical, param)
}
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
return h.moveCursor(horizontal, param)
}
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
switch moveMode {
case horizontal:
position.X += int16(param)
case vertical:
position.Y += int16(param)
}
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
position.X = 0
position.Y += int16(param)
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
position := info.CursorPosition
position.X = int16(param) - 1
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,84 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
// Ignore an invalid (negative area) request
if toCoord.Y < fromCoord.Y {
return nil
}
var err error
var coordStart = COORD{}
var coordEnd = COORD{}
xCurrent, yCurrent := fromCoord.X, fromCoord.Y
xEnd, yEnd := toCoord.X, toCoord.Y
// Clear any partial initial line
if xCurrent > 0 {
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yCurrent
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
xCurrent = 0
yCurrent += 1
}
// Clear intervening rectangular section
if yCurrent < yEnd {
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yEnd-1
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
xCurrent = 0
yCurrent = yEnd
}
// Clear remaining partial ending line
coordStart.X, coordStart.Y = xCurrent, yCurrent
coordEnd.X, coordEnd.Y = xEnd, yEnd
err = h.clearRect(attributes, coordStart, coordEnd)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
width := toCoord.X - fromCoord.X + 1
height := toCoord.Y - fromCoord.Y + 1
size := uint32(width) * uint32(height)
if size <= 0 {
return nil
}
buffer := make([]CHAR_INFO, size)
char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
for i := 0; i < int(size); i++ {
buffer[i] = char
}
err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
if err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,118 @@
// +build windows
package winterm
// effectiveSr gets the current effective scroll region in buffer coordinates
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
if top >= bottom {
top = window.Top
bottom = window.Bottom
}
return scrollRegion{top: top, bottom: bottom}
}
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
sr := h.effectiveSr(info.Window)
return h.scroll(param, sr, info)
}
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
return h.scrollUp(-param)
}
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
start := info.CursorPosition.Y
sr := h.effectiveSr(info.Window)
// Lines cannot be inserted or deleted outside the scrolling region.
if start >= sr.top && start <= sr.bottom {
sr.top = start
return h.scroll(param, sr, info)
} else {
return nil
}
}
func (h *windowsAnsiEventHandler) insertLines(param int) error {
return h.deleteLines(-param)
}
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
// Copy from and clip to the scroll region (full buffer width)
scrollRect := SMALL_RECT{
Top: sr.top,
Bottom: sr.bottom,
Left: 0,
Right: info.Size.X - 1,
}
// Origin to which area should be copied
destOrigin := COORD{
X: 0,
Y: sr.top - int16(param),
}
char := CHAR_INFO{
UnicodeChar: ' ',
Attributes: h.attributes,
}
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
return h.scrollLine(param, info.CursorPosition, info)
}
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
return h.deleteCharacters(-param)
}
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
// Copy from and clip to the scroll region (full buffer width)
scrollRect := SMALL_RECT{
Top: position.Y,
Bottom: position.Y,
Left: position.X,
Right: info.Size.X - 1,
}
// Origin to which area should be copied
destOrigin := COORD{
X: position.X - int16(columns),
Y: position.Y,
}
char := CHAR_INFO{
UnicodeChar: ' ',
Attributes: h.attributes,
}
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,9 @@
// +build windows
package winterm
// addInRange increments a value by the passed quantity while ensuring the values
// always remain within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
return ensureInRange(n+increment, min, max)
}

View file

@ -0,0 +1,726 @@
// +build windows
package winterm
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"github.com/Azure/go-ansiterm"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
type windowsAnsiEventHandler struct {
fd uintptr
file *os.File
infoReset *CONSOLE_SCREEN_BUFFER_INFO
sr scrollRegion
buffer bytes.Buffer
attributes uint16
inverted bool
wrapNext bool
drewMarginByte bool
originMode bool
marginByte byte
curInfo *CONSOLE_SCREEN_BUFFER_INFO
curPos COORD
}
func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("winEventHandler.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.DebugLevel,
}
infoReset, err := GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil
}
return &windowsAnsiEventHandler{
fd: fd,
file: file,
infoReset: infoReset,
attributes: infoReset.Attributes,
}
}
type scrollRegion struct {
top int16
bottom int16
}
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
// current cursor position and scroll region settings, in which case it returns
// true. If no special handling is necessary, then it does nothing and returns
// false.
//
// In the false case, the caller should ensure that a carriage return
// and line feed are inserted or that the text is otherwise wrapped.
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
if h.wrapNext {
if err := h.Flush(); err != nil {
return false, err
}
h.clearWrap()
}
pos, info, err := h.getCurrentInfo()
if err != nil {
return false, err
}
sr := h.effectiveSr(info.Window)
if pos.Y == sr.bottom {
// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
// is the full window.
if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
if includeCR {
pos.X = 0
h.updatePos(pos)
}
return false, nil
}
// A custom scroll region is active. Scroll the window manually to simulate
// the LF.
if err := h.Flush(); err != nil {
return false, err
}
logger.Info("Simulating LF inside scroll region")
if err := h.scrollUp(1); err != nil {
return false, err
}
if includeCR {
pos.X = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return false, err
}
}
return true, nil
} else if pos.Y < info.Window.Bottom {
// Let Windows handle the LF.
pos.Y++
if includeCR {
pos.X = 0
}
h.updatePos(pos)
return false, nil
} else {
// The cursor is at the bottom of the screen but outside the scroll
// region. Skip the LF.
logger.Info("Simulating LF outside scroll region")
if includeCR {
if err := h.Flush(); err != nil {
return false, err
}
pos.X = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return false, err
}
}
return true, nil
}
}
// executeLF executes a LF without a CR.
func (h *windowsAnsiEventHandler) executeLF() error {
handled, err := h.simulateLF(false)
if err != nil {
return err
}
if !handled {
// Windows LF will reset the cursor column position. Write the LF
// and restore the cursor position.
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
if pos.X != 0 {
if err := h.Flush(); err != nil {
return err
}
logger.Info("Resetting cursor position for LF without CR")
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
}
}
return nil
}
func (h *windowsAnsiEventHandler) Print(b byte) error {
if h.wrapNext {
h.buffer.WriteByte(h.marginByte)
h.clearWrap()
if _, err := h.simulateLF(true); err != nil {
return err
}
}
pos, info, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X == info.Size.X-1 {
h.wrapNext = true
h.marginByte = b
} else {
pos.X++
h.updatePos(pos)
h.buffer.WriteByte(b)
}
return nil
}
func (h *windowsAnsiEventHandler) Execute(b byte) error {
switch b {
case ansiterm.ANSI_TAB:
logger.Info("Execute(TAB)")
// Move to the next tab stop, but preserve auto-wrap if already set.
if !h.wrapNext {
pos, info, err := h.getCurrentInfo()
if err != nil {
return err
}
pos.X = (pos.X + 8) - pos.X%8
if pos.X >= info.Size.X {
pos.X = info.Size.X - 1
}
if err := h.Flush(); err != nil {
return err
}
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
}
return nil
case ansiterm.ANSI_BEL:
h.buffer.WriteByte(ansiterm.ANSI_BEL)
return nil
case ansiterm.ANSI_BACKSPACE:
if h.wrapNext {
if err := h.Flush(); err != nil {
return err
}
h.clearWrap()
}
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X > 0 {
pos.X--
h.updatePos(pos)
h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
}
return nil
case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
// Treat as true LF.
return h.executeLF()
case ansiterm.ANSI_LINE_FEED:
// Simulate a CR and LF for now since there is no way in go-ansiterm
// to tell if the LF should include CR (and more things break when it's
// missing than when it's incorrectly added).
handled, err := h.simulateLF(true)
if handled || err != nil {
return err
}
return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
case ansiterm.ANSI_CARRIAGE_RETURN:
if h.wrapNext {
if err := h.Flush(); err != nil {
return err
}
h.clearWrap()
}
pos, _, err := h.getCurrentInfo()
if err != nil {
return err
}
if pos.X != 0 {
pos.X = 0
h.updatePos(pos)
h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
}
return nil
default:
return nil
}
}
func (h *windowsAnsiEventHandler) CUU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(-param)
}
func (h *windowsAnsiEventHandler) CUD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorVertical(param)
}
func (h *windowsAnsiEventHandler) CUF(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(param)
}
func (h *windowsAnsiEventHandler) CUB(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorHorizontal(-param)
}
func (h *windowsAnsiEventHandler) CNL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(param)
}
func (h *windowsAnsiEventHandler) CPL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorLine(-param)
}
func (h *windowsAnsiEventHandler) CHA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.moveCursorColumn(param)
}
func (h *windowsAnsiEventHandler) VPA(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("VPA: [[%d]]", param)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
window := h.getCursorWindow(info)
position := info.CursorPosition
position.Y = window.Top + int16(param) - 1
return h.setCursorPosition(position, window)
}
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("CUP: [[%d %d]]", row, col)
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
window := h.getCursorWindow(info)
position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
return h.setCursorPosition(position, window)
}
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("HVP: [[%d %d]]", row, col)
h.clearWrap()
return h.CUP(row, col)
}
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
h.clearWrap()
return nil
}
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
h.clearWrap()
h.originMode = enable
return h.CUP(1, 1)
}
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
h.clearWrap()
if err := h.ED(2); err != nil {
return err
}
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
targetWidth := int16(80)
if use132 {
targetWidth = 132
}
if info.Size.X < targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
return err
}
}
window := info.Window
window.Left = 0
window.Right = targetWidth - 1
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
logger.Info("set window failed:", err)
return err
}
if info.Size.X > targetWidth {
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
logger.Info("set buffer failed:", err)
return err
}
}
return SetConsoleCursorPosition(h.fd, COORD{0, 0})
}
func (h *windowsAnsiEventHandler) ED(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
// [2J -- Erases the complete display. The cursor does not move.
// Notes:
// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
var start COORD
var end COORD
switch param {
case 0:
start = info.CursorPosition
end = COORD{info.Size.X - 1, info.Size.Y - 1}
case 1:
start = COORD{0, 0}
end = info.CursorPosition
case 2:
start = COORD{0, 0}
end = COORD{info.Size.X - 1, info.Size.Y - 1}
}
err = h.clearRange(h.attributes, start, end)
if err != nil {
return err
}
// If the whole buffer was cleared, move the window to the top while preserving
// the window-relative cursor position.
if param == 2 {
pos := info.CursorPosition
window := info.Window
pos.Y -= window.Top
window.Bottom -= window.Top
window.Top = 0
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
return err
}
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
return err
}
}
return nil
}
func (h *windowsAnsiEventHandler) EL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("EL: [%v]", strconv.Itoa(param))
h.clearWrap()
// [K -- Erases from the cursor to the end of the line, including the cursor position.
// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
// [2K -- Erases the complete line.
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
var start COORD
var end COORD
switch param {
case 0:
start = info.CursorPosition
end = COORD{info.Size.X, info.CursorPosition.Y}
case 1:
start = COORD{0, info.CursorPosition.Y}
end = info.CursorPosition
case 2:
start = COORD{0, info.CursorPosition.Y}
end = COORD{info.Size.X, info.CursorPosition.Y}
}
err = h.clearRange(h.attributes, start, end)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) IL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("IL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertLines(param)
}
func (h *windowsAnsiEventHandler) DL(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DL: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteLines(param)
}
func (h *windowsAnsiEventHandler) ICH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("ICH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.insertCharacters(param)
}
func (h *windowsAnsiEventHandler) DCH(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DCH: [%v]", strconv.Itoa(param))
h.clearWrap()
return h.deleteCharacters(param)
}
func (h *windowsAnsiEventHandler) SGR(params []int) error {
if err := h.Flush(); err != nil {
return err
}
strings := []string{}
for _, v := range params {
strings = append(strings, strconv.Itoa(v))
}
logger.Infof("SGR: [%v]", strings)
if len(params) <= 0 {
h.attributes = h.infoReset.Attributes
h.inverted = false
} else {
for _, attr := range params {
if attr == ansiterm.ANSI_SGR_RESET {
h.attributes = h.infoReset.Attributes
h.inverted = false
continue
}
h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
}
}
attributes := h.attributes
if h.inverted {
attributes = invertAttributes(attributes)
}
err := SetConsoleTextAttribute(h.fd, attributes)
if err != nil {
return err
}
return nil
}
func (h *windowsAnsiEventHandler) SU(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollUp(param)
}
func (h *windowsAnsiEventHandler) SD(param int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
h.clearWrap()
return h.scrollDown(param)
}
func (h *windowsAnsiEventHandler) DA(params []string) error {
logger.Infof("DA: [%v]", params)
// DA cannot be implemented because it must send data on the VT100 input stream,
// which is not available to go-ansiterm.
return nil
}
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
if err := h.Flush(); err != nil {
return err
}
logger.Infof("DECSTBM: [%d, %d]", top, bottom)
// The console buffer is 0-indexed; the ANSI scroll region parameters are 1-indexed
h.sr.top = int16(top - 1)
h.sr.bottom = int16(bottom - 1)
// This command also moves the cursor to the origin.
h.clearWrap()
return h.CUP(1, 1)
}
func (h *windowsAnsiEventHandler) RI() error {
if err := h.Flush(); err != nil {
return err
}
logger.Info("RI: []")
h.clearWrap()
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
sr := h.effectiveSr(info.Window)
if info.CursorPosition.Y == sr.top {
return h.scrollDown(1)
}
return h.moveCursorVertical(-1)
}
func (h *windowsAnsiEventHandler) IND() error {
logger.Info("IND: []")
return h.executeLF()
}
func (h *windowsAnsiEventHandler) Flush() error {
h.curInfo = nil
if h.buffer.Len() > 0 {
logger.Infof("Flush: [%s]", h.buffer.Bytes())
if _, err := h.buffer.WriteTo(h.file); err != nil {
return err
}
}
if h.wrapNext && !h.drewMarginByte {
logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return err
}
charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
size := COORD{1, 1}
position := COORD{0, 0}
region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
return err
}
h.drewMarginByte = true
}
return nil
}
// getCurrentInfo ensures that the current console screen information has been queried
// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
if h.curInfo == nil {
info, err := GetConsoleScreenBufferInfo(h.fd)
if err != nil {
return COORD{}, nil, err
}
h.curInfo = info
h.curPos = info.CursorPosition
}
return h.curPos, h.curInfo, nil
}
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
if h.curInfo == nil {
panic("failed to call getCurrentInfo before calling updatePos")
}
h.curPos = pos
}
// clearWrap clears the state where the cursor is in the margin
// waiting for the next character before wrapping the line. This must
// be done before most operations that act on the cursor.
func (h *windowsAnsiEventHandler) clearWrap() {
h.wrapNext = false
h.drewMarginByte = false
}

5
vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored Normal file
View file

@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

7
vendor/github.com/PuerkitoBio/purell/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,7 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

12
vendor/github.com/PuerkitoBio/purell/LICENSE generated vendored Normal file
View file

@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

187
vendor/github.com/PuerkitoBio/purell/README.md generated vendored Normal file
View file

@ -0,0 +1,187 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover test cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
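For instance, here is a minimal sketch of composing such a custom set (flag names as listed above; the output comment shows what these particular flags should produce):
```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from the usually-safe greedy set, add query sorting, and drop the
	// trailing-slash stripping using the bitwise operators described above.
	flags := purell.FlagsUsuallySafeGreedy | purell.FlagSortQuery
	flags &^= purell.FlagRemoveTrailingSlash

	normalized, err := purell.NormalizeURLString("HTTP://Example.com:80/a/b/../c/?b=2&a=1", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized) // should yield http://example.com/a/c/?a=1&b=2
}
```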
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing a `*url.URL`), the source URL object itself is modified to reflect the normalization (see the short sketch after this list).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
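To make the in-place behaviour noted in the list above concrete, here is a short sketch (URL and flags chosen arbitrarily):
```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/purell"
)

func main() {
	u, err := url.Parse("HTTP://Example.com:80/a/./b/")
	if err != nil {
		panic(err)
	}

	normalized := purell.NormalizeURL(u, purell.FlagsUsuallySafeGreedy)
	fmt.Println(normalized) // the normalized string that is returned
	fmt.Println(u.String()) // u itself now reflects the same normalization
}
```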
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
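The same comparison as a runnable sketch, using `MustNormalizeURLString` from the API section:
```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	const raw = "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid"

	// Prints the three results listed above, from least to most aggressive.
	fmt.Println(purell.MustNormalizeURLString(raw, purell.FlagsSafe))
	fmt.Println(purell.MustNormalizeURLString(raw, purell.FlagsUsuallySafeGreedy))
	fmt.Println(purell.MustNormalizeURLString(raw, purell.FlagsUnsafeGreedy))
}
```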
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
@beeker1121
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7

57
vendor/github.com/PuerkitoBio/purell/bench_test.go generated vendored Normal file
View file

@ -0,0 +1,57 @@
package purell
import (
"testing"
)
var (
safeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/..//?"
usuallySafeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/"
unsafeUrl = "HttPS://..www.iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allDWORDUrl = "HttPS://1113982867:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allOctalUrl = "HttPS://0102.0146.07.0223:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allHexUrl = "HttPS://0x42660793:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
allCombinedUrl = "HttPS://..0x42660793.:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment"
)
func BenchmarkSafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(safeUrl, FlagsSafe)
}
}
func BenchmarkUsuallySafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(usuallySafeUrl, FlagsUsuallySafeGreedy)
}
}
func BenchmarkUnsafe(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(unsafeUrl, FlagsUnsafeGreedy)
}
}
func BenchmarkAllDWORD(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allDWORDUrl, FlagsAllGreedy)
}
}
func BenchmarkAllOctal(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allOctalUrl, FlagsAllGreedy)
}
}
func BenchmarkAllHex(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allHexUrl, FlagsAllGreedy)
}
}
func BenchmarkAllCombined(b *testing.B) {
for i := 0; i < b.N; i++ {
NormalizeURLString(allCombinedUrl, FlagsAllGreedy)
}
}

View file

@ -0,0 +1,9 @@
PASS
BenchmarkSafe 500000 6131 ns/op
BenchmarkUsuallySafe 200000 7864 ns/op
BenchmarkUnsafe 100000 28560 ns/op
BenchmarkAllDWORD 50000 38722 ns/op
BenchmarkAllOctal 50000 40941 ns/op
BenchmarkAllHex 50000 44063 ns/op
BenchmarkAllCombined 50000 33613 ns/op
ok github.com/PuerkitoBio/purell 17.404s

35
vendor/github.com/PuerkitoBio/purell/example_test.go generated vendored Normal file
View file

@ -0,0 +1,35 @@
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}

379
vendor/github.com/PuerkitoBio/purell/purell.go generated vendored Normal file
@@ -0,0 +1,379 @@
/*
Package purell offers URL normalization as described on the Wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the Wikipedia article, required to cover test cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
parsed, err := url.Parse(u)
if err != nil {
return "", err
}
if f&FlagLowercaseHost == FlagLowercaseHost {
parsed.Host = strings.ToLower(parsed.Host)
}
// The idna package doesn't fully conform to RFC 5895
// (https://tools.ietf.org/html/rfc5895), so we do it here.
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
// TODO: Remove when (if?) idna package conforms to RFC 5895.
parsed.Host = width.Fold.String(parsed.Host)
parsed.Host = norm.NFC.String(parsed.Host)
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
return "", err
}
return NormalizeURL(parsed, f), nil
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k, _ := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}

768
vendor/github.com/PuerkitoBio/purell/purell_test.go generated vendored Normal file
@@ -0,0 +1,768 @@
package purell
import (
"fmt"
"net/url"
"testing"
)
type testCase struct {
nm string
src string
flgs NormalizationFlags
res string
parsed bool
}
var (
cases = [...]*testCase{
&testCase{
"LowerScheme",
"HTTP://www.SRC.ca",
FlagLowercaseScheme,
"http://www.SRC.ca",
false,
},
&testCase{
"LowerScheme2",
"http://www.SRC.ca",
FlagLowercaseScheme,
"http://www.SRC.ca",
false,
},
&testCase{
"LowerHost",
"HTTP://www.SRC.ca/",
FlagLowercaseHost,
"http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"UpperEscapes",
`http://www.whatever.com/Some%aa%20Special%8Ecases/`,
FlagUppercaseEscapes,
"http://www.whatever.com/Some%AA%20Special%8Ecases/",
false,
},
&testCase{
"UnnecessaryEscapes",
`http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`,
FlagDecodeUnnecessaryEscapes,
"http://www.toto.com/AB.D/23R-/_~",
false,
},
&testCase{
"RemoveDefaultPort",
"HTTP://www.SRC.ca:80/",
FlagRemoveDefaultPort,
"http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDefaultPort2",
"HTTP://www.SRC.ca:80",
FlagRemoveDefaultPort,
"http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDefaultPort3",
"HTTP://www.SRC.ca:8080",
FlagRemoveDefaultPort,
"http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"Safe",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
FlagsSafe,
"http://www.src.ca/to%1Ato%8B%EE/OKnowABC~",
false,
},
&testCase{
"BothLower",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e",
FlagLowercaseHost | FlagLowercaseScheme,
"http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~",
false,
},
&testCase{
"RemoveTrailingSlash",
"HTTP://www.SRC.ca:80/",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveTrailingSlash2",
"HTTP://www.SRC.ca:80/toto/titi/",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveTrailingSlash3",
"HTTP://www.SRC.ca:80/toto/titi/fin/?a=1",
FlagRemoveTrailingSlash,
"http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash",
"HTTP://www.SRC.ca:80",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash2",
"HTTP://www.SRC.ca:80/toto/titi.html",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"AddTrailingSlash3",
"HTTP://www.SRC.ca:80/toto/titi/fin?a=1",
FlagAddTrailingSlash,
"http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDotSegments",
"HTTP://root/a/b/./../../c/",
FlagRemoveDotSegments,
"http://root/c/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDotSegments2",
"HTTP://root/../a/b/./../c/../d",
FlagRemoveDotSegments,
"http://root/a/d", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"UsuallySafe",
"HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test",
FlagsUsuallySafeGreedy,
"http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test",
false,
},
&testCase{
"RemoveDirectoryIndex",
"HTTP://root/a/b/c/default.aspx",
FlagRemoveDirectoryIndex,
"http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveDirectoryIndex2",
"HTTP://root/a/b/c/default#a=b",
FlagRemoveDirectoryIndex,
"http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"RemoveFragment",
"HTTP://root/a/b/c/default#toto=tata",
FlagRemoveFragment,
"http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased
false,
},
&testCase{
"ForceHTTP",
"https://root/a/b/c/default#toto=tata",
FlagForceHTTP,
"http://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveDuplicateSlashes",
"https://root/a//b///c////default#toto=tata",
FlagRemoveDuplicateSlashes,
"https://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveDuplicateSlashes2",
"https://root//a//b///c////default#toto=tata",
FlagRemoveDuplicateSlashes,
"https://root/a/b/c/default#toto=tata",
false,
},
&testCase{
"RemoveWWW",
"https://www.root/a/b/c/",
FlagRemoveWWW,
"https://root/a/b/c/",
false,
},
&testCase{
"RemoveWWW2",
"https://WwW.Root/a/b/c/",
FlagRemoveWWW,
"https://Root/a/b/c/",
false,
},
&testCase{
"AddWWW",
"https://Root/a/b/c/",
FlagAddWWW,
"https://www.Root/a/b/c/",
false,
},
&testCase{
"SortQuery",
"http://root/toto/?b=4&a=1&c=3&b=2&a=5",
FlagSortQuery,
"http://root/toto/?a=1&a=5&b=2&b=4&c=3",
false,
},
&testCase{
"RemoveEmptyQuerySeparator",
"http://root/toto/?",
FlagRemoveEmptyQuerySeparator,
"http://root/toto/",
false,
},
&testCase{
"Unsafe",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUnsafeGreedy,
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
false,
},
&testCase{
"Safe2",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsSafe,
"https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
false,
},
&testCase{
"UsuallySafe2",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUsuallySafeGreedy,
"https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid",
false,
},
&testCase{
"AddTrailingSlashBug",
"http://src.ca/",
FlagsAllNonGreedy,
"http://www.src.ca/",
false,
},
&testCase{
"SourceModified",
"HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid",
FlagsUnsafeGreedy,
"http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3",
true,
},
&testCase{
"IPv6-1",
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[2001:db8:1f70::999:de8:7648:6e8]/test",
false,
},
&testCase{
"IPv6-2",
"http://[::ffff:192.168.1.1]/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"IPv6-3",
"http://[::ffff:192.168.1.1]:80/test",
FlagsSafe | FlagRemoveDotSegments,
"http://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"IPv6-4",
"htTps://[::fFff:192.168.1.1]:443/test",
FlagsSafe | FlagRemoveDotSegments,
"https://[::ffff:192.168.1.1]/test",
false,
},
&testCase{
"FTP",
"ftp://user:pass@ftp.foo.net/foo/bar",
FlagsSafe | FlagRemoveDotSegments,
"ftp://user:pass@ftp.foo.net/foo/bar",
false,
},
&testCase{
"Standard-1",
"http://www.foo.com:80/foo",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/foo",
false,
},
&testCase{
"Standard-2",
"http://www.foo.com:8000/foo",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com:8000/foo",
false,
},
&testCase{
"Standard-3",
"http://www.foo.com/%7ebar",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/~bar",
false,
},
&testCase{
"Standard-4",
"http://www.foo.com/%7Ebar",
FlagsSafe | FlagRemoveDotSegments,
"http://www.foo.com/~bar",
false,
},
&testCase{
"Standard-5",
"http://USER:pass@www.Example.COM/foo/bar",
FlagsSafe | FlagRemoveDotSegments,
"http://USER:pass@www.example.com/foo/bar",
false,
},
&testCase{
"Standard-6",
"http://test.example/?a=%26&b=1",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/?a=%26&b=1",
false,
},
&testCase{
"Standard-7",
"http://test.example/%25/?p=%20val%20%25",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/%25/?p=%20val%20%25",
false,
},
&testCase{
"Standard-8",
"http://test.example/path/with a%20space+/",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/path/with%20a%20space+/",
false,
},
&testCase{
"Standard-9",
"http://test.example/?",
FlagsSafe | FlagRemoveDotSegments,
"http://test.example/",
false,
},
&testCase{
"Standard-10",
"http://a.COM/path/?b&a",
FlagsSafe | FlagRemoveDotSegments,
"http://a.com/path/?b&a",
false,
},
&testCase{
"StandardCasesAddTrailingSlash",
"http://test.example?",
FlagsSafe | FlagAddTrailingSlash,
"http://test.example/",
false,
},
&testCase{
"OctalIP-1",
"http://0123.011.0.4/",
FlagsSafe | FlagDecodeOctalHost,
"http://0123.011.0.4/",
false,
},
&testCase{
"OctalIP-2",
"http://0102.0146.07.0223/",
FlagsSafe | FlagDecodeOctalHost,
"http://66.102.7.147/",
false,
},
&testCase{
"OctalIP-3",
"http://0102.0146.07.0223.:23/",
FlagsSafe | FlagDecodeOctalHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"OctalIP-4",
"http://USER:pass@0102.0146.07.0223../",
FlagsSafe | FlagDecodeOctalHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"DWORDIP-1",
"http://123.1113982867/",
FlagsSafe | FlagDecodeDWORDHost,
"http://123.1113982867/",
false,
},
&testCase{
"DWORDIP-2",
"http://1113982867/",
FlagsSafe | FlagDecodeDWORDHost,
"http://66.102.7.147/",
false,
},
&testCase{
"DWORDIP-3",
"http://1113982867.:23/",
FlagsSafe | FlagDecodeDWORDHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"DWORDIP-4",
"http://USER:pass@1113982867../",
FlagsSafe | FlagDecodeDWORDHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"HexIP-1",
"http://0x123.1113982867/",
FlagsSafe | FlagDecodeHexHost,
"http://0x123.1113982867/",
false,
},
&testCase{
"HexIP-2",
"http://0x42660793/",
FlagsSafe | FlagDecodeHexHost,
"http://66.102.7.147/",
false,
},
&testCase{
"HexIP-3",
"http://0x42660793.:23/",
FlagsSafe | FlagDecodeHexHost,
"http://66.102.7.147.:23/",
false,
},
&testCase{
"HexIP-4",
"http://USER:pass@0x42660793../",
FlagsSafe | FlagDecodeHexHost,
"http://USER:pass@66.102.7.147../",
false,
},
&testCase{
"UnnecessaryHostDots-1",
"http://.www.foo.com../foo/bar.html",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com/foo/bar.html",
false,
},
&testCase{
"UnnecessaryHostDots-2",
"http://www.foo.com./foo/bar.html",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com/foo/bar.html",
false,
},
&testCase{
"UnnecessaryHostDots-3",
"http://www.foo.com.:81/foo",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.foo.com:81/foo",
false,
},
&testCase{
"UnnecessaryHostDots-4",
"http://www.example.com./",
FlagsSafe | FlagRemoveUnnecessaryHostDots,
"http://www.example.com/",
false,
},
&testCase{
"EmptyPort-1",
"http://www.thedraymin.co.uk:/main/?p=308",
FlagsSafe | FlagRemoveEmptyPortSeparator,
"http://www.thedraymin.co.uk/main/?p=308",
false,
},
&testCase{
"EmptyPort-2",
"http://www.src.ca:",
FlagsSafe | FlagRemoveEmptyPortSeparator,
"http://www.src.ca",
false,
},
&testCase{
"Slashes-1",
"http://test.example/foo/bar/.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Slashes-2",
"http://test.example/foo/bar/./",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Slashes-3",
"http://test.example/foo/bar/..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-4",
"http://test.example/foo/bar/../",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-5",
"http://test.example/foo/bar/../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/baz",
false,
},
&testCase{
"Slashes-6",
"http://test.example/foo/bar/../..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/",
false,
},
&testCase{
"Slashes-7",
"http://test.example/foo/bar/../../",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/",
false,
},
&testCase{
"Slashes-8",
"http://test.example/foo/bar/../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-9",
"http://test.example/foo/bar/../../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-10",
"http://test.example/foo/bar/../../../../baz",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/baz",
false,
},
&testCase{
"Slashes-11",
"http://test.example/./foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-12",
"http://test.example/../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-13",
"http://test.example/foo.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo.",
false,
},
&testCase{
"Slashes-14",
"http://test.example/.foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/.foo",
false,
},
&testCase{
"Slashes-15",
"http://test.example/foo..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo..",
false,
},
&testCase{
"Slashes-16",
"http://test.example/..foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/..foo",
false,
},
&testCase{
"Slashes-17",
"http://test.example/./../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo",
false,
},
&testCase{
"Slashes-18",
"http://test.example/./foo/.",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-19",
"http://test.example/foo/./bar",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar",
false,
},
&testCase{
"Slashes-20",
"http://test.example/foo/../bar",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/bar",
false,
},
&testCase{
"Slashes-21",
"http://test.example/foo//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/",
false,
},
&testCase{
"Slashes-22",
"http://test.example/foo///bar//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"http://test.example/foo/bar/",
false,
},
&testCase{
"Relative",
"foo/bar",
FlagsAllGreedy,
"foo/bar",
false,
},
&testCase{
"Relative-1",
"./../foo",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo",
false,
},
&testCase{
"Relative-2",
"./foo/bar/../baz/../bang/..",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo/",
false,
},
&testCase{
"Relative-3",
"foo///bar//",
FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes,
"foo/bar/",
false,
},
&testCase{
"Relative-4",
"www.youtube.com",
FlagsUsuallySafeGreedy,
"www.youtube.com",
false,
},
/*&testCase{
"UrlNorm-5",
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
FlagsSafe | FlagRemoveDotSegments,
"http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
false,
},
&testCase{
"UrlNorm-1",
"http://test.example/?a=%e3%82%82%26",
FlagsAllGreedy,
"http://test.example/?a=\xe3\x82\x82%26",
false,
},*/
}
)
func TestRunner(t *testing.T) {
for _, tc := range cases {
runCase(tc, t)
}
}
func runCase(tc *testCase, t *testing.T) {
t.Logf("running %s...", tc.nm)
if tc.parsed {
u, e := url.Parse(tc.src)
if e != nil {
t.Errorf("%s - FAIL : %s", tc.nm, e)
return
} else {
NormalizeURL(u, tc.flgs)
if s := u.String(); s != tc.res {
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
}
}
} else {
if s, e := NormalizeURLString(tc.src, tc.flgs); e != nil {
t.Errorf("%s - FAIL : %s", tc.nm, e)
} else if s != tc.res {
t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s)
}
}
}
func TestDecodeUnnecessaryEscapesAll(t *testing.T) {
var url = "http://host/"
for i := 0; i < 256; i++ {
url += fmt.Sprintf("%%%02x", i)
}
if s, e := NormalizeURLString(url, FlagDecodeUnnecessaryEscapes); e != nil {
t.Fatalf("Got error %s", e.Error())
} else {
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22%23$%25&'()*+,-./0123456789:;%3C=%3E%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
if s != want {
t.Errorf("DecodeUnnecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
}
}
}
func TestEncodeNecessaryEscapesAll(t *testing.T) {
var url = "http://host/"
for i := 0; i < 256; i++ {
if i != 0x25 {
url += string(i)
}
}
if s, e := NormalizeURLString(url, FlagEncodeNecessaryEscapes); e != nil {
t.Fatalf("Got error %s", e.Error())
} else {
const want = "http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22#$&'()*+,-./0123456789:;%3C=%3E?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%C2%A0%C2%A1%C2%A2%C2%A3%C2%A4%C2%A5%C2%A6%C2%A7%C2%A8%C2%A9%C2%AA%C2%AB%C2%AC%C2%AD%C2%AE%C2%AF%C2%B0%C2%B1%C2%B2%C2%B3%C2%B4%C2%B5%C2%B6%C2%B7%C2%B8%C2%B9%C2%BA%C2%BB%C2%BC%C2%BD%C2%BE%C2%BF%C3%80%C3%81%C3%82%C3%83%C3%84%C3%85%C3%86%C3%87%C3%88%C3%89%C3%8A%C3%8B%C3%8C%C3%8D%C3%8E%C3%8F%C3%90%C3%91%C3%92%C3%93%C3%94%C3%95%C3%96%C3%97%C3%98%C3%99%C3%9A%C3%9B%C3%9C%C3%9D%C3%9E%C3%9F%C3%A0%C3%A1%C3%A2%C3%A3%C3%A4%C3%A5%C3%A6%C3%A7%C3%A8%C3%A9%C3%AA%C3%AB%C3%AC%C3%AD%C3%AE%C3%AF%C3%B0%C3%B1%C3%B2%C3%B3%C3%B4%C3%B5%C3%B6%C3%B7%C3%B8%C3%B9%C3%BA%C3%BB%C3%BC%C3%BD%C3%BE%C3%BF"
if s != want {
t.Errorf("EncodeNecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s)
}
}
}

53
vendor/github.com/PuerkitoBio/purell/urlnorm_test.go generated vendored Normal file
@@ -0,0 +1,53 @@
package purell
import (
"testing"
)
// Test cases merged from PR #1
// Originally from https://github.com/jehiah/urlnorm/blob/master/test_urlnorm.py
func assertMap(t *testing.T, cases map[string]string, f NormalizationFlags) {
for bad, good := range cases {
s, e := NormalizeURLString(bad, f)
if e != nil {
t.Errorf("%s normalizing %v to %v", e.Error(), bad, good)
} else {
if s != good {
t.Errorf("source: %v expected: %v got: %v", bad, good, s)
}
}
}
}
// This tests normalization to a Unicode representation:
// percent escapes for unreserved values are unescaped to their Unicode value,
// normalization to IDNA domains is tested, as are
// IP word handling, IPv6 address handling, and trailing domain periods.
// In general, this matches Google Chrome's unescaping for things in the address bar.
// Spaces are converted to '+' (perhaps controversial).
// http://code.google.com/p/google-url/ is probably another good reference for this approach.
func TestUrlnorm(t *testing.T) {
testcases := map[string]string{
"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=%e3%82%82%26",
//"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=\xe3\x82\x82%26", //should return a unicode character
"http://s.xn--q-bga.DE/": "http://s.xn--q-bga.de/", //should be in idna format
"http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode
"http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai",
"http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding
"http://\u00e9.com": "http://xn--9ca.com",
"http://e\u0301.com": "http://xn--9ca.com",
"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3",
//"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3",
"http://test.example/\xe3\x82\xad": "http://test.example/%E3%82%AD",
//"http://test.example/\xe3\x82\xad": "http://test.example/\xe3\x82\xad",
"http://test.example/?p=%23val#test-%23-val%25": "http://test.example/?p=%23val#test-%23-val%25", //check that %23 (#) is not escaped where it shouldn't be
"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n",
//"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xef\xbf\xbdliz\xc3\xa6ti\xc3\xb8n",
}
assertMap(t, testcases, FlagsSafe|FlagRemoveDotSegments)
}

11
vendor/github.com/PuerkitoBio/urlesc/.travis.yml generated vendored Normal file
@@ -0,0 +1,11 @@
language: go
go:
- 1.4
- tip
install:
- go build .
script:
- go test -v

27
vendor/github.com/PuerkitoBio/urlesc/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16
vendor/github.com/PuerkitoBio/urlesc/README.md generated vendored Normal file
@@ -0,0 +1,16 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to leave
unescaped some reserved characters that net/url incorrectly escapes (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
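## Example

A minimal sketch of how the package might be used (hypothetical program; the escaping behaviour follows `shouldEscape` in `urlesc.go` below):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// Unlike net/url, '/' and '?' are left untouched inside a query component.
	fmt.Println(urlesc.QueryEscape("path/to?x=one two")) // path/to?x%3Done+two

	// Escape re-serializes a parsed URL without over-escaping reserved
	// characters such as '&' in the path.
	u, err := url.Parse("http://www.google.com/file%20one%26two")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // http://www.google.com/file%20one&two
}
```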
## License
Go license (BSD-3-Clause)

180
vendor/github.com/PuerkitoBio/urlesc/urlesc.go generated vendored Normal file
@@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to leave
// unescaped some reserved characters that net/url incorrectly escapes.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not be escaped as per RFC 3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}

641
vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go generated vendored Normal file
@@ -0,0 +1,641 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package urlesc
import (
"net/url"
"testing"
)
type URLTest struct {
in string
out *url.URL
roundtrip string // expected result of reserializing the URL; empty means same as "in".
}
var urltests = []URLTest{
// no path
{
"http://www.google.com",
&url.URL{
Scheme: "http",
Host: "www.google.com",
},
"",
},
// path
{
"http://www.google.com/",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
},
"",
},
// path with hex escaping
{
"http://www.google.com/file%20one%26two",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/file one&two",
},
"http://www.google.com/file%20one&two",
},
// user
{
"ftp://webmaster@www.google.com/",
&url.URL{
Scheme: "ftp",
User: url.User("webmaster"),
Host: "www.google.com",
Path: "/",
},
"",
},
// escape sequence in username
{
"ftp://john%20doe@www.google.com/",
&url.URL{
Scheme: "ftp",
User: url.User("john doe"),
Host: "www.google.com",
Path: "/",
},
"ftp://john%20doe@www.google.com/",
},
// query
{
"http://www.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
},
"",
},
// query with hex escaping: NOT parsed
{
"http://www.google.com/?q=go%20language",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go%20language",
},
"",
},
// %20 outside query
{
"http://www.google.com/a%20b?q=c+d",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/a b",
RawQuery: "q=c+d",
},
"",
},
// path without leading /, so no parsing
{
"http:www.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Opaque: "www.google.com/",
RawQuery: "q=go+language",
},
"http:www.google.com/?q=go+language",
},
// path without leading /, so no parsing
{
"http:%2f%2fwww.google.com/?q=go+language",
&url.URL{
Scheme: "http",
Opaque: "%2f%2fwww.google.com/",
RawQuery: "q=go+language",
},
"http:%2f%2fwww.google.com/?q=go+language",
},
// non-authority with path
{
"mailto:/webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Path: "/webmaster@golang.org",
},
"mailto:///webmaster@golang.org", // unfortunate compromise
},
// non-authority
{
"mailto:webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Opaque: "webmaster@golang.org",
},
"",
},
// unescaped :// in query should not create a scheme
{
"/foo?query=http://bad",
&url.URL{
Path: "/foo",
RawQuery: "query=http://bad",
},
"",
},
// leading // without scheme should create an authority
{
"//foo",
&url.URL{
Host: "foo",
},
"",
},
// leading // without scheme, with userinfo, path, and query
{
"//user@foo/path?a=b",
&url.URL{
User: url.User("user"),
Host: "foo",
Path: "/path",
RawQuery: "a=b",
},
"",
},
// Three leading slashes isn't an authority, but doesn't return an error.
// (We can't return an error, as this code is also used via
// ServeHTTP -> ReadRequest -> Parse, which is arguably a
// different URL parsing context, but currently shares the
// same codepath)
{
"///threeslashes",
&url.URL{
Path: "///threeslashes",
},
"",
},
{
"http://user:password@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("user", "password"),
Host: "google.com",
},
"http://user:password@google.com",
},
// unescaped @ in username should not confuse host
{
"http://j@ne:password@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("j@ne", "password"),
Host: "google.com",
},
"http://j%40ne:password@google.com",
},
// unescaped @ in password should not confuse host
{
"http://jane:p@ssword@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("jane", "p@ssword"),
Host: "google.com",
},
"http://jane:p%40ssword@google.com",
},
{
"http://j@ne:password@google.com/p@th?q=@go",
&url.URL{
Scheme: "http",
User: url.UserPassword("j@ne", "password"),
Host: "google.com",
Path: "/p@th",
RawQuery: "q=@go",
},
"http://j%40ne:password@google.com/p@th?q=@go",
},
{
"http://www.google.com/?q=go+language#foo",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
Fragment: "foo",
},
"",
},
{
"http://www.google.com/?q=go+language#foo%26bar",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "/",
RawQuery: "q=go+language",
Fragment: "foo&bar",
},
"http://www.google.com/?q=go+language#foo&bar",
},
{
"file:///home/adg/rabbits",
&url.URL{
Scheme: "file",
Host: "",
Path: "/home/adg/rabbits",
},
"file:///home/adg/rabbits",
},
// "Windows" paths are no exception to the rule.
// See golang.org/issue/6027, especially comment #9.
{
"file:///C:/FooBar/Baz.txt",
&url.URL{
Scheme: "file",
Host: "",
Path: "/C:/FooBar/Baz.txt",
},
"file:///C:/FooBar/Baz.txt",
},
// case-insensitive scheme
{
"MaIlTo:webmaster@golang.org",
&url.URL{
Scheme: "mailto",
Opaque: "webmaster@golang.org",
},
"mailto:webmaster@golang.org",
},
// Relative path
{
"a/b/c",
&url.URL{
Path: "a/b/c",
},
"a/b/c",
},
// escaped '?' in username and password
{
"http://%3Fam:pa%3Fsword@google.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("?am", "pa?sword"),
Host: "google.com",
},
"",
},
// escaped '?' and '#' in path
{
"http://example.com/%3F%23",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "?#",
},
"",
},
// unescaped [ ] ! ' ( ) * in path
{
"http://example.com/[]!'()*",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "[]!'()*",
},
"http://example.com/[]!'()*",
},
// escaped : / ? # [ ] @ in username and password
{
"http://%3A%2F%3F:%23%5B%5D%40@example.com",
&url.URL{
Scheme: "http",
User: url.UserPassword(":/?", "#[]@"),
Host: "example.com",
},
"",
},
// unescaped ! $ & ' ( ) * + , ; = in username and password
{
"http://!$&'():*+,;=@example.com",
&url.URL{
Scheme: "http",
User: url.UserPassword("!$&'()", "*+,;="),
Host: "example.com",
},
"",
},
// unescaped = : / . ? = in query component
{
"http://example.com/?q=http://google.com/?q=",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "/",
RawQuery: "q=http://google.com/?q=",
},
"",
},
// unescaped : / ? [ ] @ ! $ & ' ( ) * + , ; = in fragment
{
"http://example.com/#:/?%23[]@!$&'()*+,;=",
&url.URL{
Scheme: "http",
Host: "example.com",
Path: "/",
Fragment: ":/?#[]@!$&'()*+,;=",
},
"",
},
}
func DoTestString(t *testing.T, parse func(string) (*url.URL, error), name string, tests []URLTest) {
for _, tt := range tests {
u, err := parse(tt.in)
if err != nil {
t.Errorf("%s(%q) returned error %s", name, tt.in, err)
continue
}
expected := tt.in
if len(tt.roundtrip) > 0 {
expected = tt.roundtrip
}
s := Escape(u)
if s != expected {
t.Errorf("Escape(%s(%q)) == %q (expected %q)", name, tt.in, s, expected)
}
}
}
func TestURLString(t *testing.T) {
DoTestString(t, url.Parse, "Parse", urltests)
// no leading slash on path should prepend
// slash on String() call
noslash := URLTest{
"http://www.google.com/search",
&url.URL{
Scheme: "http",
Host: "www.google.com",
Path: "search",
},
"",
}
s := Escape(noslash.out)
if s != noslash.in {
t.Errorf("Expected %s; got %s", noslash.in, s)
}
}
type EscapeTest struct {
in string
out string
err error
}
var escapeTests = []EscapeTest{
{
"",
"",
nil,
},
{
"abc",
"abc",
nil,
},
{
"one two",
"one+two",
nil,
},
{
"10%",
"10%25",
nil,
},
{
" ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;",
"+?%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A/%40%24%27%28%29%2A%2C%3B",
nil,
},
}
func TestEscape(t *testing.T) {
for _, tt := range escapeTests {
actual := QueryEscape(tt.in)
if tt.out != actual {
t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out)
}
// for bonus points, verify that escape:unescape is an identity.
roundtrip, err := url.QueryUnescape(actual)
if roundtrip != tt.in || err != nil {
t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]")
}
}
}
var resolveReferenceTests = []struct {
base, rel, expected string
}{
// Absolute URL references
{"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"},
{"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"},
{"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"},
// Path-absolute references
{"http://foo.com/bar", "/baz", "http://foo.com/baz"},
{"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"},
{"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"},
// Scheme-relative
{"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"},
// Path-relative references:
// ... current directory
{"http://foo.com", ".", "http://foo.com/"},
{"http://foo.com/bar", ".", "http://foo.com/"},
{"http://foo.com/bar/", ".", "http://foo.com/bar/"},
// ... going down
{"http://foo.com", "bar", "http://foo.com/bar"},
{"http://foo.com/", "bar", "http://foo.com/bar"},
{"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"},
// ... going up
{"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"},
{"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"},
{"http://foo.com/bar", "..", "http://foo.com/"},
{"http://foo.com/bar/baz", "./..", "http://foo.com/"},
// ".." in the middle (issue 3560)
{"http://foo.com/bar/baz", "quux/dotdot/../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/.././tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/./../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/././../../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/./.././../tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/dotdot/dotdot/./../../.././././tail", "http://foo.com/bar/quux/tail"},
{"http://foo.com/bar/baz", "quux/./dotdot/../dotdot/../dot/./tail/..", "http://foo.com/bar/quux/dot/"},
// Remove any dot-segments prior to forming the target URI.
// http://tools.ietf.org/html/rfc3986#section-5.2.4
{"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/baz"},
// Triple dot isn't special
{"http://foo.com/bar", "...", "http://foo.com/..."},
// Fragment
{"http://foo.com/bar", ".#frag", "http://foo.com/#frag"},
// RFC 3986: Normal Examples
// http://tools.ietf.org/html/rfc3986#section-5.4.1
{"http://a/b/c/d;p?q", "g:h", "g:h"},
{"http://a/b/c/d;p?q", "g", "http://a/b/c/g"},
{"http://a/b/c/d;p?q", "./g", "http://a/b/c/g"},
{"http://a/b/c/d;p?q", "g/", "http://a/b/c/g/"},
{"http://a/b/c/d;p?q", "/g", "http://a/g"},
{"http://a/b/c/d;p?q", "//g", "http://g"},
{"http://a/b/c/d;p?q", "?y", "http://a/b/c/d;p?y"},
{"http://a/b/c/d;p?q", "g?y", "http://a/b/c/g?y"},
{"http://a/b/c/d;p?q", "#s", "http://a/b/c/d;p?q#s"},
{"http://a/b/c/d;p?q", "g#s", "http://a/b/c/g#s"},
{"http://a/b/c/d;p?q", "g?y#s", "http://a/b/c/g?y#s"},
{"http://a/b/c/d;p?q", ";x", "http://a/b/c/;x"},
{"http://a/b/c/d;p?q", "g;x", "http://a/b/c/g;x"},
{"http://a/b/c/d;p?q", "g;x?y#s", "http://a/b/c/g;x?y#s"},
{"http://a/b/c/d;p?q", "", "http://a/b/c/d;p?q"},
{"http://a/b/c/d;p?q", ".", "http://a/b/c/"},
{"http://a/b/c/d;p?q", "./", "http://a/b/c/"},
{"http://a/b/c/d;p?q", "..", "http://a/b/"},
{"http://a/b/c/d;p?q", "../", "http://a/b/"},
{"http://a/b/c/d;p?q", "../g", "http://a/b/g"},
{"http://a/b/c/d;p?q", "../..", "http://a/"},
{"http://a/b/c/d;p?q", "../../", "http://a/"},
{"http://a/b/c/d;p?q", "../../g", "http://a/g"},
// RFC 3986: Abnormal Examples
// http://tools.ietf.org/html/rfc3986#section-5.4.2
{"http://a/b/c/d;p?q", "../../../g", "http://a/g"},
{"http://a/b/c/d;p?q", "../../../../g", "http://a/g"},
{"http://a/b/c/d;p?q", "/./g", "http://a/g"},
{"http://a/b/c/d;p?q", "/../g", "http://a/g"},
{"http://a/b/c/d;p?q", "g.", "http://a/b/c/g."},
{"http://a/b/c/d;p?q", ".g", "http://a/b/c/.g"},
{"http://a/b/c/d;p?q", "g..", "http://a/b/c/g.."},
{"http://a/b/c/d;p?q", "..g", "http://a/b/c/..g"},
{"http://a/b/c/d;p?q", "./../g", "http://a/b/g"},
{"http://a/b/c/d;p?q", "./g/.", "http://a/b/c/g/"},
{"http://a/b/c/d;p?q", "g/./h", "http://a/b/c/g/h"},
{"http://a/b/c/d;p?q", "g/../h", "http://a/b/c/h"},
{"http://a/b/c/d;p?q", "g;x=1/./y", "http://a/b/c/g;x=1/y"},
{"http://a/b/c/d;p?q", "g;x=1/../y", "http://a/b/c/y"},
{"http://a/b/c/d;p?q", "g?y/./x", "http://a/b/c/g?y/./x"},
{"http://a/b/c/d;p?q", "g?y/../x", "http://a/b/c/g?y/../x"},
{"http://a/b/c/d;p?q", "g#s/./x", "http://a/b/c/g#s/./x"},
{"http://a/b/c/d;p?q", "g#s/../x", "http://a/b/c/g#s/../x"},
// Extras.
{"https://a/b/c/d;p?q", "//g?q", "https://g?q"},
{"https://a/b/c/d;p?q", "//g#s", "https://g#s"},
{"https://a/b/c/d;p?q", "//g/d/e/f?y#s", "https://g/d/e/f?y#s"},
{"https://a/b/c/d;p#s", "?y", "https://a/b/c/d;p?y"},
{"https://a/b/c/d;p?q#s", "?y", "https://a/b/c/d;p?y"},
}
func TestResolveReference(t *testing.T) {
mustParse := func(url_ string) *url.URL {
u, err := url.Parse(url_)
if err != nil {
t.Fatalf("Expected URL to parse: %q, got error: %v", url_, err)
}
return u
}
opaque := &url.URL{Scheme: "scheme", Opaque: "opaque"}
for _, test := range resolveReferenceTests {
base := mustParse(test.base)
rel := mustParse(test.rel)
url := base.ResolveReference(rel)
if Escape(url) != test.expected {
t.Errorf("URL(%q).ResolveReference(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
}
// Ensure that new instances are returned.
if base == url {
t.Errorf("Expected URL.ResolveReference to return new URL instance.")
}
// Test the convenience wrapper too.
url, err := base.Parse(test.rel)
if err != nil {
t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err)
} else if Escape(url) != test.expected {
t.Errorf("URL(%q).Parse(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url))
} else if base == url {
// Ensure that new instances are returned for the wrapper too.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
// Ensure Opaque resets the URL.
url = base.ResolveReference(opaque)
if *url != *opaque {
t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", url, opaque)
}
// Test the convenience wrapper with an opaque URL too.
url, err = base.Parse("scheme:opaque")
if err != nil {
t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err)
} else if *url != *opaque {
t.Errorf("Parse failed to resolve opaque URL: want %#v, got %#v", url, opaque)
} else if base == url {
// Ensure that new instances are returned, again.
t.Errorf("Expected URL.Parse to return new URL instance.")
}
}
}
type shouldEscapeTest struct {
in byte
mode encoding
escape bool
}
var shouldEscapeTests = []shouldEscapeTest{
// Unreserved characters (§2.3)
{'a', encodePath, false},
{'a', encodeUserPassword, false},
{'a', encodeQueryComponent, false},
{'a', encodeFragment, false},
{'z', encodePath, false},
{'A', encodePath, false},
{'Z', encodePath, false},
{'0', encodePath, false},
{'9', encodePath, false},
{'-', encodePath, false},
{'-', encodeUserPassword, false},
{'-', encodeQueryComponent, false},
{'-', encodeFragment, false},
{'.', encodePath, false},
{'_', encodePath, false},
{'~', encodePath, false},
// User information (§3.2.1)
{':', encodeUserPassword, true},
{'/', encodeUserPassword, true},
{'?', encodeUserPassword, true},
{'@', encodeUserPassword, true},
{'$', encodeUserPassword, false},
{'&', encodeUserPassword, false},
{'+', encodeUserPassword, false},
{',', encodeUserPassword, false},
{';', encodeUserPassword, false},
{'=', encodeUserPassword, false},
}
func TestShouldEscape(t *testing.T) {
for _, tt := range shouldEscapeTests {
if shouldEscape(tt.in, tt.mode) != tt.escape {
t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape)
}
}
}

22
vendor/github.com/davecgh/go-spew/.gitignore generated vendored Normal file
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

14
vendor/github.com/davecgh/go-spew/.travis.yml generated vendored Normal file
@@ -0,0 +1,14 @@
language: go
go:
- 1.5.4
- 1.6.3
- 1.7
install:
- go get -v golang.org/x/tools/cmd/cover
script:
- go test -v -tags=safe ./spew
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci

15
vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

205
vendor/github.com/davecgh/go-spew/README.md generated vendored Normal file
View file

@ -0,0 +1,205 @@
go-spew
=======
[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.
If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
## Documentation
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
## Installation
```bash
$ go get -u github.com/davecgh/go-spew/spew
```
## Quick Start
Add this import line to the file you're working in:
```Go
import "github.com/davecgh/go-spew/spew"
```
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):
```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```
## Debugging a Web Application Example
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
```Go
package main
import (
"fmt"
"html"
"net/http"
"github.com/davecgh/go-spew/spew"
)
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
}
func main() {
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
```
## Sample Dump Output
```
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) {
(string) "one": (bool) true
}
}
([]uint8) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
```
## Sample Formatter Output
Double pointer to a uint8:
```
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```
Pointer to circular struct with a uint8 field and a pointer to itself:
```
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
```
## Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
```
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables. This option
relies on access to the unsafe package, so it will not have any effect when
running in environments without access to the unsafe package such as Google
App Engine or with the "safe" build tag specified.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of capacities
for arrays, slices, maps and channels. This is useful when diffing data
structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are supported,
with other types sorted according to the reflect.Value.String() output
which guarantees display stability. Natural map order is used by
default.
* SpewKeys
SpewKeys specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only considered
if SortKeys is true.
```
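As a rough illustration (not part of the reference list above), the options can be set either on the global `spew.Config` or on a dedicated `ConfigState` instance:

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Tweak the global configuration used by the top-level functions.
	spew.Config.Indent = "\t"
	spew.Config.MaxDepth = 3
	spew.Config.SortKeys = true

	data := map[string]interface{}{
		"b": []int{3, 2, 1},
		"a": struct{ X, Y int }{1, 2},
	}
	spew.Dump(data)

	// Or keep an independent ConfigState so the settings don't leak globally.
	cs := spew.ConfigState{Indent: "  ", DisablePointerAddresses: true}
	cs.Dump(data)
}
```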
## Unsafe Package Dependency
This package relies on the unsafe package to perform some of the more advanced
features, however it also supports a "limited" mode which allows it to work in
environments where the unsafe package is not available. By default, it will
operate in this mode on Google App Engine and when compiled with GopherJS. The
"safe" build tag may also be specified to force the package to build without
using the unsafe package.
## License
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.

22
vendor/github.com/davecgh/go-spew/cov_report.sh generated vendored Normal file
View file

@ -0,0 +1,22 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
(cd spew && gocov test -tags testcgo | gocov report)
else
(cd spew && gocov test | gocov report)
fi

152
vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
View file

@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}
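To make the purpose of the offsets above concrete, here is a rough, hypothetical usage sketch (the `label` and `record` types are invented for illustration): with this unsafe bypass compiled in, spew can invoke a Stringer on an unexported field, something plain reflection refuses to allow via Interface().

```Go
package main

import "github.com/davecgh/go-spew/spew"

// label implements fmt.Stringer with a value receiver.
type label string

func (l label) String() string { return "label:" + string(l) }

type record struct {
	tag label // unexported field holding a Stringer
	N   int
}

func main() {
	r := record{tag: "alpha", N: 1}

	// With bypass.go compiled in, spew reaches r.tag through the unsafe
	// path and calls its String method; under the "safe" build tag it
	// falls back to printing the raw string value instead.
	spew.Dump(r)
}
```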

38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

341
vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
View file

@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64 bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}
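The sorter above is what backs the SortKeys and SpewKeys options. A small sketch of the user-visible effect on map output (the ordering is the point here; the exact dump formatting is spew's):

```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}

	// Natural map iteration order: output can change between runs.
	unsorted := spew.ConfigState{Indent: " "}
	unsorted.Dump(m)

	// SortKeys sorts native keys (strings here) before printing, giving
	// deterministic, diff-friendly output.
	sorted := spew.ConfigState{Indent: " ", SortKeys: true}
	sorted.Dump(m)
}
```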

298
vendor/github.com/davecgh/go-spew/spew/common_test.go generated vendored Normal file
View file

@ -0,0 +1,298 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// custom type to test Stringer interface on non-pointer receiver.
type stringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with non-pointer receivers.
func (s stringer) String() string {
return "stringer " + string(s)
}
// custom type to test Stringer interface on pointer receiver.
type pstringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection.
type xref1 struct {
ps2 *xref2
}
type xref2 struct {
ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
// reference for testing detection.
type indirCir1 struct {
ps2 *indirCir2
}
type indirCir2 struct {
ps3 *indirCir3
}
type indirCir3 struct {
ps1 *indirCir1
}
// embed is used to test embedded structures.
type embed struct {
a string
}
// embedwrap is used to test embedded structures.
type embedwrap struct {
*embed
e *embed
}
// panicer is used to intentionally cause a panic for testing spew properly
// handles them
type panicer int
func (p panicer) String() string {
panic("test panic")
}
// customError is used to test custom error interface invocation.
type customError int
func (e customError) Error() string {
return fmt.Sprintf("error: %d", int(e))
}
// stringizeWants converts a slice of wanted test output into a format suitable
// for a test error message.
func stringizeWants(wants []string) string {
s := ""
for i, want := range wants {
if i > 0 {
s += fmt.Sprintf("want%d: %s", i+1, want)
} else {
s += "want: " + want
}
}
return s
}
// testFailed returns whether or not a test failed by checking if the result
// of the test is in the slice of wanted strings.
func testFailed(result string, wants []string) bool {
for _, want := range wants {
if result == want {
return false
}
}
return true
}
type sortableStruct struct {
x int
}
func (ss sortableStruct) String() string {
return fmt.Sprintf("ss.%d", ss.x)
}
type unsortableStruct struct {
x int
}
type sortTestCase struct {
input []reflect.Value
expected []reflect.Value
}
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
getInterfaces := func(values []reflect.Value) []interface{} {
interfaces := []interface{}{}
for _, v := range values {
interfaces = append(interfaces, v.Interface())
}
return interfaces
}
for _, test := range tests {
spew.SortValues(test.input, cs)
// reflect.DeepEqual cannot really make sense of reflect.Value,
// probably because of all the pointer tricks. For instance,
// v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}
// instead.
input := getInterfaces(test.input)
expected := getInterfaces(test.expected)
if !reflect.DeepEqual(input, expected) {
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
}
}
}
// TestSortValues ensures the sort functionality for reflect.Value based sorting
// works as intended.
func TestSortValues(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
embedA := v(embed{"a"})
embedB := v(embed{"b"})
embedC := v(embed{"c"})
tests := []sortTestCase{
// No values.
{
[]reflect.Value{},
[]reflect.Value{},
},
// Bools.
{
[]reflect.Value{v(false), v(true), v(false)},
[]reflect.Value{v(false), v(false), v(true)},
},
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Uints.
{
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
},
// Floats.
{
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// Array
{
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
},
// Uintptrs.
{
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
},
// SortableStructs.
{
// Note: not sorted - DisableMethods is set.
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
// Invalid.
{
[]reflect.Value{embedB, embedA, embedC},
[]reflect.Value{embedB, embedA, embedC},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
// based sorting works as intended when using string methods.
func TestSortValuesWithMethods(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
// based sorting works as intended when using spew to stringify keys.
func TestSortValuesWithSpew(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
helpTestSortValues(tests, &cs, t)
}

306
vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
View file

@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
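A short sketch tying the wrappers above together: a dedicated ConfigState created with NewDefaultConfig, driven through the fmt-style helpers (the `point` struct is invented for illustration):

```Go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	c := spew.NewDefaultConfig() // Indent: " ", all other options off

	type point struct{ X, Y int }
	p := &point{X: 1, Y: 2}

	// The fmt-style wrappers route every argument through c.NewFormatter,
	// so %v, %+v, %#v and %#+v pick up spew's inline formatting.
	c.Printf("compact: %v\n", p)
	c.Fprintf(os.Stderr, "with types: %#v\n", p)

	// Sdump returns the full multi-line dump as a string.
	os.Stdout.WriteString(c.Sdump(p))
}
```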

211
vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
View file

@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
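To illustrate the Errors paragraph at the end of the doc comment above: a hypothetical Stringer that panics is caught and reported inline rather than aborting the dump (the type and panic message here are invented; the exact `(PANIC=...)` text comes from spew's dump machinery):

```Go
package main

import "github.com/davecgh/go-spew/spew"

// badStringer always panics inside String; spew must survive it.
type badStringer int

func (b badStringer) String() string {
	panic("boom")
}

func main() {
	// The panic is recovered and rendered inline, roughly:
	//   (main.badStringer) (PANIC=boom) ...
	spew.Dump(badStringer(3))
}
```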

509
vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
View file

@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
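For illustration only (not part of this commit): a minimal sketch of the Dump, Fdump, and Sdump entry points documented above, assuming the vendored import path.

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := struct {
		Name string
		Data []byte
	}{Name: "example", Data: []byte("test")}

	spew.Dump(v)             // stdout: types, lengths, and hexdump -C style byte slices
	spew.Fdump(os.Stderr, v) // same output written to an arbitrary io.Writer
	_ = spew.Sdump(v)        // same output returned as a string
}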

1042
vendor/github.com/davecgh/go-spew/spew/dump_test.go generated vendored Normal file

File diff suppressed because it is too large

99
vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go generated vendored Normal file
View file

@ -0,0 +1,99 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This means the cgo tests are only added (and hence run) when
// specifically requested. This configuration is used because spew itself
// does not require cgo to run even though it does handle certain cgo types
// specially. Rather than forcing all clients to require cgo and an external
// C compiler just to run the tests, this scheme makes them optional.
// +build cgo,testcgo
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew/testdata"
)
func addCgoDumpTests() {
// C char pointer.
v := testdata.GetCgoCharPointer()
nv := testdata.GetCgoNullCharPointer()
pv := &v
vcAddr := fmt.Sprintf("%p", v)
vAddr := fmt.Sprintf("%p", pv)
pvAddr := fmt.Sprintf("%p", &pv)
vt := "*testdata._Ctype_char"
vs := "116"
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(nv, "("+vt+")(<nil>)\n")
// C char array.
v2, v2l, v2c := testdata.GetCgoCharArray()
v2Len := fmt.Sprintf("%d", v2l)
v2Cap := fmt.Sprintf("%d", v2c)
v2t := "[6]testdata._Ctype_char"
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
"{\n 00000000 74 65 73 74 32 00 " +
" |test2.|\n}"
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
// C unsigned char array.
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
v3Len := fmt.Sprintf("%d", v3l)
v3Cap := fmt.Sprintf("%d", v3c)
v3t := "[6]testdata._Ctype_unsignedchar"
v3t2 := "[6]testdata._Ctype_uchar"
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
"{\n 00000000 74 65 73 74 33 00 " +
" |test3.|\n}"
addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
// C signed char array.
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
v4Len := fmt.Sprintf("%d", v4l)
v4Cap := fmt.Sprintf("%d", v4c)
v4t := "[6]testdata._Ctype_schar"
v4t2 := "testdata._Ctype_schar"
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
") 0\n}"
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
// C uint8_t array.
v5, v5l, v5c := testdata.GetCgoUint8tArray()
v5Len := fmt.Sprintf("%d", v5l)
v5Cap := fmt.Sprintf("%d", v5c)
v5t := "[6]testdata._Ctype_uint8_t"
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
"{\n 00000000 74 65 73 74 35 00 " +
" |test5.|\n}"
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
// C typedefed unsigned char array.
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
v6Len := fmt.Sprintf("%d", v6l)
v6Cap := fmt.Sprintf("%d", v6c)
v6t := "[6]testdata._Ctype_custom_uchar_t"
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
"{\n 00000000 74 65 73 74 36 00 " +
" |test6.|\n}"
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
}

View file

@ -0,0 +1,26 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either cgo is not supported or "-tags testcgo" is not added to the go
// test command line. This file intentionally does not set up any cgo tests in
// this scenario.
// +build !cgo !testcgo
package spew_test
func addCgoDumpTests() {
// Don't add any tests for cgo since this file is only compiled when
// there should not be any cgo tests.
}

226
vendor/github.com/davecgh/go-spew/spew/example_test.go generated vendored Normal file
View file

@ -0,0 +1,226 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
// This example demonstrates how to use Dump to dump variables to stdout.
func ExampleDump() {
// The following package level declarations are assumed for this example:
/*
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
*/
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
f := Flag(5)
b := []byte{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32,
}
// Dump!
spew.Dump(s1, f, b)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Flag) Unknown flag (5)
// ([]uint8) (len=34 cap=34) {
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
// 00000020 31 32 |12|
// }
//
}
// This example demonstrates how to use Printf to display a variable with a
// format string and inline formatting.
func ExamplePrintf() {
// Create a double pointer to a uint 8.
ui8 := uint8(5)
pui8 := &ui8
ppui8 := &pui8
// Create a circular data type.
type circular struct {
ui8 uint8
c *circular
}
c := circular{ui8: 1}
c.c = &c
// Print!
spew.Printf("ppui8: %v\n", ppui8)
spew.Printf("circular: %v\n", c)
// Output:
// ppui8: <**>5
// circular: {1 <*>{1 <*><shown>}}
}
// This example demonstrates how to use a ConfigState.
func ExampleConfigState() {
// Modify the indent level of the ConfigState only. The global
// configuration is not modified.
scs := spew.ConfigState{Indent: "\t"}
// Output using the ConfigState instance.
v := map[string]int{"one": 1}
scs.Printf("v: %v\n", v)
scs.Dump(v)
// Output:
// v: map[one:1]
// (map[string]int) (len=1) {
// (string) (len=3) "one": (int) 1
// }
}
// This example demonstrates how to use ConfigState.Dump to dump variables to
// stdout
func ExampleConfigState_Dump() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances with different indentation.
scs := spew.ConfigState{Indent: "\t"}
scs2 := spew.ConfigState{Indent: " "}
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
// Dump using the ConfigState instances.
scs.Dump(s1)
scs2.Dump(s1)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
//
}
// This example demonstrates how to use ConfigState.Printf to display a variable
// with a format string and inline formatting.
func ExampleConfigState_Printf() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances and modify the method handling of the
// first ConfigState only.
scs := spew.NewDefaultConfig()
scs2 := spew.NewDefaultConfig()
scs.DisableMethods = true
// Alternatively
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
// scs2 := spew.ConfigState{Indent: " "}
// This is of type Flag which implements a Stringer and has raw value 1.
f := flagTwo
// Dump using the ConfigState instances.
scs.Printf("f: %v\n", f)
scs2.Printf("f: %v\n", f)
// Output:
// f: 1
// f: flagTwo
}

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
View file

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound == true:
f.fs.Write(nilAngleBytes)
case cycleFound == true:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
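For illustration only (not part of this commit): a minimal sketch of using NewFormatter with the standard fmt package, as described in the comment above.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string]int{"one": 1}

	// The wrapped value responds to the %v, %+v, %#v, and %#+v verbs.
	fmt.Printf("%v\n", spew.NewFormatter(v))
	fmt.Printf("%#v\n", spew.NewFormatter(v))

	// Other verbs such as %q are passed through to the standard fmt package.
	fmt.Printf("%q\n", spew.NewFormatter("hello"))
}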

1558
vendor/github.com/davecgh/go-spew/spew/format_test.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
)
// dummyFmtState implements a fake fmt.State to use for testing invalid
// reflect.Value handling. This is necessary because the fmt package catches
// invalid values before invoking the formatter on them.
type dummyFmtState struct {
bytes.Buffer
}
func (dfs *dummyFmtState) Flag(f int) bool {
if f == int('+') {
return true
}
return false
}
func (dfs *dummyFmtState) Precision() (int, bool) {
return 0, false
}
func (dfs *dummyFmtState) Width() (int, bool) {
return 0, false
}
// TestInvalidReflectValue ensures the dump and formatter code handles an
// invalid reflect value properly. This needs access to internal state since it
// should never happen in real code and therefore can't be tested via the public
// API.
func TestInvalidReflectValue(t *testing.T) {
i := 1
// Dump invalid reflect value.
v := new(reflect.Value)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(*v)
s := buf.String()
want := "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter invalid reflect value.
buf2 := new(dummyFmtState)
f := formatState{value: *v, cs: &Config, fs: buf2}
f.format(*v)
s = buf2.String()
want = "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
}
}
// SortValues makes the internal sortValues function available to the test
// package.
func SortValues(values []reflect.Value, cs *ConfigState) {
sortValues(values, cs)
}

View file

@ -0,0 +1,102 @@
// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, not compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
"unsafe"
)
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
// the maximum kind value which does not exist. This is needed to test the
// fallback code which punts to the standard fmt library for new types that
// might get added to the language.
func changeKind(v *reflect.Value, readOnly bool) {
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
if readOnly {
*rvf |= flagRO
} else {
*rvf &= ^uintptr(flagRO)
}
}
// TestAddedReflectValue tests functionality of the dump and formatter code which
// falls back to the standard fmt library for new types that might get added to
// the language.
func TestAddedReflectValue(t *testing.T) {
i := 1
// Dump using a reflect.Value that is exported.
v := reflect.ValueOf(int8(5))
changeKind(&v, false)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(v)
s := buf.String()
want := "(int8) 5"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Dump using a reflect.Value that is not exported.
changeKind(&v, true)
buf.Reset()
d.dump(v)
s = buf.String()
want = "(int8) <int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is exported.
changeKind(&v, false)
buf2 := new(dummyFmtState)
f := formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "5"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is not exported.
changeKind(&v, true)
buf2.Reset()
f = formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "<int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
}

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
View file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
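For illustration only (not part of this commit): the wrappers above are shorthand for converting each argument with NewFormatter, as the doc comments state. A minimal sketch:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	a := map[string]int{"one": 1}
	b := []byte("hi")

	// These two lines are equivalent; the wrapper converts each argument
	// with NewFormatter before calling the corresponding fmt function.
	spew.Printf("a: %+v b: %+v\n", a, b)
	fmt.Printf("a: %+v b: %+v\n", spew.NewFormatter(a), spew.NewFormatter(b))

	// Errorf behaves the same way and returns an error value.
	err := spew.Errorf("unexpected: %#v", a)
	fmt.Println(err)
}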

320
vendor/github.com/davecgh/go-spew/spew/spew_test.go generated vendored Normal file
View file

@ -0,0 +1,320 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/davecgh/go-spew/spew"
)
// spewFunc is used to identify which public function of the spew package or
// ConfigState a test applies to.
type spewFunc int
const (
fCSFdump spewFunc = iota
fCSFprint
fCSFprintf
fCSFprintln
fCSPrint
fCSPrintln
fCSSdump
fCSSprint
fCSSprintf
fCSSprintln
fCSErrorf
fCSNewFormatter
fErrorf
fFprint
fFprintln
fPrint
fPrintln
fSdump
fSprint
fSprintf
fSprintln
)
// Map of spewFunc values to names for pretty printing.
var spewFuncStrings = map[spewFunc]string{
fCSFdump: "ConfigState.Fdump",
fCSFprint: "ConfigState.Fprint",
fCSFprintf: "ConfigState.Fprintf",
fCSFprintln: "ConfigState.Fprintln",
fCSSdump: "ConfigState.Sdump",
fCSPrint: "ConfigState.Print",
fCSPrintln: "ConfigState.Println",
fCSSprint: "ConfigState.Sprint",
fCSSprintf: "ConfigState.Sprintf",
fCSSprintln: "ConfigState.Sprintln",
fCSErrorf: "ConfigState.Errorf",
fCSNewFormatter: "ConfigState.NewFormatter",
fErrorf: "spew.Errorf",
fFprint: "spew.Fprint",
fFprintln: "spew.Fprintln",
fPrint: "spew.Print",
fPrintln: "spew.Println",
fSdump: "spew.Sdump",
fSprint: "spew.Sprint",
fSprintf: "spew.Sprintf",
fSprintln: "spew.Sprintln",
}
func (f spewFunc) String() string {
if s, ok := spewFuncStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
}
// spewTest is used to describe a test to be performed against the public
// functions of the spew package or ConfigState.
type spewTest struct {
cs *spew.ConfigState
f spewFunc
format string
in interface{}
want string
}
// spewTests houses the tests to be performed against the public functions of
// the spew package and ConfigState.
//
// These tests are only intended to ensure the public functions are exercised
// and are intentionally not exhaustive of types. The exhaustive type
// tests are handled in the dump and format tests.
var spewTests []spewTest
// redirStdout is a helper function to return the standard output from f as a
// byte slice.
func redirStdout(f func()) ([]byte, error) {
tempFile, err := ioutil.TempFile("", "ss-test")
if err != nil {
return nil, err
}
fileName := tempFile.Name()
defer os.Remove(fileName) // Ignore error
origStdout := os.Stdout
os.Stdout = tempFile
f()
os.Stdout = origStdout
tempFile.Close()
return ioutil.ReadFile(fileName)
}
func initSpewTests() {
// Config states with various settings.
scsDefault := spew.NewDefaultConfig()
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
scsNoCap := &spew.ConfigState{DisableCapacities: true}
// Variables for tests on types which implement Stringer interface with and
// without a pointer receiver.
ts := stringer("test")
tps := pstringer("test")
type ptrTester struct {
s *struct{}
}
tptr := &ptrTester{s: &struct{}{}}
// depthTester is used to test max depth handling for structs, array, slices
// and maps.
type depthTester struct {
ic indirCir1
arr [1]string
slice []string
m map[string]int
}
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
map[string]int{"one": 1}}
// Variable for tests on types which implement error interface.
te := customError(10)
spewTests = []spewTest{
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
{scsDefault, fCSFprint, "", int16(32767), "32767"},
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
{scsDefault, fFprint, "", float32(3.14), "3.14"},
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
{scsDefault, fPrint, "", true, "true"},
{scsDefault, fPrintln, "", false, "false\n"},
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
{scsNoMethods, fCSFprint, "", ts, "test"},
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
{scsNoMethods, fCSFprint, "", tps, "test"},
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
{scsNoPmethods, fCSFprint, "", tps, "test"},
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
"(len=4) (stringer test) \"test\"\n"},
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
"(error: 10) 10\n"},
{scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
{scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
{scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
{scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
}
}
// TestSpew executes all of the tests described by spewTests.
func TestSpew(t *testing.T) {
initSpewTests()
t.Logf("Running %d tests", len(spewTests))
for i, test := range spewTests {
buf := new(bytes.Buffer)
switch test.f {
case fCSFdump:
test.cs.Fdump(buf, test.in)
case fCSFprint:
test.cs.Fprint(buf, test.in)
case fCSFprintf:
test.cs.Fprintf(buf, test.format, test.in)
case fCSFprintln:
test.cs.Fprintln(buf, test.in)
case fCSPrint:
b, err := redirStdout(func() { test.cs.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSPrintln:
b, err := redirStdout(func() { test.cs.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSSdump:
str := test.cs.Sdump(test.in)
buf.WriteString(str)
case fCSSprint:
str := test.cs.Sprint(test.in)
buf.WriteString(str)
case fCSSprintf:
str := test.cs.Sprintf(test.format, test.in)
buf.WriteString(str)
case fCSSprintln:
str := test.cs.Sprintln(test.in)
buf.WriteString(str)
case fCSErrorf:
err := test.cs.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fCSNewFormatter:
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
case fErrorf:
err := spew.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fFprint:
spew.Fprint(buf, test.in)
case fFprintln:
spew.Fprintln(buf, test.in)
case fPrint:
b, err := redirStdout(func() { spew.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fPrintln:
b, err := redirStdout(func() { spew.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fSdump:
str := spew.Sdump(test.in)
buf.WriteString(str)
case fSprint:
str := spew.Sprint(test.in)
buf.WriteString(str)
case fSprintf:
str := spew.Sprintf(test.format, test.in)
buf.WriteString(str)
case fSprintln:
str := spew.Sprintln(test.in)
buf.WriteString(str)
default:
t.Errorf("%v #%d unrecognized function", test.f, i)
continue
}
s := buf.String()
if test.want != s {
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
continue
}
}
}
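For illustration only (not part of this commit): the redirStdout helper above is only needed because Print and Println write to os.Stdout; a sketch of the writer- and string-based variants that avoid the redirection entirely:

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string]int{"one": 1}

	// Capture output in a buffer instead of redirecting os.Stdout.
	var buf bytes.Buffer
	spew.Fprintln(&buf, v)

	// Or get it back directly as a string.
	s := spew.Sprintln(v)

	fmt.Print(buf.String(), s)
}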

View file

@ -0,0 +1,82 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This code should really only be in the dumpcgo_test.go file,
// but unfortunately Go will not allow cgo in test files, so this is a
// workaround to allow cgo types to be tested. This configuration is used
// because spew itself does not require cgo to run even though it does handle
// certain cgo types specially. Rather than forcing all clients to require cgo
// and an external C compiler just to run the tests, this scheme makes them
// optional.
// +build cgo,testcgo
package testdata
/*
#include <stdint.h>
typedef unsigned char custom_uchar_t;
char *ncp = 0;
char *cp = "test";
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
*/
import "C"
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
// used for tests.
func GetCgoNullCharPointer() interface{} {
return C.ncp
}
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
// tests.
func GetCgoCharPointer() interface{} {
return C.cp
}
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
// This is only used for tests.
func GetCgoCharArray() (interface{}, int, int) {
return C.ca, len(C.ca), cap(C.ca)
}
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
// array's len and cap. This is only used for tests.
func GetCgoUnsignedCharArray() (interface{}, int, int) {
return C.uca, len(C.uca), cap(C.uca)
}
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
// and cap. This is only used for tests.
func GetCgoSignedCharArray() (interface{}, int, int) {
return C.sca, len(C.sca), cap(C.sca)
}
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
// cap. This is only used for tests.
func GetCgoUint8tArray() (interface{}, int, int) {
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
}
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
// cgo and the array's len and cap. This is only used for tests.
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
return C.tuca, len(C.tuca), cap(C.tuca)
}

61
vendor/github.com/davecgh/go-spew/test_coverage.txt generated vendored Normal file
View file

@ -0,0 +1,61 @@
github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)

13
vendor/github.com/docker/spdystream/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,13 @@
# Contributing to SpdyStream
Want to hack on spdystream? Awesome! Here are instructions to get you
started.
SpdyStream is a part of the [Docker](https://docker.io) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

191
vendor/github.com/docker/spdystream/LICENSE generated vendored Normal file
View file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

425
vendor/github.com/docker/spdystream/LICENSE.docs generated vendored Normal file
View file

@ -0,0 +1,425 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.

28
vendor/github.com/docker/spdystream/MAINTAINERS generated vendored Normal file
View file

@ -0,0 +1,28 @@
# Spdystream maintainers file
#
# This file describes who runs the docker/spdystream project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"dmcgowan",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"

77
vendor/github.com/docker/spdystream/README.md generated vendored Normal file
View file

@ -0,0 +1,77 @@
# SpdyStream
A multiplexed stream library using SPDY
## Usage
Client example (connecting to mirroring server without auth)
```go
package main
import (
"fmt"
"github.com/docker/spdystream"
"net"
"net/http"
)
func main() {
conn, err := net.Dial("tcp", "localhost:8080")
if err != nil {
panic(err)
}
spdyConn, err := spdystream.NewConnection(conn, false)
if err != nil {
panic(err)
}
go spdyConn.Serve(spdystream.NoOpStreamHandler)
stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
if err != nil {
panic(err)
}
stream.Wait()
fmt.Fprint(stream, "Writing to stream")
buf := make([]byte, 25)
stream.Read(buf)
fmt.Println(string(buf))
stream.Close()
}
```
Server example (mirroring server without auth)
```go
package main
import (
"github.com/docker/spdystream"
"net"
)
func main() {
listener, err := net.Listen("tcp", "localhost:8080")
if err != nil {
panic(err)
}
for {
conn, err := listener.Accept()
if err != nil {
panic(err)
}
spdyConn, err := spdystream.NewConnection(conn, true)
if err != nil {
panic(err)
}
go spdyConn.Serve(spdystream.MirrorStreamHandler)
}
}
```
## Copyright and license
Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

959
vendor/github.com/docker/spdystream/connection.go generated vendored Normal file
View file

@ -0,0 +1,959 @@
package spdystream
import (
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/docker/spdystream/spdy"
)
var (
ErrInvalidStreamId = errors.New("Invalid stream id")
ErrTimeout = errors.New("Timeout occurred")
ErrReset = errors.New("Stream reset")
ErrWriteClosedStream = errors.New("Write on closed stream")
)
const (
FRAME_WORKERS = 5
QUEUE_SIZE = 50
)
type StreamHandler func(stream *Stream)
type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
type idleAwareFramer struct {
f *spdy.Framer
conn *Connection
writeLock sync.Mutex
resetChan chan struct{}
setTimeoutLock sync.Mutex
setTimeoutChan chan time.Duration
timeout time.Duration
}
func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
iaf := &idleAwareFramer{
f: framer,
resetChan: make(chan struct{}, 2),
// setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
// the same time the connection is being closed
setTimeoutChan: make(chan time.Duration, 1),
}
return iaf
}
func (i *idleAwareFramer) monitor() {
var (
timer *time.Timer
expired <-chan time.Time
resetChan = i.resetChan
setTimeoutChan = i.setTimeoutChan
)
Loop:
for {
select {
case timeout := <-i.setTimeoutChan:
i.timeout = timeout
if timeout == 0 {
if timer != nil {
timer.Stop()
}
} else {
if timer == nil {
timer = time.NewTimer(timeout)
expired = timer.C
} else {
timer.Reset(timeout)
}
}
case <-resetChan:
if timer != nil && i.timeout > 0 {
timer.Reset(i.timeout)
}
case <-expired:
i.conn.streamCond.L.Lock()
streams := i.conn.streams
i.conn.streams = make(map[spdy.StreamId]*Stream)
i.conn.streamCond.Broadcast()
i.conn.streamCond.L.Unlock()
go func() {
for _, stream := range streams {
stream.resetStream()
}
i.conn.Close()
}()
case <-i.conn.closeChan:
if timer != nil {
timer.Stop()
}
// Start a goroutine to drain resetChan. This is needed because we've seen
// some unit tests with large numbers of goroutines get into a situation
// where resetChan fills up, at least 1 call to Write() is still trying to
// send to resetChan, the connection gets closed, and this case statement
// attempts to grab the write lock that Write() already has, causing a
// deadlock.
//
// See https://github.com/docker/spdystream/issues/49 for more details.
go func() {
for _ = range resetChan {
}
}()
go func() {
for _ = range setTimeoutChan {
}
}()
i.writeLock.Lock()
close(resetChan)
i.resetChan = nil
i.writeLock.Unlock()
i.setTimeoutLock.Lock()
close(i.setTimeoutChan)
i.setTimeoutChan = nil
i.setTimeoutLock.Unlock()
break Loop
}
}
// Drain resetChan
for _ = range resetChan {
}
}
func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
i.writeLock.Lock()
defer i.writeLock.Unlock()
if i.resetChan == nil {
return io.EOF
}
err := i.f.WriteFrame(frame)
if err != nil {
return err
}
i.resetChan <- struct{}{}
return nil
}
func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
frame, err := i.f.ReadFrame()
if err != nil {
return nil, err
}
// resetChan should never be closed since it is only closed
// when the connection has closed its closeChan. This closure
// only occurs after all Reads have finished
// TODO (dmcgowan): refactor relationship into connection
i.resetChan <- struct{}{}
return frame, nil
}
func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
i.setTimeoutLock.Lock()
defer i.setTimeoutLock.Unlock()
if i.setTimeoutChan == nil {
return
}
i.setTimeoutChan <- timeout
}
type Connection struct {
conn net.Conn
framer *idleAwareFramer
closeChan chan bool
goneAway bool
lastStreamChan chan<- *Stream
goAwayTimeout time.Duration
closeTimeout time.Duration
streamLock *sync.RWMutex
streamCond *sync.Cond
streams map[spdy.StreamId]*Stream
nextIdLock sync.Mutex
receiveIdLock sync.Mutex
nextStreamId spdy.StreamId
receivedStreamId spdy.StreamId
pingIdLock sync.Mutex
pingId uint32
pingChans map[uint32]chan error
shutdownLock sync.Mutex
shutdownChan chan error
hasShutdown bool
// for testing https://github.com/docker/spdystream/pull/56
dataFrameHandler func(*spdy.DataFrame) error
}
// NewConnection creates a new spdy connection from an existing
// network connection.
func NewConnection(conn net.Conn, server bool) (*Connection, error) {
framer, framerErr := spdy.NewFramer(conn, conn)
if framerErr != nil {
return nil, framerErr
}
idleAwareFramer := newIdleAwareFramer(framer)
var sid spdy.StreamId
var rid spdy.StreamId
var pid uint32
if server {
sid = 2
rid = 1
pid = 2
} else {
sid = 1
rid = 2
pid = 1
}
streamLock := new(sync.RWMutex)
streamCond := sync.NewCond(streamLock)
session := &Connection{
conn: conn,
framer: idleAwareFramer,
closeChan: make(chan bool),
goAwayTimeout: time.Duration(0),
closeTimeout: time.Duration(0),
streamLock: streamLock,
streamCond: streamCond,
streams: make(map[spdy.StreamId]*Stream),
nextStreamId: sid,
receivedStreamId: rid,
pingId: pid,
pingChans: make(map[uint32]chan error),
shutdownChan: make(chan error),
}
session.dataFrameHandler = session.handleDataFrame
idleAwareFramer.conn = session
go idleAwareFramer.monitor()
return session, nil
}
// Ping sends a ping frame across the connection and
// returns the response time
func (s *Connection) Ping() (time.Duration, error) {
pid := s.pingId
s.pingIdLock.Lock()
if s.pingId > 0x7ffffffe {
s.pingId = s.pingId - 0x7ffffffe
} else {
s.pingId = s.pingId + 2
}
s.pingIdLock.Unlock()
pingChan := make(chan error)
s.pingChans[pid] = pingChan
defer delete(s.pingChans, pid)
frame := &spdy.PingFrame{Id: pid}
startTime := time.Now()
writeErr := s.framer.WriteFrame(frame)
if writeErr != nil {
return time.Duration(0), writeErr
}
select {
case <-s.closeChan:
return time.Duration(0), errors.New("connection closed")
case err, ok := <-pingChan:
if ok && err != nil {
return time.Duration(0), err
}
break
}
return time.Now().Sub(startTime), nil
}
// Serve handles frames sent from the server, including reply frames
// which are needed to fully initiate connections. Both clients and servers
// should call Serve in a separate goroutine before creating streams.
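// A minimal client-side sketch (illustrative; variable names are assumed and
// mirror the README example):
//
//	go conn.Serve(spdystream.NoOpStreamHandler)
//	stream, err := conn.CreateStream(http.Header{}, nil, false)
//	if err == nil {
//		stream.Wait()
//	}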
func (s *Connection) Serve(newHandler StreamHandler) {
// use a WaitGroup to wait for all frames to be drained after receiving
// go-away.
var wg sync.WaitGroup
// Partition queues to ensure stream frames are handled
// by the same worker, ensuring order is maintained
frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
for i := 0; i < FRAME_WORKERS; i++ {
frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
// Ensure frame queue is drained when connection is closed
go func(frameQueue *PriorityFrameQueue) {
<-s.closeChan
frameQueue.Drain()
}(frameQueues[i])
wg.Add(1)
go func(frameQueue *PriorityFrameQueue) {
// let the WaitGroup know this worker is done
defer wg.Done()
s.frameHandler(frameQueue, newHandler)
}(frameQueues[i])
}
var (
partitionRoundRobin int
goAwayFrame *spdy.GoAwayFrame
)
Loop:
for {
readFrame, err := s.framer.ReadFrame()
if err != nil {
if err != io.EOF {
debugMessage("frame read error: %s", err)
} else {
debugMessage("(%p) EOF received", s)
}
break
}
var priority uint8
var partition int
switch frame := readFrame.(type) {
case *spdy.SynStreamFrame:
if s.checkStreamFrame(frame) {
priority = frame.Priority
partition = int(frame.StreamId % FRAME_WORKERS)
debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
s.addStreamFrame(frame)
} else {
debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
continue
}
case *spdy.SynReplyFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.DataFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.RstStreamFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.HeadersFrame:
priority = s.getStreamPriority(frame.StreamId)
partition = int(frame.StreamId % FRAME_WORKERS)
case *spdy.PingFrame:
priority = 0
partition = partitionRoundRobin
partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
case *spdy.GoAwayFrame:
// hold on to the go away frame and exit the loop
goAwayFrame = frame
break Loop
default:
priority = 7
partition = partitionRoundRobin
partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
}
frameQueues[partition].Push(readFrame, priority)
}
close(s.closeChan)
// wait for all frame handler workers to indicate they've drained their queues
// before handling the go away frame
wg.Wait()
if goAwayFrame != nil {
s.handleGoAwayFrame(goAwayFrame)
}
// now it's safe to close remote channels and empty s.streams
s.streamCond.L.Lock()
// notify streams that they're now closed, which will
// unblock any stream Read() calls
for _, stream := range s.streams {
stream.closeRemoteChannels()
}
s.streams = make(map[spdy.StreamId]*Stream)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
for {
popFrame := frameQueue.Pop()
if popFrame == nil {
return
}
var frameErr error
switch frame := popFrame.(type) {
case *spdy.SynStreamFrame:
frameErr = s.handleStreamFrame(frame, newHandler)
case *spdy.SynReplyFrame:
frameErr = s.handleReplyFrame(frame)
case *spdy.DataFrame:
frameErr = s.dataFrameHandler(frame)
case *spdy.RstStreamFrame:
frameErr = s.handleResetFrame(frame)
case *spdy.HeadersFrame:
frameErr = s.handleHeaderFrame(frame)
case *spdy.PingFrame:
frameErr = s.handlePingFrame(frame)
case *spdy.GoAwayFrame:
frameErr = s.handleGoAwayFrame(frame)
default:
frameErr = fmt.Errorf("unhandled frame type: %T", frame)
}
if frameErr != nil {
debugMessage("frame handling error: %s", frameErr)
}
}
}
func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
stream, streamOk := s.getStream(streamId)
if !streamOk {
return 7
}
return stream.priority
}
func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
var parent *Stream
if frame.AssociatedToStreamId != spdy.StreamId(0) {
parent, _ = s.getStream(frame.AssociatedToStreamId)
}
stream := &Stream{
streamId: frame.StreamId,
parent: parent,
conn: s,
startChan: make(chan error),
headers: frame.Headers,
finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
replyCond: sync.NewCond(new(sync.Mutex)),
dataChan: make(chan []byte),
headerChan: make(chan http.Header),
closeChan: make(chan bool),
priority: frame.Priority,
}
if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
stream.closeRemoteChannels()
}
s.addStream(stream)
}
// checkStreamFrame checks whether a stream frame is allowed.
// If the stream id is invalid, a reset frame with a protocol error
// is sent to the peer and false is returned.
func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
s.receiveIdLock.Lock()
defer s.receiveIdLock.Unlock()
if s.goneAway {
return false
}
validationErr := s.validateStreamId(frame.StreamId)
if validationErr != nil {
go func() {
resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
if resetErr != nil {
debugMessage("reset error: %s", resetErr)
}
}()
return false
}
return true
}
func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
stream, ok := s.getStream(frame.StreamId)
if !ok {
return fmt.Errorf("Missing stream: %d", frame.StreamId)
}
newHandler(stream)
return nil
}
func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
debugMessage("Reply frame gone away for %d", frame.StreamId)
// Stream has already gone away
return nil
}
if stream.replied {
// Stream has already received reply
return nil
}
stream.replied = true
// TODO Check for error
if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
close(stream.startChan)
return nil
}
func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
// Stream has already been removed
return nil
}
s.removeStream(stream)
stream.closeRemoteChannels()
if !stream.replied {
stream.replied = true
stream.startChan <- ErrReset
close(stream.startChan)
}
stream.finishLock.Lock()
stream.finished = true
stream.finishLock.Unlock()
return nil
}
func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
// Stream has already gone away
return nil
}
if !stream.replied {
// No reply received...Protocol error?
return nil
}
// TODO limit headers while not blocking (use buffered chan or goroutine?)
select {
case <-stream.closeChan:
return nil
case stream.headerChan <- frame.Headers:
}
if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
return nil
}
func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
stream, streamOk := s.getStream(frame.StreamId)
if !streamOk {
debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
// Stream has already gone away
return nil
}
if !stream.replied {
debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
// No reply received...Protocol error?
return nil
}
debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
if len(frame.Data) > 0 {
stream.dataLock.RLock()
select {
case <-stream.closeChan:
debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
case stream.dataChan <- frame.Data:
debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
}
stream.dataLock.RUnlock()
}
if (frame.Flags & spdy.DataFlagFin) != 0x00 {
s.remoteStreamFinish(stream)
}
return nil
}
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
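// Locally initiated ping ids share the parity of s.pingId (even on the server
// side, odd on the client side, as set in NewConnection). A frame with the
// opposite parity therefore comes from the peer and is echoed straight back;
// a matching parity means it is the reply to one of our own pings.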
if s.pingId&0x01 != frame.Id&0x01 {
return s.framer.WriteFrame(frame)
}
pingChan, pingOk := s.pingChans[frame.Id]
if pingOk {
close(pingChan)
}
return nil
}
func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
debugMessage("(%p) Go away received", s)
s.receiveIdLock.Lock()
if s.goneAway {
s.receiveIdLock.Unlock()
return nil
}
s.goneAway = true
s.receiveIdLock.Unlock()
if s.lastStreamChan != nil {
stream, _ := s.getStream(frame.LastGoodStreamId)
go func() {
s.lastStreamChan <- stream
}()
}
// Do not block frame handler waiting for closure
go s.shutdown(s.goAwayTimeout)
return nil
}
func (s *Connection) remoteStreamFinish(stream *Stream) {
stream.closeRemoteChannels()
stream.finishLock.Lock()
if stream.finished {
// Stream is fully closed, cleanup
s.removeStream(stream)
}
stream.finishLock.Unlock()
}
// CreateStream creates a new spdy stream using the parameters for
// creating the stream frame. The stream frame will be sent upon
// calling this function; however, this function does not wait for
// the reply frame. If waiting for the reply is desired, use
// the stream Wait or WaitTimeout function on the stream returned
// by this function.
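// Illustrative use (variable names assumed; WaitTimeout is assumed to take a
// time.Duration, mirroring Connection.Wait):
//
//	stream, err := conn.CreateStream(http.Header{}, nil, false)
//	if err != nil {
//		return err
//	}
//	if err := stream.WaitTimeout(5 * time.Second); err != nil {
//		return err
//	}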
func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
// MUST synchronize stream creation (all the way to writing the frame)
// as stream IDs **MUST** increase monotonically.
s.nextIdLock.Lock()
defer s.nextIdLock.Unlock()
streamId := s.getNextStreamId()
if streamId == 0 {
return nil, fmt.Errorf("Unable to get new stream id")
}
stream := &Stream{
streamId: streamId,
parent: parent,
conn: s,
startChan: make(chan error),
headers: headers,
dataChan: make(chan []byte),
headerChan: make(chan http.Header),
closeChan: make(chan bool),
}
debugMessage("(%p) (%p) Create stream", s, stream)
s.addStream(stream)
return stream, s.sendStream(stream, fin)
}
func (s *Connection) shutdown(closeTimeout time.Duration) {
// TODO Ensure this isn't called multiple times
s.shutdownLock.Lock()
if s.hasShutdown {
s.shutdownLock.Unlock()
return
}
s.hasShutdown = true
s.shutdownLock.Unlock()
var timeout <-chan time.Time
if closeTimeout > time.Duration(0) {
timeout = time.After(closeTimeout)
}
streamsClosed := make(chan bool)
go func() {
s.streamCond.L.Lock()
for len(s.streams) > 0 {
debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
s.streamCond.Wait()
}
s.streamCond.L.Unlock()
close(streamsClosed)
}()
var err error
select {
case <-streamsClosed:
// No active streams, close should be safe
err = s.conn.Close()
case <-timeout:
// Force ungraceful close
err = s.conn.Close()
// Wait for cleanup to clear active streams
<-streamsClosed
}
if err != nil {
duration := 10 * time.Minute
time.AfterFunc(duration, func() {
select {
case err, ok := <-s.shutdownChan:
if ok {
debugMessage("Unhandled close error after %s: %s", duration, err)
}
default:
}
})
s.shutdownChan <- err
}
close(s.shutdownChan)
return
}
// Close closes the spdy connection by sending a GoAway frame and initiating shutdown.
func (s *Connection) Close() error {
s.receiveIdLock.Lock()
if s.goneAway {
s.receiveIdLock.Unlock()
return nil
}
s.goneAway = true
s.receiveIdLock.Unlock()
var lastStreamId spdy.StreamId
if s.receivedStreamId > 2 {
lastStreamId = s.receivedStreamId - 2
}
goAwayFrame := &spdy.GoAwayFrame{
LastGoodStreamId: lastStreamId,
Status: spdy.GoAwayOK,
}
err := s.framer.WriteFrame(goAwayFrame)
if err != nil {
return err
}
go s.shutdown(s.closeTimeout)
return nil
}
// CloseWait closes the connection and waits for shutdown
// to finish. Note the underlying network Connection
// is not closed until the end of shutdown.
func (s *Connection) CloseWait() error {
closeErr := s.Close()
if closeErr != nil {
return closeErr
}
shutdownErr, ok := <-s.shutdownChan
if ok {
return shutdownErr
}
return nil
}
// Wait waits for the connection to finish shutdown or for
// the wait timeout duration to expire. This needs to be
// called either after Close has been called or a GoAway frame
// has been received. If the wait timeout is 0, this function
// will block until shutdown finishes. If Wait is never called
// and a shutdown error occurs, that error will be logged as an
// unhandled error.
func (s *Connection) Wait(waitTimeout time.Duration) error {
var timeout <-chan time.Time
if waitTimeout > time.Duration(0) {
timeout = time.After(waitTimeout)
}
select {
case err, ok := <-s.shutdownChan:
if ok {
return err
}
case <-timeout:
return ErrTimeout
}
return nil
}
// NotifyClose registers a channel that is notified when the remote
// peer indicates connection closure. The last stream to be
// received by the remote will be sent on the channel. The notify
// timeout determines how long to wait after a GoAway frame is
// received before the connection is closed.
func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
s.goAwayTimeout = timeout
s.lastStreamChan = c
}
// SetCloseTimeout sets the amount of time close will wait for
// streams to finish before terminating the underlying network
// connection. Setting the timeout to 0 will cause close to
// wait forever, which is the default.
func (s *Connection) SetCloseTimeout(timeout time.Duration) {
s.closeTimeout = timeout
}
// SetIdleTimeout sets the amount of time the connection may sit idle before
// it is forcefully terminated.
func (s *Connection) SetIdleTimeout(timeout time.Duration) {
s.framer.setIdleTimeout(timeout)
}
func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
}
headerFrame := &spdy.HeadersFrame{
StreamId: stream.streamId,
Headers: headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(headerFrame)
}
func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
}
replyFrame := &spdy.SynReplyFrame{
StreamId: stream.streamId,
Headers: headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(replyFrame)
}
func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
resetFrame := &spdy.RstStreamFrame{
StreamId: streamId,
Status: status,
}
return s.framer.WriteFrame(resetFrame)
}
func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
return s.sendResetFrame(status, stream.streamId)
}
func (s *Connection) sendStream(stream *Stream, fin bool) error {
var flags spdy.ControlFlags
if fin {
flags = spdy.ControlFlagFin
stream.finished = true
}
var parentId spdy.StreamId
if stream.parent != nil {
parentId = stream.parent.streamId
}
streamFrame := &spdy.SynStreamFrame{
StreamId: spdy.StreamId(stream.streamId),
AssociatedToStreamId: spdy.StreamId(parentId),
Headers: stream.headers,
CFHeader: spdy.ControlFrameHeader{Flags: flags},
}
return s.framer.WriteFrame(streamFrame)
}
// getNextStreamId returns the next sequential id.
// Every call should produce a unique value or an error.
func (s *Connection) getNextStreamId() spdy.StreamId {
sid := s.nextStreamId
if sid > 0x7fffffff {
return 0
}
s.nextStreamId = s.nextStreamId + 2
return sid
}
// PeekNextStreamId returns the next sequential id and keeps the next id untouched
func (s *Connection) PeekNextStreamId() spdy.StreamId {
sid := s.nextStreamId
return sid
}
func (s *Connection) validateStreamId(rid spdy.StreamId) error {
if rid > 0x7fffffff || rid < s.receivedStreamId {
return ErrInvalidStreamId
}
s.receivedStreamId = rid + 2
return nil
}
func (s *Connection) addStream(stream *Stream) {
s.streamCond.L.Lock()
s.streams[stream.streamId] = stream
debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) removeStream(stream *Stream) {
s.streamCond.L.Lock()
delete(s.streams, stream.streamId)
debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
s.streamCond.Broadcast()
s.streamCond.L.Unlock()
}
func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
s.streamLock.RLock()
stream, ok = s.streams[streamId]
s.streamLock.RUnlock()
return
}
// FindStream looks up the given stream id and either waits for the
// stream to be found or returns nil if the stream id is no longer
// valid.
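// Here "no longer valid" means the id is below the next expected remote stream
// id, so the stream can never be added and nil is returned (it may also have
// already been removed).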
func (s *Connection) FindStream(streamId uint32) *Stream {
var stream *Stream
var ok bool
s.streamCond.L.Lock()
stream, ok = s.streams[spdy.StreamId(streamId)]
debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
for !ok && streamId >= uint32(s.receivedStreamId) {
s.streamCond.Wait()
stream, ok = s.streams[spdy.StreamId(streamId)]
}
s.streamCond.L.Unlock()
return stream
}
func (s *Connection) CloseChan() <-chan bool {
return s.closeChan
}
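
For orientation, here is a minimal client-side sketch of driving a Connection end to end. The dial address, timeout value, and error handling are illustrative assumptions; NewConnection and CreateStream are the constructors exercised by the benchmark file later in this diff, and the read at the end assumes the server echoes data back (for example via MirrorStreamHandler).

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	// Address is illustrative; any endpoint speaking spdystream will do.
	conn, err := net.Dial("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	// false marks this end as the client side of the SPDY connection.
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	defer spdyConn.Close()
	go spdyConn.Serve(spdystream.NoOpStreamHandler)
	spdyConn.SetIdleTimeout(30 * time.Second)

	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
	if err != nil {
		panic(err)
	}
	// Wait blocks until the remote side replies to the SYN_STREAM.
	if err := stream.Wait(); err != nil {
		panic(err)
	}
	// fin=true sends a final data frame and closes our write side.
	if err := stream.WriteData([]byte("hello"), true); err != nil {
		panic(err)
	}
	// Assumes an echoing server; otherwise this read would block.
	buf := make([]byte, 64)
	n, _ := stream.Read(buf)
	fmt.Printf("read %d bytes back\n", n)
}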

38
vendor/github.com/docker/spdystream/handlers.go generated vendored Normal file

@ -0,0 +1,38 @@
package spdystream
import (
"io"
"net/http"
)
// MirrorStreamHandler mirrors all streams.
func MirrorStreamHandler(stream *Stream) {
replyErr := stream.SendReply(http.Header{}, false)
if replyErr != nil {
return
}
go func() {
io.Copy(stream, stream)
stream.Close()
}()
go func() {
for {
header, receiveErr := stream.ReceiveHeader()
if receiveErr != nil {
return
}
sendErr := stream.SendHeader(header, false)
if sendErr != nil {
return
}
}
}()
}
// NoOpStreamHandler does nothing when a stream connects. It is most
// likely used with RejectAuthHandler, which will not allow any
// streams to make it to the stream handler.
func NoOpStreamHandler(stream *Stream) {
stream.SendReply(http.Header{}, false)
}
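
A minimal server-side sketch wiring MirrorStreamHandler into Connection.Serve; the listen address and error handling are illustrative assumptions, not part of the vendored code.

package main

import (
	"net"

	"github.com/docker/spdystream"
)

func main() {
	// Listen address is illustrative.
	listener, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		// true marks the server side of the SPDY connection.
		spdyConn, err := spdystream.NewConnection(conn, true)
		if err != nil {
			conn.Close()
			continue
		}
		// Every accepted stream is replied to and echoed back.
		go spdyConn.Serve(spdystream.MirrorStreamHandler)
	}
}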

98
vendor/github.com/docker/spdystream/priority.go generated vendored Normal file

@ -0,0 +1,98 @@
package spdystream
import (
"container/heap"
"sync"
"github.com/docker/spdystream/spdy"
)
type prioritizedFrame struct {
frame spdy.Frame
priority uint8
insertId uint64
}
type frameQueue []*prioritizedFrame
func (fq frameQueue) Len() int {
return len(fq)
}
func (fq frameQueue) Less(i, j int) bool {
if fq[i].priority == fq[j].priority {
return fq[i].insertId < fq[j].insertId
}
return fq[i].priority < fq[j].priority
}
func (fq frameQueue) Swap(i, j int) {
fq[i], fq[j] = fq[j], fq[i]
}
func (fq *frameQueue) Push(x interface{}) {
*fq = append(*fq, x.(*prioritizedFrame))
}
func (fq *frameQueue) Pop() interface{} {
old := *fq
n := len(old)
*fq = old[0 : n-1]
return old[n-1]
}
type PriorityFrameQueue struct {
queue *frameQueue
c *sync.Cond
size int
nextInsertId uint64
drain bool
}
func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
queue := make(frameQueue, 0, size)
heap.Init(&queue)
return &PriorityFrameQueue{
queue: &queue,
size: size,
c: sync.NewCond(&sync.Mutex{}),
}
}
func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
q.c.L.Lock()
defer q.c.L.Unlock()
for q.queue.Len() >= q.size {
q.c.Wait()
}
pFrame := &prioritizedFrame{
frame: frame,
priority: priority,
insertId: q.nextInsertId,
}
q.nextInsertId = q.nextInsertId + 1
heap.Push(q.queue, pFrame)
q.c.Signal()
}
func (q *PriorityFrameQueue) Pop() spdy.Frame {
q.c.L.Lock()
defer q.c.L.Unlock()
for q.queue.Len() == 0 {
if q.drain {
return nil
}
q.c.Wait()
}
frame := heap.Pop(q.queue).(*prioritizedFrame).frame
q.c.Signal()
return frame
}
func (q *PriorityFrameQueue) Drain() {
q.c.L.Lock()
defer q.c.L.Unlock()
q.drain = true
q.c.Broadcast()
}
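
A self-contained sketch of the queue semantics above: lower priority values pop first, ties keep insertion order, and Drain makes Pop return nil once the queue is empty. The frame values are arbitrary examples.

package main

import (
	"fmt"

	"github.com/docker/spdystream"
	"github.com/docker/spdystream/spdy"
)

func main() {
	q := spdystream.NewPriorityFrameQueue(16)

	q.Push(&spdy.DataFrame{StreamId: 3}, 2) // low priority
	q.Push(&spdy.PingFrame{Id: 1}, 0)       // high priority, popped first
	q.Push(&spdy.DataFrame{StreamId: 5}, 2) // ties with the first push, popped after it

	// After Drain, Pop returns nil instead of blocking once the queue is empty.
	q.Drain()

	for frame := q.Pop(); frame != nil; frame = q.Pop() {
		fmt.Printf("%T\n", frame)
	}
}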

108
vendor/github.com/docker/spdystream/priority_test.go generated vendored Normal file

@ -0,0 +1,108 @@
package spdystream
import (
"sync"
"testing"
"time"
"github.com/docker/spdystream/spdy"
)
func TestPriorityQueueOrdering(t *testing.T) {
queue := NewPriorityFrameQueue(150)
data1 := &spdy.DataFrame{}
data2 := &spdy.DataFrame{}
data3 := &spdy.DataFrame{}
data4 := &spdy.DataFrame{}
queue.Push(data1, 2)
queue.Push(data2, 1)
queue.Push(data3, 1)
queue.Push(data4, 0)
if queue.Pop() != data4 {
t.Fatalf("Wrong order, expected data4 first")
}
if queue.Pop() != data2 {
t.Fatalf("Wrong order, expected data2 second")
}
if queue.Pop() != data3 {
t.Fatalf("Wrong order, expected data3 third")
}
if queue.Pop() != data1 {
t.Fatalf("Wrong order, expected data1 fourth")
}
// Insert 50 Medium priority frames
for i := spdy.StreamId(50); i < 100; i++ {
queue.Push(&spdy.DataFrame{StreamId: i}, 1)
}
// Insert 50 low priority frames
for i := spdy.StreamId(100); i < 150; i++ {
queue.Push(&spdy.DataFrame{StreamId: i}, 2)
}
// Insert 50 high priority frames
for i := spdy.StreamId(0); i < 50; i++ {
queue.Push(&spdy.DataFrame{StreamId: i}, 0)
}
for i := spdy.StreamId(0); i < 150; i++ {
frame := queue.Pop()
if frame.(*spdy.DataFrame).StreamId != i {
t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i)
}
}
}
func TestPriorityQueueSync(t *testing.T) {
queue := NewPriorityFrameQueue(150)
var wg sync.WaitGroup
insertRange := func(start, stop spdy.StreamId, priority uint8) {
for i := start; i < stop; i++ {
queue.Push(&spdy.DataFrame{StreamId: i}, priority)
}
wg.Done()
}
wg.Add(3)
go insertRange(spdy.StreamId(100), spdy.StreamId(150), 2)
go insertRange(spdy.StreamId(0), spdy.StreamId(50), 0)
go insertRange(spdy.StreamId(50), spdy.StreamId(100), 1)
wg.Wait()
for i := spdy.StreamId(0); i < 150; i++ {
frame := queue.Pop()
if frame.(*spdy.DataFrame).StreamId != i {
t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i)
}
}
}
func TestPriorityQueueBlocking(t *testing.T) {
queue := NewPriorityFrameQueue(15)
for i := 0; i < 15; i++ {
queue.Push(&spdy.DataFrame{}, 2)
}
doneChan := make(chan bool)
go func() {
queue.Push(&spdy.DataFrame{}, 2)
close(doneChan)
}()
select {
case <-doneChan:
t.Fatalf("Push succeeded, expected to block")
case <-time.After(time.Millisecond):
break
}
queue.Pop()
select {
case <-doneChan:
break
case <-time.After(time.Millisecond):
t.Fatalf("Push should have succeeded, but timeout reached")
}
for i := 0; i < 15; i++ {
queue.Pop()
}
}

187
vendor/github.com/docker/spdystream/spdy/dictionary.go generated vendored Normal file

@ -0,0 +1,187 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
var headerDictionary = []byte{
0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
}

348
vendor/github.com/docker/spdystream/spdy/read.go generated vendored Normal file

@ -0,0 +1,348 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
import (
"compress/zlib"
"encoding/binary"
"io"
"net/http"
"strings"
)
func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readSynStreamFrame(h, frame)
}
func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readSynReplyFrame(h, frame)
}
func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
return err
}
if frame.Status == 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
var numSettings uint32
if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
return err
}
frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
for i := uint32(0); i < numSettings; i++ {
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
return err
}
frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
frame.FlagIdValues[i].Id &= 0xffffff
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
return err
}
}
return nil
}
func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
return err
}
if frame.Id == 0 {
return &Error{ZeroStreamId, 0}
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, StreamId(frame.Id)}
}
return nil
}
func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
return err
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
}
if frame.CFHeader.length != 8 {
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
}
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
return err
}
return nil
}
func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
return f.readHeadersFrame(h, frame)
}
func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
frame.CFHeader = h
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if frame.CFHeader.Flags != 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if frame.CFHeader.length != 8 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
return err
}
return nil
}
func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
ctor, ok := cframeCtor[frameType]
if !ok {
return nil, &Error{Err: InvalidControlFrame}
}
return ctor(), nil
}
var cframeCtor = map[ControlFrameType]func() controlFrame{
TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
TypeSettings: func() controlFrame { return new(SettingsFrame) },
TypePing: func() controlFrame { return new(PingFrame) },
TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
TypeHeaders: func() controlFrame { return new(HeadersFrame) },
TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
}
func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
if f.headerDecompressor != nil {
f.headerReader.N = payloadSize
return nil
}
f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
if err != nil {
return err
}
f.headerDecompressor = decompressor
return nil
}
// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
func (f *Framer) ReadFrame() (Frame, error) {
var firstWord uint32
if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
return nil, err
}
if firstWord&0x80000000 != 0 {
frameType := ControlFrameType(firstWord & 0xffff)
version := uint16(firstWord >> 16 & 0x7fff)
return f.parseControlFrame(version, frameType)
}
return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
}
func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
var length uint32
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
return nil, err
}
flags := ControlFlags((length & 0xff000000) >> 24)
length &= 0xffffff
header := ControlFrameHeader{version, frameType, flags, length}
cframe, err := newControlFrame(frameType)
if err != nil {
return nil, err
}
if err = cframe.read(header, f); err != nil {
return nil, err
}
return cframe, nil
}
func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
var numHeaders uint32
if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
return nil, err
}
var e error
h := make(http.Header, int(numHeaders))
for i := 0; i < int(numHeaders); i++ {
var length uint32
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
return nil, err
}
nameBytes := make([]byte, length)
if _, err := io.ReadFull(r, nameBytes); err != nil {
return nil, err
}
name := string(nameBytes)
if name != strings.ToLower(name) {
e = &Error{UnlowercasedHeaderName, streamId}
name = strings.ToLower(name)
}
if h[name] != nil {
e = &Error{DuplicateHeaders, streamId}
}
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
return nil, err
}
value := make([]byte, length)
if _, err := io.ReadFull(r, value); err != nil {
return nil, err
}
valueList := strings.Split(string(value), headerValueSeparator)
for _, v := range valueList {
h.Add(name, v)
}
}
if e != nil {
return h, e
}
return h, nil
}
func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
return err
}
if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
return err
}
frame.Priority >>= 5
if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 10))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
for h := range frame.Headers {
if invalidReqHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
for h := range frame.Headers {
if invalidRespHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
frame.CFHeader = h
var err error
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
return err
}
reader := f.r
if !f.headerCompressionDisabled {
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
if err != nil {
return err
}
reader = f.headerDecompressor
}
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
err = &Error{WrongCompressedPayloadSize, 0}
}
if err != nil {
return err
}
var invalidHeaders map[string]bool
if frame.StreamId%2 == 0 {
invalidHeaders = invalidReqHeaders
} else {
invalidHeaders = invalidRespHeaders
}
for h := range frame.Headers {
if invalidHeaders[h] {
return &Error{InvalidHeaderPresent, frame.StreamId}
}
}
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
return nil
}
func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
var length uint32
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
return nil, err
}
var frame DataFrame
frame.StreamId = streamId
frame.Flags = DataFlags(length >> 24)
length &= 0xffffff
frame.Data = make([]byte, length)
if _, err := io.ReadFull(f.r, frame.Data); err != nil {
return nil, err
}
if frame.StreamId == 0 {
return nil, &Error{ZeroStreamId, 0}
}
return &frame, nil
}
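
ReadFrame above returns one of the concrete frame types, and a type switch is the usual way to consume them. The helper below is a hypothetical sketch; the package and function names are not part of the vendored code.

package spdyutil

import (
	"fmt"
	"io"

	"github.com/docker/spdystream/spdy"
)

// DumpFrames reads frames until EOF and prints a one-line summary for each,
// switching on the concrete frame types produced by Framer.ReadFrame.
func DumpFrames(framer *spdy.Framer) error {
	for {
		frame, err := framer.ReadFrame()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch f := frame.(type) {
		case *spdy.SynStreamFrame:
			fmt.Println("SYN_STREAM", f.StreamId)
		case *spdy.SynReplyFrame:
			fmt.Println("SYN_REPLY", f.StreamId)
		case *spdy.RstStreamFrame:
			fmt.Println("RST_STREAM", f.StreamId, f.Status)
		case *spdy.DataFrame:
			fmt.Printf("DATA %d (%d bytes)\n", f.StreamId, len(f.Data))
		default:
			fmt.Printf("%T\n", f)
		}
	}
}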

644
vendor/github.com/docker/spdystream/spdy/spdy_test.go generated vendored Normal file

@ -0,0 +1,644 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
import (
"bytes"
"compress/zlib"
"encoding/base64"
"io"
"io/ioutil"
"net/http"
"reflect"
"testing"
)
var HeadersFixture = http.Header{
"Url": []string{"http://www.google.com/"},
"Method": []string{"get"},
"Version": []string{"http/1.1"},
}
func TestHeaderParsing(t *testing.T) {
var headerValueBlockBuf bytes.Buffer
writeHeaderValueBlock(&headerValueBlockBuf, HeadersFixture)
const bogusStreamId = 1
newHeaders, err := parseHeaderValueBlock(&headerValueBlockBuf, bogusStreamId)
if err != nil {
t.Fatal("parseHeaderValueBlock:", err)
}
if !reflect.DeepEqual(HeadersFixture, newHeaders) {
t.Fatal("got: ", newHeaders, "\nwant: ", HeadersFixture)
}
}
func TestCreateParseSynStreamFrameCompressionDisable(t *testing.T) {
buffer := new(bytes.Buffer)
// Fixture framer for no compression test.
framer := &Framer{
headerCompressionDisabled: true,
w: buffer,
headerBuf: new(bytes.Buffer),
r: buffer,
}
synStreamFrame := SynStreamFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSynStream,
},
StreamId: 2,
Headers: HeadersFixture,
}
if err := framer.WriteFrame(&synStreamFrame); err != nil {
t.Fatal("WriteFrame without compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame without compression:", err)
}
parsedSynStreamFrame, ok := frame.(*SynStreamFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) {
t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame)
}
}
func TestCreateParseSynStreamFrameCompressionEnable(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
synStreamFrame := SynStreamFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSynStream,
},
StreamId: 2,
Headers: HeadersFixture,
}
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
if err := framer.WriteFrame(&synStreamFrame); err != nil {
t.Fatal("WriteFrame with compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame with compression:", err)
}
parsedSynStreamFrame, ok := frame.(*SynStreamFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) {
t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame)
}
}
func TestCreateParseSynReplyFrameCompressionDisable(t *testing.T) {
buffer := new(bytes.Buffer)
framer := &Framer{
headerCompressionDisabled: true,
w: buffer,
headerBuf: new(bytes.Buffer),
r: buffer,
}
synReplyFrame := SynReplyFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSynReply,
},
StreamId: 2,
Headers: HeadersFixture,
}
if err := framer.WriteFrame(&synReplyFrame); err != nil {
t.Fatal("WriteFrame without compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame without compression:", err)
}
parsedSynReplyFrame, ok := frame.(*SynReplyFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) {
t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame)
}
}
func TestCreateParseSynReplyFrameCompressionEnable(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
synReplyFrame := SynReplyFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSynReply,
},
StreamId: 2,
Headers: HeadersFixture,
}
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
if err := framer.WriteFrame(&synReplyFrame); err != nil {
t.Fatal("WriteFrame with compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame with compression:", err)
}
parsedSynReplyFrame, ok := frame.(*SynReplyFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) {
t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame)
}
}
func TestCreateParseRstStream(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
rstStreamFrame := RstStreamFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeRstStream,
},
StreamId: 1,
Status: InvalidStream,
}
if err := framer.WriteFrame(&rstStreamFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedRstStreamFrame, ok := frame.(*RstStreamFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(rstStreamFrame, *parsedRstStreamFrame) {
t.Fatal("got: ", *parsedRstStreamFrame, "\nwant: ", rstStreamFrame)
}
}
func TestCreateParseSettings(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
settingsFrame := SettingsFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSettings,
},
FlagIdValues: []SettingsFlagIdValue{
{FlagSettingsPersistValue, SettingsCurrentCwnd, 10},
{FlagSettingsPersisted, SettingsUploadBandwidth, 1},
},
}
if err := framer.WriteFrame(&settingsFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedSettingsFrame, ok := frame.(*SettingsFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(settingsFrame, *parsedSettingsFrame) {
t.Fatal("got: ", *parsedSettingsFrame, "\nwant: ", settingsFrame)
}
}
func TestCreateParsePing(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
pingFrame := PingFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypePing,
},
Id: 31337,
}
if err := framer.WriteFrame(&pingFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
if pingFrame.CFHeader.Flags != 0 {
t.Fatal("Incorrect frame type:", pingFrame)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedPingFrame, ok := frame.(*PingFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if parsedPingFrame.CFHeader.Flags != 0 {
t.Fatal("Parsed incorrect frame type:", parsedPingFrame)
}
if !reflect.DeepEqual(pingFrame, *parsedPingFrame) {
t.Fatal("got: ", *parsedPingFrame, "\nwant: ", pingFrame)
}
}
func TestCreateParseGoAway(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
goAwayFrame := GoAwayFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeGoAway,
},
LastGoodStreamId: 31337,
Status: 1,
}
if err := framer.WriteFrame(&goAwayFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
if goAwayFrame.CFHeader.Flags != 0 {
t.Fatal("Incorrect frame type:", goAwayFrame)
}
if goAwayFrame.CFHeader.length != 8 {
t.Fatal("Incorrect frame type:", goAwayFrame)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedGoAwayFrame, ok := frame.(*GoAwayFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if parsedGoAwayFrame.CFHeader.Flags != 0 {
t.Fatal("Incorrect frame type:", parsedGoAwayFrame)
}
if parsedGoAwayFrame.CFHeader.length != 8 {
t.Fatal("Incorrect frame type:", parsedGoAwayFrame)
}
if !reflect.DeepEqual(goAwayFrame, *parsedGoAwayFrame) {
t.Fatal("got: ", *parsedGoAwayFrame, "\nwant: ", goAwayFrame)
}
}
func TestCreateParseHeadersFrame(t *testing.T) {
buffer := new(bytes.Buffer)
framer := &Framer{
headerCompressionDisabled: true,
w: buffer,
headerBuf: new(bytes.Buffer),
r: buffer,
}
headersFrame := HeadersFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeHeaders,
},
StreamId: 2,
}
headersFrame.Headers = HeadersFixture
if err := framer.WriteFrame(&headersFrame); err != nil {
t.Fatal("WriteFrame without compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame without compression:", err)
}
parsedHeadersFrame, ok := frame.(*HeadersFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) {
t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame)
}
}
func TestCreateParseHeadersFrameCompressionEnable(t *testing.T) {
buffer := new(bytes.Buffer)
headersFrame := HeadersFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeHeaders,
},
StreamId: 2,
}
headersFrame.Headers = HeadersFixture
	framer, err := NewFramer(buffer, buffer)
	if err != nil {
		t.Fatal("Failed to create new framer:", err)
	}
	if err := framer.WriteFrame(&headersFrame); err != nil {
t.Fatal("WriteFrame with compression:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame with compression:", err)
}
parsedHeadersFrame, ok := frame.(*HeadersFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) {
t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame)
}
}
func TestCreateParseWindowUpdateFrame(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
windowUpdateFrame := WindowUpdateFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeWindowUpdate,
},
StreamId: 31337,
DeltaWindowSize: 1,
}
if err := framer.WriteFrame(&windowUpdateFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
if windowUpdateFrame.CFHeader.Flags != 0 {
t.Fatal("Incorrect frame type:", windowUpdateFrame)
}
if windowUpdateFrame.CFHeader.length != 8 {
t.Fatal("Incorrect frame type:", windowUpdateFrame)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedWindowUpdateFrame, ok := frame.(*WindowUpdateFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if parsedWindowUpdateFrame.CFHeader.Flags != 0 {
t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame)
}
if parsedWindowUpdateFrame.CFHeader.length != 8 {
t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame)
}
if !reflect.DeepEqual(windowUpdateFrame, *parsedWindowUpdateFrame) {
t.Fatal("got: ", *parsedWindowUpdateFrame, "\nwant: ", windowUpdateFrame)
}
}
func TestCreateParseDataFrame(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
dataFrame := DataFrame{
StreamId: 1,
Data: []byte{'h', 'e', 'l', 'l', 'o'},
}
if err := framer.WriteFrame(&dataFrame); err != nil {
t.Fatal("WriteFrame:", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame:", err)
}
parsedDataFrame, ok := frame.(*DataFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(dataFrame, *parsedDataFrame) {
t.Fatal("got: ", *parsedDataFrame, "\nwant: ", dataFrame)
}
}
func TestCompressionContextAcrossFrames(t *testing.T) {
buffer := new(bytes.Buffer)
framer, err := NewFramer(buffer, buffer)
if err != nil {
t.Fatal("Failed to create new framer:", err)
}
headersFrame := HeadersFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeHeaders,
},
StreamId: 2,
Headers: HeadersFixture,
}
if err := framer.WriteFrame(&headersFrame); err != nil {
t.Fatal("WriteFrame (HEADERS):", err)
}
synStreamFrame := SynStreamFrame{
ControlFrameHeader{
Version,
TypeSynStream,
0, // Flags
0, // length
},
2, // StreamId
		0,   // AssociatedToStreamId
0, // Priority
1, // Slot
nil, // Headers
}
synStreamFrame.Headers = HeadersFixture
if err := framer.WriteFrame(&synStreamFrame); err != nil {
t.Fatal("WriteFrame (SYN_STREAM):", err)
}
frame, err := framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame (HEADERS):", err, buffer.Bytes())
}
parsedHeadersFrame, ok := frame.(*HeadersFrame)
if !ok {
t.Fatalf("expected HeadersFrame; got %T %v", frame, frame)
}
if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) {
t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame)
}
frame, err = framer.ReadFrame()
if err != nil {
t.Fatal("ReadFrame (SYN_STREAM):", err, buffer.Bytes())
}
parsedSynStreamFrame, ok := frame.(*SynStreamFrame)
if !ok {
t.Fatalf("expected SynStreamFrame; got %T %v", frame, frame)
}
if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) {
t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame)
}
}
func TestMultipleSPDYFrames(t *testing.T) {
// Initialize the framers.
pr1, pw1 := io.Pipe()
pr2, pw2 := io.Pipe()
writer, err := NewFramer(pw1, pr2)
if err != nil {
t.Fatal("Failed to create writer:", err)
}
reader, err := NewFramer(pw2, pr1)
if err != nil {
t.Fatal("Failed to create reader:", err)
}
// Set up the frames we're actually transferring.
headersFrame := HeadersFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeHeaders,
},
StreamId: 2,
Headers: HeadersFixture,
}
synStreamFrame := SynStreamFrame{
CFHeader: ControlFrameHeader{
version: Version,
frameType: TypeSynStream,
},
StreamId: 2,
Headers: HeadersFixture,
}
// Start the goroutines to write the frames.
go func() {
if err := writer.WriteFrame(&headersFrame); err != nil {
t.Fatal("WriteFrame (HEADERS): ", err)
}
if err := writer.WriteFrame(&synStreamFrame); err != nil {
t.Fatal("WriteFrame (SYN_STREAM): ", err)
}
}()
// Read the frames and verify they look as expected.
frame, err := reader.ReadFrame()
if err != nil {
t.Fatal("ReadFrame (HEADERS): ", err)
}
parsedHeadersFrame, ok := frame.(*HeadersFrame)
if !ok {
t.Fatal("Parsed incorrect frame type:", frame)
}
if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) {
t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame)
}
frame, err = reader.ReadFrame()
if err != nil {
t.Fatal("ReadFrame (SYN_STREAM):", err)
}
parsedSynStreamFrame, ok := frame.(*SynStreamFrame)
if !ok {
t.Fatal("Parsed incorrect frame type.")
}
if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) {
t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame)
}
}
func TestReadMalformedZlibHeader(t *testing.T) {
// These were constructed by corrupting the first byte of the zlib
// header after writing.
malformedStructs := map[string]string{
"SynStreamFrame": "gAIAAQAAABgAAAACAAAAAAAAF/nfolGyYmAAAAAA//8=",
"SynReplyFrame": "gAIAAgAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==",
"HeadersFrame": "gAIACAAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==",
}
for name, bad := range malformedStructs {
b, err := base64.StdEncoding.DecodeString(bad)
if err != nil {
t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err)
}
buf := bytes.NewBuffer(b)
reader, err := NewFramer(buf, buf)
if err != nil {
t.Fatalf("NewFramer: %v", err)
}
_, err = reader.ReadFrame()
if err != zlib.ErrHeader {
t.Errorf("Frame %s, expected: %#v, actual: %#v", name, zlib.ErrHeader, err)
}
}
}
// TODO: these tests are too weak for updating SPDY spec. Fix me.
type zeroStream struct {
frame Frame
encoded string
}
var streamIdZeroFrames = map[string]zeroStream{
"SynStreamFrame": {
&SynStreamFrame{StreamId: 0},
"gAIAAQAAABgAAAAAAAAAAAAAePnfolGyYmAAAAAA//8=",
},
"SynReplyFrame": {
&SynReplyFrame{StreamId: 0},
"gAIAAgAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==",
},
"RstStreamFrame": {
&RstStreamFrame{StreamId: 0},
"gAIAAwAAAAgAAAAAAAAAAA==",
},
"HeadersFrame": {
&HeadersFrame{StreamId: 0},
"gAIACAAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==",
},
"DataFrame": {
&DataFrame{StreamId: 0},
"AAAAAAAAAAA=",
},
"PingFrame": {
&PingFrame{Id: 0},
"gAIABgAAAAQAAAAA",
},
}
func TestNoZeroStreamId(t *testing.T) {
	t.Skip("TODO: update to work with SPDY3")
for name, f := range streamIdZeroFrames {
b, err := base64.StdEncoding.DecodeString(f.encoded)
if err != nil {
			t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err)
continue
}
framer, err := NewFramer(ioutil.Discard, bytes.NewReader(b))
if err != nil {
t.Fatalf("NewFramer: %v", err)
}
err = framer.WriteFrame(f.frame)
checkZeroStreamId(t, name, "WriteFrame", err)
_, err = framer.ReadFrame()
checkZeroStreamId(t, name, "ReadFrame", err)
}
}
func checkZeroStreamId(t *testing.T, frame string, method string, err error) {
if err == nil {
t.Errorf("%s ZeroStreamId, no error on %s", method, frame)
return
}
eerr, ok := err.(*Error)
if !ok || eerr.Err != ZeroStreamId {
t.Errorf("%s ZeroStreamId, incorrect error %#v, frame %s", method, eerr, frame)
}
}

275
vendor/github.com/docker/spdystream/spdy/types.go generated vendored Normal file

@ -0,0 +1,275 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package spdy implements the SPDY protocol (currently SPDY/3), described in
// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
package spdy
import (
"bytes"
"compress/zlib"
"io"
"net/http"
)
// Version is the protocol version number that this package implements.
const Version = 3
// ControlFrameType stores the type field in a control frame header.
type ControlFrameType uint16
const (
TypeSynStream ControlFrameType = 0x0001
TypeSynReply = 0x0002
TypeRstStream = 0x0003
TypeSettings = 0x0004
TypePing = 0x0006
TypeGoAway = 0x0007
TypeHeaders = 0x0008
TypeWindowUpdate = 0x0009
)
// ControlFlags are the flags that can be set on a control frame.
type ControlFlags uint8
const (
ControlFlagFin ControlFlags = 0x01
ControlFlagUnidirectional = 0x02
ControlFlagSettingsClearSettings = 0x01
)
// DataFlags are the flags that can be set on a data frame.
type DataFlags uint8
const (
DataFlagFin DataFlags = 0x01
)
// MaxDataLength is the maximum number of bytes that can be stored in one frame.
const MaxDataLength = 1<<24 - 1
// headerValueSeparator separates multiple header values.
const headerValueSeparator = "\x00"
// Frame is a single SPDY frame in its unpacked in-memory representation. Use
// Framer to read and write it.
type Frame interface {
write(f *Framer) error
}
// ControlFrameHeader contains all the fields in a control frame header,
// in its unpacked in-memory representation.
type ControlFrameHeader struct {
// Note, high bit is the "Control" bit.
version uint16 // spdy version number
frameType ControlFrameType
Flags ControlFlags
length uint32 // length of data field
}
type controlFrame interface {
Frame
read(h ControlFrameHeader, f *Framer) error
}
// StreamId represents a 31-bit value identifying the stream.
type StreamId uint32
// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
// frame.
type SynStreamFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
Priority uint8 // priority of this frame (3-bit)
Slot uint8 // index in the server's credential vector of the client certificate
Headers http.Header
}
// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
type SynReplyFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Headers http.Header
}
// RstStreamStatus represents the status that led to a RST_STREAM.
type RstStreamStatus uint32
const (
ProtocolError RstStreamStatus = iota + 1
InvalidStream
RefusedStream
UnsupportedVersion
Cancel
InternalError
FlowControlError
StreamInUse
StreamAlreadyClosed
InvalidCredentials
FrameTooLarge
)
// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
// frame.
type RstStreamFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Status RstStreamStatus
}
// SettingsFlag represents a flag in a SETTINGS frame.
type SettingsFlag uint8
const (
FlagSettingsPersistValue SettingsFlag = 0x1
FlagSettingsPersisted = 0x2
)
// SettingsId represents the id of an id/value pair in a SETTINGS frame.
type SettingsId uint32
const (
SettingsUploadBandwidth SettingsId = iota + 1
SettingsDownloadBandwidth
SettingsRoundTripTime
SettingsMaxConcurrentStreams
SettingsCurrentCwnd
SettingsDownloadRetransRate
SettingsInitialWindowSize
SettingsClientCretificateVectorSize
)
// SettingsFlagIdValue is the unpacked, in-memory representation of the
// combined flag/id/value for a setting in a SETTINGS frame.
type SettingsFlagIdValue struct {
Flag SettingsFlag
Id SettingsId
Value uint32
}
// SettingsFrame is the unpacked, in-memory representation of a SPDY
// SETTINGS frame.
type SettingsFrame struct {
CFHeader ControlFrameHeader
FlagIdValues []SettingsFlagIdValue
}
// PingFrame is the unpacked, in-memory representation of a PING frame.
type PingFrame struct {
CFHeader ControlFrameHeader
	Id       uint32 // unique id for this ping; server-initiated ids are even, client-initiated ids are odd.
}
// GoAwayStatus represents the status in a GoAwayFrame.
type GoAwayStatus uint32
const (
GoAwayOK GoAwayStatus = iota
GoAwayProtocolError
GoAwayInternalError
)
// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
type GoAwayFrame struct {
CFHeader ControlFrameHeader
LastGoodStreamId StreamId // last stream id which was accepted by sender
Status GoAwayStatus
}
// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
type HeadersFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
Headers http.Header
}
// WindowUpdateFrame is the unpacked, in-memory representation of a
// WINDOW_UPDATE frame.
type WindowUpdateFrame struct {
CFHeader ControlFrameHeader
StreamId StreamId
DeltaWindowSize uint32 // additional number of bytes to existing window size
}
// TODO: Implement credential frame and related methods.
// DataFrame is the unpacked, in-memory representation of a DATA frame.
type DataFrame struct {
// Note, high bit is the "Control" bit. Should be 0 for data frames.
StreamId StreamId
Flags DataFlags
Data []byte // payload data of this frame
}
// ErrorCode is a SPDY-specific error code.
type ErrorCode string
const (
UnlowercasedHeaderName ErrorCode = "header was not lowercased"
DuplicateHeaders = "multiple headers with same name"
WrongCompressedPayloadSize = "compressed payload size was incorrect"
UnknownFrameType = "unknown frame type"
InvalidControlFrame = "invalid control frame"
InvalidDataFrame = "invalid data frame"
InvalidHeaderPresent = "frame contained invalid header"
ZeroStreamId = "stream id zero is disallowed"
)
// Error contains both the type of error and additional values. StreamId is 0
// if Error is not associated with a stream.
type Error struct {
Err ErrorCode
StreamId StreamId
}
func (e *Error) Error() string {
return string(e.Err)
}
var invalidReqHeaders = map[string]bool{
"Connection": true,
"Host": true,
"Keep-Alive": true,
"Proxy-Connection": true,
"Transfer-Encoding": true,
}
var invalidRespHeaders = map[string]bool{
"Connection": true,
"Keep-Alive": true,
"Proxy-Connection": true,
"Transfer-Encoding": true,
}
// Framer handles serializing/deserializing SPDY frames, including compressing/
// decompressing payloads.
type Framer struct {
headerCompressionDisabled bool
w io.Writer
headerBuf *bytes.Buffer
headerCompressor *zlib.Writer
r io.Reader
headerReader io.LimitedReader
headerDecompressor io.ReadCloser
}
// NewFramer allocates a new Framer for a given SPDY connection, represented by
// an io.Writer and an io.Reader. Note that Framer will read and write individual fields
// from/to the Reader and Writer, so the caller should pass in an appropriately
// buffered implementation to optimize performance.
func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
compressBuf := new(bytes.Buffer)
compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
if err != nil {
return nil, err
}
framer := &Framer{
w: w,
headerBuf: compressBuf,
headerCompressor: compressor,
r: r,
}
return framer, nil
}
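
The spdy tests earlier in this diff round-trip frames through an in-memory buffer; the following is a stripped-down sketch of the same pattern, with arbitrary header values.

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/docker/spdystream/spdy"
)

func main() {
	// The same buffer acts as both the write and read side of the framer.
	buf := new(bytes.Buffer)
	framer, err := spdy.NewFramer(buf, buf)
	if err != nil {
		panic(err)
	}

	out := &spdy.SynStreamFrame{
		StreamId: 1,
		Headers: http.Header{
			"Method":  []string{"get"},
			"Url":     []string{"http://www.google.com/"},
			"Version": []string{"http/1.1"},
		},
	}
	if err := framer.WriteFrame(out); err != nil {
		panic(err)
	}

	in, err := framer.ReadFrame()
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %T for stream %d\n", in, in.(*spdy.SynStreamFrame).StreamId)
}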

318
vendor/github.com/docker/spdystream/spdy/write.go generated vendored Normal file

@ -0,0 +1,318 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package spdy
import (
"encoding/binary"
"io"
"net/http"
"strings"
)
func (frame *SynStreamFrame) write(f *Framer) error {
return f.writeSynStreamFrame(frame)
}
func (frame *SynReplyFrame) write(f *Framer) error {
return f.writeSynReplyFrame(frame)
}
func (frame *RstStreamFrame) write(f *Framer) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeRstStream
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if frame.Status == 0 {
return &Error{InvalidControlFrame, frame.StreamId}
}
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
return
}
return
}
func (frame *SettingsFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSettings
frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
return
}
for _, flagIdValue := range frame.FlagIdValues {
flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
return
}
}
return
}
func (frame *PingFrame) write(f *Framer) (err error) {
if frame.Id == 0 {
return &Error{ZeroStreamId, 0}
}
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypePing
frame.CFHeader.Flags = 0
frame.CFHeader.length = 4
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
return
}
return
}
func (frame *GoAwayFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeGoAway
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
return
}
return nil
}
func (frame *HeadersFrame) write(f *Framer) error {
return f.writeHeadersFrame(frame)
}
func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeWindowUpdate
frame.CFHeader.Flags = 0
frame.CFHeader.length = 8
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
return
}
return nil
}
func (frame *DataFrame) write(f *Framer) error {
return f.writeDataFrame(frame)
}
// WriteFrame writes a frame.
func (f *Framer) WriteFrame(frame Frame) error {
return frame.write(f)
}
func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
return err
}
flagsAndLength := uint32(h.Flags)<<24 | h.length
if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
return err
}
return nil
}
func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
n = 0
if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
return
}
n += 2
for name, values := range h {
if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
return
}
n += 2
name = strings.ToLower(name)
if _, err = io.WriteString(w, name); err != nil {
return
}
n += len(name)
v := strings.Join(values, headerValueSeparator)
if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
return
}
n += 2
if _, err = io.WriteString(w, v); err != nil {
return
}
n += len(v)
}
return
}
func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSynStream
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
return err
}
if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
return err
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return err
}
f.headerBuf.Reset()
return nil
}
func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeSynReply
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return
}
f.headerBuf.Reset()
return
}
func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
// Marshal the headers.
var writer io.Writer = f.headerBuf
if !f.headerCompressionDisabled {
writer = f.headerCompressor
}
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
return
}
if !f.headerCompressionDisabled {
f.headerCompressor.Flush()
}
// Set ControlFrameHeader.
frame.CFHeader.version = Version
frame.CFHeader.frameType = TypeHeaders
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
// Serialize frame to Writer.
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
return
}
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
return
}
f.headerBuf.Reset()
return
}
func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
if frame.StreamId == 0 {
return &Error{ZeroStreamId, 0}
}
if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
return &Error{InvalidDataFrame, frame.StreamId}
}
// Serialize frame to Writer.
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
return
}
flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
return
}
if _, err = f.w.Write(frame.Data); err != nil {
return
}
return nil
}

113
vendor/github.com/docker/spdystream/spdy_bench_test.go generated vendored Normal file

@ -0,0 +1,113 @@
package spdystream
import (
"fmt"
"io"
"net"
"net/http"
"sync"
"testing"
)
func configureServer() (io.Closer, string, *sync.WaitGroup) {
authenticated = true
wg := &sync.WaitGroup{}
server, listen, serverErr := runServer(wg)
if serverErr != nil {
panic(serverErr)
}
return server, listen, wg
}
func BenchmarkDial10000(b *testing.B) {
server, addr, wg := configureServer()
defer func() {
server.Close()
wg.Wait()
}()
for i := 0; i < b.N; i++ {
conn, dialErr := net.Dial("tcp", addr)
if dialErr != nil {
panic(fmt.Sprintf("Error dialing server: %s", dialErr))
}
conn.Close()
}
}
func BenchmarkDialWithSPDYStream10000(b *testing.B) {
server, addr, wg := configureServer()
defer func() {
server.Close()
wg.Wait()
}()
for i := 0; i < b.N; i++ {
conn, dialErr := net.Dial("tcp", addr)
if dialErr != nil {
b.Fatalf("Error dialing server: %s", dialErr)
}
spdyConn, spdyErr := NewConnection(conn, false)
if spdyErr != nil {
b.Fatalf("Error creating spdy connection: %s", spdyErr)
}
go spdyConn.Serve(NoOpStreamHandler)
closeErr := spdyConn.Close()
if closeErr != nil {
			b.Fatalf("Error closing connection: %s", closeErr)
}
}
}
func benchmarkStreamWithDataAndSize(size uint64, b *testing.B) {
server, addr, wg := configureServer()
defer func() {
server.Close()
wg.Wait()
}()
for i := 0; i < b.N; i++ {
conn, dialErr := net.Dial("tcp", addr)
if dialErr != nil {
b.Fatalf("Error dialing server: %s", dialErr)
}
spdyConn, spdyErr := NewConnection(conn, false)
if spdyErr != nil {
b.Fatalf("Error creating spdy connection: %s", spdyErr)
}
go spdyConn.Serve(MirrorStreamHandler)
		stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
		if err != nil {
			panic(err)
		}
		writer := make([]byte, size)
		stream.Write(writer)
reader := make([]byte, size)
stream.Read(reader)
stream.Close()
closeErr := spdyConn.Close()
if closeErr != nil {
b.Fatalf("Error closing connection: %s, closeErr")
}
}
}
func BenchmarkStreamWith1Byte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1, b) }
func BenchmarkStreamWith1KiloByte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024, b) }
func BenchmarkStreamWith1Megabyte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024*1024, b) }

1171
vendor/github.com/docker/spdystream/spdy_test.go generated vendored Normal file

File diff suppressed because it is too large

327
vendor/github.com/docker/spdystream/stream.go generated vendored Normal file
View file

@ -0,0 +1,327 @@
package spdystream
import (
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/docker/spdystream/spdy"
)
var (
ErrUnreadPartialData = errors.New("unread partial data")
)
type Stream struct {
streamId spdy.StreamId
parent *Stream
conn *Connection
startChan chan error
dataLock sync.RWMutex
dataChan chan []byte
unread []byte
priority uint8
headers http.Header
headerChan chan http.Header
finishLock sync.Mutex
finished bool
replyCond *sync.Cond
replied bool
closeLock sync.Mutex
closeChan chan bool
}
// WriteData writes data to stream, sending a dataframe per call
func (s *Stream) WriteData(data []byte, fin bool) error {
s.waitWriteReply()
var flags spdy.DataFlags
if fin {
flags = spdy.DataFlagFin
s.finishLock.Lock()
if s.finished {
s.finishLock.Unlock()
return ErrWriteClosedStream
}
s.finished = true
s.finishLock.Unlock()
}
dataFrame := &spdy.DataFrame{
StreamId: s.streamId,
Flags: flags,
Data: data,
}
debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
return s.conn.framer.WriteFrame(dataFrame)
}
// Write writes bytes to a stream, calling write data for each call.
func (s *Stream) Write(data []byte) (n int, err error) {
err = s.WriteData(data, false)
if err == nil {
n = len(data)
}
return
}
// Read reads bytes from a stream; a single read will never get more
// than what is sent on a single data frame, but multiple calls to
// Read may get data from the same data frame.
func (s *Stream) Read(p []byte) (n int, err error) {
if s.unread == nil {
select {
case <-s.closeChan:
return 0, io.EOF
case read, ok := <-s.dataChan:
if !ok {
return 0, io.EOF
}
s.unread = read
}
}
n = copy(p, s.unread)
if n < len(s.unread) {
s.unread = s.unread[n:]
} else {
s.unread = nil
}
return
}
// ReadData reads an entire data frame and returns the byte array
// from the data frame. If there is unread data from the result
// of a Read call, this function will return an ErrUnreadPartialData.
func (s *Stream) ReadData() ([]byte, error) {
debugMessage("(%p) Reading data from %d", s, s.streamId)
if s.unread != nil {
return nil, ErrUnreadPartialData
}
select {
case <-s.closeChan:
return nil, io.EOF
case read, ok := <-s.dataChan:
if !ok {
return nil, io.EOF
}
return read, nil
}
}
func (s *Stream) waitWriteReply() {
if s.replyCond != nil {
s.replyCond.L.Lock()
for !s.replied {
s.replyCond.Wait()
}
s.replyCond.L.Unlock()
}
}
// Wait waits for the stream to receive a reply.
func (s *Stream) Wait() error {
return s.WaitTimeout(time.Duration(0))
}
// WaitTimeout waits for the stream to receive a reply or for timeout.
// When the timeout is reached, ErrTimeout will be returned.
func (s *Stream) WaitTimeout(timeout time.Duration) error {
var timeoutChan <-chan time.Time
if timeout > time.Duration(0) {
timeoutChan = time.After(timeout)
}
select {
case err := <-s.startChan:
if err != nil {
return err
}
break
case <-timeoutChan:
return ErrTimeout
}
return nil
}
// Close closes the stream by sending an empty data frame with the
// finish flag set, indicating this side is finished with the stream.
func (s *Stream) Close() error {
select {
case <-s.closeChan:
// Stream is now fully closed
s.conn.removeStream(s)
default:
break
}
return s.WriteData([]byte{}, true)
}
// Reset sends a reset frame, putting the stream into the fully closed state.
func (s *Stream) Reset() error {
s.conn.removeStream(s)
return s.resetStream()
}
func (s *Stream) resetStream() error {
// Always call closeRemoteChannels, even if s.finished is already true.
// This makes it so that stream.Close() followed by stream.Reset() allows
// stream.Read() to unblock.
s.closeRemoteChannels()
s.finishLock.Lock()
if s.finished {
s.finishLock.Unlock()
return nil
}
s.finished = true
s.finishLock.Unlock()
resetFrame := &spdy.RstStreamFrame{
StreamId: s.streamId,
Status: spdy.Cancel,
}
return s.conn.framer.WriteFrame(resetFrame)
}
// CreateSubStream creates a stream using the current as the parent
func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
return s.conn.CreateStream(headers, s, fin)
}
// SetPriority sets the stream priority, does not affect the
// remote priority of this stream after Open has been called.
// Valid values are 0 through 7, 0 being the highest priority
// and 7 the lowest.
func (s *Stream) SetPriority(priority uint8) {
s.priority = priority
}
// SendHeader sends a header frame across the stream
func (s *Stream) SendHeader(headers http.Header, fin bool) error {
return s.conn.sendHeaders(headers, s, fin)
}
// SendReply sends a reply on a stream, only valid to be called once
// when handling a new stream
func (s *Stream) SendReply(headers http.Header, fin bool) error {
if s.replyCond == nil {
return errors.New("cannot reply on initiated stream")
}
s.replyCond.L.Lock()
defer s.replyCond.L.Unlock()
if s.replied {
return nil
}
err := s.conn.sendReply(headers, s, fin)
if err != nil {
return err
}
s.replied = true
s.replyCond.Broadcast()
return nil
}
// Refuse sends a reset frame with the status refuse, only
// valid to be called once when handling a new stream. This
// may be used to indicate that a stream is not allowed
// when http status codes are not being used.
func (s *Stream) Refuse() error {
if s.replied {
return nil
}
s.replied = true
return s.conn.sendReset(spdy.RefusedStream, s)
}
// Cancel sends a reset frame with the status canceled. This
// can be used at any time by the creator of the Stream to
// indicate the stream is no longer needed.
func (s *Stream) Cancel() error {
return s.conn.sendReset(spdy.Cancel, s)
}
// ReceiveHeader receives a header sent on the other side
// of the stream. This function will block until a header
// is received or stream is closed.
func (s *Stream) ReceiveHeader() (http.Header, error) {
select {
case <-s.closeChan:
break
case header, ok := <-s.headerChan:
if !ok {
return nil, fmt.Errorf("header chan closed")
}
return header, nil
}
return nil, fmt.Errorf("stream closed")
}
// Parent returns the parent stream
func (s *Stream) Parent() *Stream {
return s.parent
}
// Headers returns the headers used to create the stream
func (s *Stream) Headers() http.Header {
return s.headers
}
// String returns the string version of stream using the
// streamId to uniquely identify the stream
func (s *Stream) String() string {
return fmt.Sprintf("stream:%d", s.streamId)
}
// Identifier returns a 32 bit identifier for the stream
func (s *Stream) Identifier() uint32 {
return uint32(s.streamId)
}
// IsFinished returns whether the stream has finished
// sending data
func (s *Stream) IsFinished() bool {
return s.finished
}
// Implement net.Conn interface
func (s *Stream) LocalAddr() net.Addr {
return s.conn.conn.LocalAddr()
}
func (s *Stream) RemoteAddr() net.Addr {
return s.conn.conn.RemoteAddr()
}
// TODO set per stream values instead of connection-wide
func (s *Stream) SetDeadline(t time.Time) error {
return s.conn.conn.SetDeadline(t)
}
func (s *Stream) SetReadDeadline(t time.Time) error {
return s.conn.conn.SetReadDeadline(t)
}
func (s *Stream) SetWriteDeadline(t time.Time) error {
return s.conn.conn.SetWriteDeadline(t)
}
func (s *Stream) closeRemoteChannels() {
s.closeLock.Lock()
defer s.closeLock.Unlock()
select {
case <-s.closeChan:
default:
close(s.closeChan)
}
}
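
To make the read/write semantics documented above concrete (one data frame per WriteData call; a single Read never crosses a frame boundary), here is a hedged client-side sketch. NewConnection, Serve(NoOpStreamHandler), CreateStream, Wait, WriteData, and Read all appear in the surrounding vendored code; the dial target is a placeholder, and the read assumes the peer echoes data back, as the MirrorStreamHandler used in the benchmarks does.

```Go
package main

import (
	"log"
	"net"
	"net/http"

	"github.com/docker/spdystream"
)

func main() {
	// Placeholder address; assumes a spdystream server (echoing via MirrorStreamHandler) listens here.
	nc, err := net.Dial("tcp", "127.0.0.1:7777")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := spdystream.NewConnection(nc, false)
	if err != nil {
		log.Fatal(err)
	}
	go conn.Serve(spdystream.NoOpStreamHandler)

	stream, err := conn.CreateStream(http.Header{}, nil, false)
	if err != nil {
		log.Fatal(err)
	}
	if err := stream.Wait(); err != nil { // blocks until the reply frame arrives
		log.Fatal(err)
	}

	// One WriteData call produces exactly one data frame.
	if err := stream.WriteData([]byte("hello"), false); err != nil {
		log.Fatal(err)
	}

	// A single Read never returns more than one frame's worth of data,
	// and a short buffer may need several Reads to drain the same frame.
	buf := make([]byte, 2)
	n, err := stream.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes: %q", n, buf[:n])

	stream.Close()
	conn.Close()
}
```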

16
vendor/github.com/docker/spdystream/utils.go generated vendored Normal file
View file

@ -0,0 +1,16 @@
package spdystream
import (
"log"
"os"
)
var (
DEBUG = os.Getenv("DEBUG")
)
func debugMessage(fmt string, args ...interface{}) {
if DEBUG != "" {
log.Printf(fmt, args...)
}
}

65
vendor/github.com/docker/spdystream/ws/connection.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
package ws
import (
"github.com/gorilla/websocket"
"io"
"log"
"time"
)
// Wrap an HTTP2 connection over WebSockets and
// use the underlying WebSocket framing for proxy
// compatibility.
type Conn struct {
*websocket.Conn
reader io.Reader
}
func NewConnection(w *websocket.Conn) *Conn {
return &Conn{Conn: w}
}
func (c Conn) Write(b []byte) (int, error) {
err := c.WriteMessage(websocket.BinaryMessage, b)
if err != nil {
return 0, err
}
return len(b), nil
}
func (c Conn) Read(b []byte) (int, error) {
if c.reader == nil {
t, r, err := c.NextReader()
if err != nil {
return 0, err
}
if t != websocket.BinaryMessage {
log.Printf("ws: ignored non-binary message in stream")
return 0, nil
}
c.reader = r
}
n, err := c.reader.Read(b)
if err != nil {
if err == io.EOF {
c.reader = nil
}
return n, err
}
return n, nil
}
func (c Conn) SetDeadline(t time.Time) error {
if err := c.Conn.SetReadDeadline(t); err != nil {
return err
}
if err := c.Conn.SetWriteDeadline(t); err != nil {
return err
}
return nil
}
func (c Conn) Close() error {
err := c.Conn.Close()
return err
}

175
vendor/github.com/docker/spdystream/ws/ws_test.go generated vendored Normal file
View file

@ -0,0 +1,175 @@
package ws
import (
"bytes"
"github.com/docker/spdystream"
"github.com/gorilla/websocket"
"io"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
var serverSpdyConn *spdystream.Connection
// Connect to the Websocket endpoint at ws://localhost
// using SPDY over Websockets framing.
func ExampleConn() {
wsconn, _, _ := websocket.DefaultDialer.Dial("ws://localhost/", http.Header{"Origin": {"http://localhost/"}})
conn, _ := spdystream.NewConnection(NewConnection(wsconn), false)
go conn.Serve(spdystream.NoOpStreamHandler, spdystream.NoAuthHandler)
stream, _ := conn.CreateStream(http.Header{}, nil, false)
stream.Wait()
}
func serveWs(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
http.Error(w, "Method not allowed", 405)
return
}
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
if _, ok := err.(websocket.HandshakeError); !ok {
log.Println(err)
}
return
}
wrap := NewConnection(ws)
spdyConn, err := spdystream.NewConnection(wrap, true)
if err != nil {
log.Fatal(err)
return
}
serverSpdyConn = spdyConn
go spdyConn.Serve(spdystream.MirrorStreamHandler, authStreamHandler)
}
func TestSpdyStreamOverWs(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(serveWs))
defer server.Close()
defer func() {
if serverSpdyConn != nil {
serverSpdyConn.Close()
}
}()
wsconn, _, err := websocket.DefaultDialer.Dial(strings.Replace(server.URL, "http://", "ws://", 1), http.Header{"Origin": {server.URL}})
if err != nil {
t.Fatal(err)
}
wrap := NewConnection(wsconn)
spdyConn, err := spdystream.NewConnection(wrap, false)
if err != nil {
defer wsconn.Close()
t.Fatal(err)
}
defer spdyConn.Close()
authenticated = true
go spdyConn.Serve(spdystream.NoOpStreamHandler, spdystream.RejectAuthHandler)
stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false)
if streamErr != nil {
t.Fatalf("Error creating stream: %s", streamErr)
}
waitErr := stream.Wait()
if waitErr != nil {
t.Fatalf("Error waiting for stream: %s", waitErr)
}
message := []byte("hello")
writeErr := stream.WriteData(message, false)
if writeErr != nil {
t.Fatalf("Error writing data")
}
buf := make([]byte, 10)
n, readErr := stream.Read(buf)
if readErr != nil {
t.Fatalf("Error reading data from stream: %s", readErr)
}
if n != 5 {
t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n)
}
if bytes.Compare(buf[:n], message) != 0 {
t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", buf, message)
}
writeErr = stream.WriteData(message, true)
if writeErr != nil {
t.Fatalf("Error writing data")
}
smallBuf := make([]byte, 3)
n, readErr = stream.Read(smallBuf)
if readErr != nil {
t.Fatalf("Error reading data from stream: %s", readErr)
}
if n != 3 {
t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n)
}
if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 {
t.Fatalf("Did not receive expected message:\nActual: %s\nExpectd: %s", smallBuf[:n], message)
}
n, readErr = stream.Read(smallBuf)
if readErr != nil {
t.Fatalf("Error reading data from stream: %s", readErr)
}
if n != 2 {
t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n)
}
if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 {
t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n])
}
n, readErr = stream.Read(buf)
if readErr != io.EOF {
t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n)
}
streamCloseErr := stream.Close()
if streamCloseErr != nil {
t.Fatalf("Error closing stream: %s", streamCloseErr)
}
// Closing again should return nil
streamCloseErr = stream.Close()
if streamCloseErr != nil {
t.Fatalf("Error closing stream: %s", streamCloseErr)
}
authenticated = false
badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false)
if badStreamErr != nil {
t.Fatalf("Error creating stream: %s", badStreamErr)
}
waitErr = badStream.Wait()
if waitErr == nil {
t.Fatalf("Did not receive error creating stream")
}
if waitErr != spdystream.ErrReset {
t.Fatalf("Unexpected error creating stream: %s", waitErr)
}
spdyCloseErr := spdyConn.Close()
if spdyCloseErr != nil {
t.Fatalf("Error closing spdy connection: %s", spdyCloseErr)
}
}
var authenticated bool
func authStreamHandler(header http.Header, slot uint8, parent uint32) bool {
return authenticated
}

70
vendor/github.com/emicklei/go-restful/.gitignore generated vendored Normal file
View file

@ -0,0 +1,70 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
restful.html
*.out
tmp.prof
go-restful.test
examples/restful-basic-authentication
examples/restful-encoding-filter
examples/restful-filters
examples/restful-hello-world
examples/restful-resource-functions
examples/restful-serve-static
examples/restful-user-service
*.DS_Store
examples/restful-user-resource
examples/restful-multi-containers
examples/restful-form-handling
examples/restful-CORS-filter
examples/restful-options-filter
examples/restful-curly-router
examples/restful-cpuprofiler-service
examples/restful-pre-post-filters
curly.prof
examples/restful-NCSA-logging
examples/restful-html-template
s.html
restful-path-tail

220
vendor/github.com/emicklei/go-restful/CHANGES.md generated vendored Normal file
View file

@ -0,0 +1,220 @@
Change history of go-restful
=
2017-01-30
[IMPORTANT] For swagger users, change your import statement to:
swagger "github.com/emicklei/go-restful-swagger12"
- moved swagger 1.2 code to go-restful-swagger12
- created TAG 2.0.0
2017-01-27
- remove defer request body close
- expose Dispatch for testing filters and Routefunctions
- swagger response model cannot be array
- created TAG 1.0.0
2016-12-22
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
2016-11-26
- Default change! now use CurlyRouter (was RouterJSR311)
- Default change! no more caching of request content
- Default change! do not recover from panics
2016-09-22
- fix the DefaultRequestContentType feature
2016-02-14
- take the quality factor of the Accept header media type into account when deciding the content type of the response
- add constructors for custom entity accessors for xml and json
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
- add configurable logging
2015-03-18
- if not specified, the Operation is derived from the Route function
2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route
2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation
2014-04-16
- (api add) expose constructor of Request for testing.
2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- (api add) Added support for request/response filter functions
2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits
2012-11-14
- Initial commit

22
vendor/github.com/emicklei/go-restful/LICENSE generated vendored Normal file
View file

@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

74
vendor/github.com/emicklei/go-restful/README.md generated vendored Normal file
View file

@ -0,0 +1,74 @@
go-restful
==========
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
package for building REST-style Web Services using Google Go
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI
### Example
```Go
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser).
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Writes(User{}))
...
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
```
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
### Features
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level (see the sketch after this list)
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see go-restful-swagger12,go-restful-swagger20 packages)
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration
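As a hedged illustration of the filter feature above, a minimal Service-level logging filter might look like the following; the FilterFunction signature and the chain.ProcessFilter call are assumed from go-restful's documented API rather than shown in this diff:
```Go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

// logFilter runs before every route of the WebService and then hands
// control back to the filter chain.
func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL)
	chain.ProcessFilter(req, resp)
}

func hello(req *restful.Request, resp *restful.Response) {
	resp.WriteEntity("hello") // returned error ignored in this sketch
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/hello").Produces(restful.MIME_JSON)
	ws.Filter(logFilter) // Service-level filter
	ws.Route(ws.GET("/").To(hello))
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```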
### Resources
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
(c) 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
Type ```git shortlog -s``` for a full list of contributors.

1
vendor/github.com/emicklei/go-restful/Srcfile generated vendored Normal file
View file

@ -0,0 +1 @@
{"SkipDirs": ["examples"]}

View file

@ -0,0 +1,51 @@
package restful
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func setupCurly(container *Container) []string {
wsCount := 26
rtCount := 26
urisCurly := []string{}
container.Router(CurlyRouter{})
for i := 0; i < wsCount; i++ {
root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
ws := new(WebService).Path(root)
for j := 0; j < rtCount; j++ {
sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
ws.Route(ws.GET(sub).Consumes("application/xml").Produces("application/xml").To(echoCurly))
}
container.Add(ws)
for _, each := range ws.Routes() {
urisCurly = append(urisCurly, "http://bench.com"+each.Path)
}
}
return urisCurly
}
func echoCurly(req *Request, resp *Response) {}
func BenchmarkManyCurly(b *testing.B) {
container := NewContainer()
urisCurly := setupCurly(container)
b.ResetTimer()
for t := 0; t < b.N; t++ {
for r := 0; r < 1000; r++ {
for _, each := range urisCurly {
sendNoReturnTo(each, container, t)
}
}
}
}
func sendNoReturnTo(address string, container *Container, t int) {
httpRequest, _ := http.NewRequest("GET", address, nil)
httpRequest.Header.Set("Accept", "application/xml")
httpWriter := httptest.NewRecorder()
container.dispatch(httpWriter, httpRequest)
}

43
vendor/github.com/emicklei/go-restful/bench_test.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
package restful
import (
"fmt"
"io"
"testing"
)
var uris = []string{}
func setup(container *Container) {
wsCount := 26
rtCount := 26
for i := 0; i < wsCount; i++ {
root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
ws := new(WebService).Path(root)
for j := 0; j < rtCount; j++ {
sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
ws.Route(ws.GET(sub).To(echo))
}
container.Add(ws)
for _, each := range ws.Routes() {
uris = append(uris, "http://bench.com"+each.Path)
}
}
}
func echo(req *Request, resp *Response) {
io.WriteString(resp.ResponseWriter, "echo")
}
func BenchmarkMany(b *testing.B) {
container := NewContainer()
setup(container)
b.ResetTimer()
for t := 0; t < b.N; t++ {
for _, each := range uris {
// println(each)
sendItTo(each, container)
}
}
}

10
vendor/github.com/emicklei/go-restful/bench_test.sh generated vendored Normal file
View file

@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof

123
vendor/github.com/emicklei/go-restful/compress.go generated vendored Normal file
View file

@ -0,0 +1,123 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
"net"
"net/http"
"strings"
)
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
writer http.ResponseWriter
compressor io.WriteCloser
encoding string
}
// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
return c.writer.Header()
}
// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
c.writer.WriteHeader(status)
}
// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
if c.isCompressorClosed() {
return -1, errors.New("Compressing error: tried to write data using closed compressor")
}
return c.compressor.Write(bytes)
}
// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
return errors.New("Compressing error: tried to close already closed compressor")
}
c.compressor.Close()
if ENCODING_GZIP == c.encoding {
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
}
if ENCODING_DEFLATE == c.encoding {
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
}
// gc hint needed?
c.compressor = nil
return nil
}
func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor
}
// Hijack implements the Hijacker interface
// This is especially useful when combining Container.EnabledContentEncoding
// in combination with websockets (for instance gorilla/websocket)
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := c.writer.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
}
return hijacker.Hijack()
}
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
gi := strings.Index(header, ENCODING_GZIP)
zi := strings.Index(header, ENCODING_DEFLATE)
// use in order of appearance
if gi == -1 {
return zi != -1, ENCODING_DEFLATE
} else if zi == -1 {
return gi != -1, ENCODING_GZIP
} else {
if gi < zi {
return true, ENCODING_GZIP
}
return true, ENCODING_DEFLATE
}
}
// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
c := new(CompressingResponseWriter)
c.writer = httpWriter
var err error
if ENCODING_GZIP == encoding {
w := currentCompressorProvider.AcquireGzipWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_GZIP
} else if ENCODING_DEFLATE == encoding {
w := currentCompressorProvider.AcquireZlibWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_DEFLATE
} else {
return nil, errors.New("Unknown encoding:" + encoding)
}
return c, err
}
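
A short hedged sketch of using CompressingResponseWriter directly from a plain net/http handler; normally a Container does this for you when EnableContentEncoding is switched on. The ENCODING_GZIP constant is referenced by the code above but defined elsewhere in the package, so its use here is an assumption.

```Go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func gzipHello(w http.ResponseWriter, r *http.Request) {
	// NewCompressingResponseWriter sets the Content-Encoding header itself.
	cw, err := restful.NewCompressingResponseWriter(w, restful.ENCODING_GZIP)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer cw.Close() // flushes the compressor and returns it to the pool

	cw.Header().Set("Content-Type", "text/plain")
	cw.WriteHeader(http.StatusOK)
	cw.Write([]byte("hello, compressed world"))
}

func main() {
	http.HandleFunc("/", gzipHello)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```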

Some files were not shown because too many files have changed in this diff.