add better generate

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>

Parent: 3fc6abf56b
Commit: cdd93563f5

5655 changed files with 1187011 additions and 392 deletions
Gopkg.lock (generated), 213 lines changed:

@@ -1,12 +1,33 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


+[[projects]]
+  branch = "master"
+  name = "github.com/Azure/go-ansiterm"
+  packages = [
+    ".",
+    "winterm"
+  ]
+  revision = "d6e3b3328b783f23731bc4d058875b0371ff8109"
+
 [[projects]]
   name = "github.com/Microsoft/go-winio"
   packages = ["."]
   revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
   version = "v0.4.7"

+[[projects]]
+  branch = "master"
+  name = "github.com/Nvveen/Gotty"
+  packages = ["."]
+  revision = "cd527374f1e5bff4938207604a14f2e38a9cf512"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/beorn7/perks"
+  packages = ["quantile"]
+  revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+
 [[projects]]
   branch = "master"
   name = "github.com/containerd/console"
@@ -41,48 +62,179 @@
   revision = "06bda8370f45268db985f7af15732444d94ed51c"
   version = "v0.2.1"

+[[projects]]
+  branch = "master"
+  name = "github.com/docker/cli"
+  packages = [
+    "cli/config/configfile",
+    "cli/config/credentials",
+    "opts"
+  ]
+  revision = "c0ffb9491cdffb628e18bb491b566255987fd28d"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/docker/distribution"
+  packages = [
+    ".",
+    "context",
+    "digestset",
+    "manifest",
+    "manifest/manifestlist",
+    "manifest/schema1",
+    "manifest/schema2",
+    "metrics",
+    "reference",
+    "registry/api/errcode",
+    "registry/api/v2",
+    "registry/client",
+    "registry/client/auth",
+    "registry/client/auth/challenge",
+    "registry/client/transport",
+    "registry/storage",
+    "registry/storage/cache",
+    "registry/storage/cache/memory",
+    "registry/storage/driver",
+    "registry/storage/driver/base",
+    "registry/storage/driver/factory",
+    "registry/storage/driver/filesystem",
+    "uuid"
+  ]
+  revision = "607ae5d128a82f280e8c7f453d5fb30c535bda17"
+
 [[projects]]
   branch = "master"
   name = "github.com/docker/docker"
   packages = [
+    "api/types",
+    "api/types/blkiodev",
+    "api/types/container",
+    "api/types/filters",
+    "api/types/mount",
+    "api/types/network",
+    "api/types/registry",
+    "api/types/strslice",
+    "api/types/swarm",
+    "api/types/swarm/runtime",
+    "api/types/versions",
+    "errdefs",
     "pkg/aaparser",
     "pkg/archive",
     "pkg/fileutils",
+    "pkg/homedir",
     "pkg/idtools",
     "pkg/ioutils",
+    "pkg/jsonmessage",
     "pkg/longpath",
     "pkg/mount",
     "pkg/pools",
+    "pkg/stringid",
     "pkg/system",
-    "profiles/apparmor"
+    "pkg/tarsum",
+    "pkg/term",
+    "pkg/term/windows",
+    "profiles/apparmor",
+    "registry",
+    "registry/resumable"
   ]
   revision = "0c1006f1abc1af7aa6b9847754370d054dfa6c68"
   source = "github.com/moby/moby"

+[[projects]]
+  branch = "master"
+  name = "github.com/docker/docker-ce"
+  packages = ["components/cli/cli/config"]
+  revision = "4836f8c6fc8cb4bd4de48f019f772b683b94ed71"
+
+[[projects]]
+  name = "github.com/docker/docker-credential-helpers"
+  packages = [
+    "client",
+    "credentials",
+    "pass"
+  ]
+  revision = "d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1"
+  version = "v0.6.0"
+
+[[projects]]
+  name = "github.com/docker/go-connections"
+  packages = [
+    "nat",
+    "sockets",
+    "tlsconfig"
+  ]
+  revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
+  version = "v0.3.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/docker/go-metrics"
+  packages = ["."]
+  revision = "399ea8c73916000c64c2c76e8da00ca82f8387ab"
+
 [[projects]]
   name = "github.com/docker/go-units"
   packages = ["."]
   revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52"
   version = "v0.3.2"

+[[projects]]
+  branch = "master"
+  name = "github.com/docker/libtrust"
+  packages = ["."]
+  revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/genuinetools/reg"
+  packages = [
+    "registry",
+    "repoutils"
+  ]
+  revision = "dd16b5bfefa6d5aac88f1104433f272392fe0aa3"
+
 [[projects]]
   name = "github.com/godbus/dbus"
   packages = ["."]
   revision = "a389bdde4dd695d414e47b755e95e72b7826432c"
   version = "v4.1.0"

+[[projects]]
+  name = "github.com/gogo/protobuf"
+  packages = ["proto"]
+  revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
+  version = "v1.0.0"
+
 [[projects]]
   name = "github.com/golang/protobuf"
   packages = ["proto"]
   revision = "925541529c1fa6821df4e44ce2723319eb2be768"
   version = "v1.0.0"

+[[projects]]
+  name = "github.com/gorilla/context"
+  packages = ["."]
+  revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
+  version = "v1.1"
+
+[[projects]]
+  name = "github.com/gorilla/mux"
+  packages = ["."]
+  revision = "53c1911da2b537f792e7cafcb446b05ffe33b996"
+  version = "v1.6.1"
+
 [[projects]]
   branch = "master"
   name = "github.com/jteeuwen/go-bindata"
   packages = ["."]
   revision = "6025e8de665b31fa74ab1a66f2cddd8c0abf887e"

+[[projects]]
+  name = "github.com/matttproud/golang_protobuf_extensions"
+  packages = ["pbutil"]
+  revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
+  version = "v1.0.0"
+
 [[projects]]
   branch = "master"
   name = "github.com/mrunalp/fileutils"
@@ -144,12 +296,54 @@
   revision = "ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d"
   version = "v1.0.0-rc1"

+[[projects]]
+  name = "github.com/peterhellberg/link"
+  packages = ["."]
+  revision = "d1cebc7ea14a5fc0de7cb4a45acae773161642c6"
+  version = "v1.0.0"
+
 [[projects]]
   name = "github.com/pkg/errors"
   packages = ["."]
   revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
   version = "v0.8.0"

+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/client_golang"
+  packages = [
+    "prometheus",
+    "prometheus/promhttp"
+  ]
+  revision = "d49167c4b9f3c4451707560c5c71471ff5291aaa"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/client_model"
+  packages = ["go"]
+  revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/common"
+  packages = [
+    "expfmt",
+    "internal/bitbucket.org/ww/goautoneg",
+    "model"
+  ]
+  revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/prometheus/procfs"
+  packages = [
+    ".",
+    "internal/util",
+    "nfs",
+    "xfs"
+  ]
+  revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
+
 [[projects]]
   branch = "master"
   name = "github.com/seccomp/libseccomp-golang"
@@ -162,6 +356,16 @@
   revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
   version = "v1.0.5"

+[[projects]]
+  branch = "master"
+  name = "github.com/stevvooe/resumable"
+  packages = [
+    ".",
+    "sha256",
+    "sha512"
+  ]
+  revision = "2aaf90b2ceea5072cb503ef2a620b08ff3119870"
+
 [[projects]]
   branch = "master"
   name = "github.com/syndtr/gocapability"
@@ -192,7 +396,10 @@
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["context"]
+  packages = [
+    "context",
+    "proxy"
+  ]
   revision = "6078986fec03a1dcc236c34816c71b0e05018fda"

 [[projects]]
@@ -207,6 +414,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "9b1a33b507730008bd6eab13779a1a39519098e5c5f30b9fe15ce8c5cd2b6074"
+  inputs-digest = "3bfe517dda3080944417768fdbeb5f1286fb1370a35c1e6c1bb9c3e2d45961f9"
   solver-name = "gps-cdcl"
   solver-version = 1
Gopkg.toml, 12 lines changed:

@@ -52,3 +52,15 @@
 [[constraint]]
   name = "github.com/jteeuwen/go-bindata"
   branch = "master"
+
+[[constraint]]
+  name = "github.com/genuinetools/reg"
+  branch = "master"
+
+[[constraint]]
+  name = "github.com/docker/distribution"
+  branch = "master"
+
+[[override]]
+  name = "github.com/prometheus/client_golang"
+  branch = "master"
Makefile, 16 lines changed:

@@ -12,7 +12,7 @@ BUILDTAGS := seccomp apparmor
 BUILDDIR := ${PREFIX}/cross

 IMAGE := alpine
-IMAGE_DATA_FILE := image/bindata.go
+IMAGE_DATA_FILE := container/bindata.go

 # Populate version variables
 # Add to compile time flags
@@ -22,9 +22,7 @@ GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no)
 ifneq ($(GITUNTRACKEDCHANGES),)
     GITCOMMIT := $(GITCOMMIT)-dirty
 endif
-CTIMEVAR=-X $(PKG)/version.GITCOMMIT=$(GITCOMMIT) -X $(PKG)/version.VERSION=$(VERSION) \
-    -X $(PKG)/image.NAME=$(notdir $(IMAGE)) \
-    -X $(PKG)/image.SHA=$(shell docker inspect --format "{{.Id}}" $(IMAGE))
+CTIMEVAR=-X $(PKG)/version.GITCOMMIT=$(GITCOMMIT) -X $(PKG)/version.VERSION=$(VERSION)
 GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
 GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"

@@ -97,13 +95,8 @@ tag: ## Create a new git tag to prepare to build a release
     git tag -sa $(VERSION) -m "$(VERSION)"
     @echo "Run git push origin $(VERSION) to push your new tag to GitHub and trigger a travis build."

-.PHONY: image.tar
-image.tar:
-    docker pull --disable-content-trust=false $(IMAGE)
-    docker export $(shell docker create $(IMAGE) sh) > $@
-
 .PHONY: $(IMAGE_DATA_FILE)
-$(IMAGE_DATA_FILE): image.tar
+$(IMAGE_DATA_FILE):
     GOMAXPROCS=1 go generate

 .PHONY: clean
@@ -112,9 +105,6 @@ clean: ## Cleanup any build binaries or packages
     $(RM) $(NAME)
     $(RM) -r $(BUILDDIR)
     @sudo $(RM) -r rootfs
-    $(RM) *.tar
-    $(RM) $(IMAGE_DATA_FILE)
-    -@docker rm $(shell docker ps -aq) /dev/null 2>&1

 .PHONY: help
 help:
container/image.go (new file, 178 lines):

@@ -0,0 +1,178 @@
package container

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/storage"
    "github.com/docker/distribution/registry/storage/driver/filesystem"
    "github.com/docker/docker/pkg/archive"
    "github.com/genuinetools/reg/repoutils"
    bindata "github.com/jteeuwen/go-bindata"
)

// EmbedImage pulls a docker image locally. Creates a tarball of it's contents
// and then embeds the tarball as binary data into an output bindata.go file.
func EmbedImage(image string) error {
    // Get the current working directory.
    wd, err := os.Getwd()
    if err != nil {
        return err
    }

    // Create our output path
    output := filepath.Join(wd, "bindata.go")

    // Create the temporary directory for the image contents.
    tmpd, err := ioutil.TempDir("", "container-lib")
    if err != nil {
        return err
    }
    defer os.RemoveAll(tmpd) // Cleanup on complete.

    // Create our tarball path.
    tarball := filepath.Join(tmpd, DefaultTarballPath)

    // Create our image root and state.
    root := filepath.Join(tmpd, "root")
    state := filepath.Join(tmpd, "state")

    // Create the rootfs
    if err := createRootFS(image, root, state); err != nil {
        return err
    }

    // Create the tar.
    tar, err := archive.Tar(root, archive.Gzip)
    if err != nil {
        return fmt.Errorf("create tar failed: %v", err)
    }

    // Create the tarball writer.
    writer, err := os.Create(tarball)
    if err != nil {
        return err
    }
    defer writer.Close() // Close the writer.

    if _, err := io.Copy(writer, tar); err != nil {
        return fmt.Errorf("copy tarball failed: %v", err)
    }

    // Create the bindata config.
    bc := bindata.NewConfig()
    bc.Input = []bindata.InputConfig{
        {
            Path:      tarball,
            Recursive: false,
        },
    }
    bc.Output = output
    bc.Package = "main"
    bc.NoMetadata = true
    bc.Prefix = filepath.Dir(tarball)

    if err := bindata.Translate(bc); err != nil {
        return fmt.Errorf("bindata failed: %v", err)
    }

    return nil
}

// createRootFS creates the base filesystem for a docker image.
// It will pull the base image if it does not exist locally.
// This function takes in a image name and the directory where the
// rootfs should be created.
func createRootFS(image, rootfs, state string) error {
    // Create the context.
    ctx := context.Background()

    // Create the new local registry storage.
    local, err := storage.NewRegistry(ctx, filesystem.New(filesystem.DriverParameters{
        RootDirectory: state,
        MaxThreads:    100,
    }))
    if err != nil {
        return fmt.Errorf("creating new registry storage failed: %v", err)
    }

    // Parse the repository name.
    name, err := reference.ParseNormalizedNamed(image)
    if err != nil {
        return fmt.Errorf("not a valid image %q: %v", image, err)
    }
    // Add latest to the image name if it is empty.
    name = reference.TagNameOnly(name)

    // Get the tag for the repo.
    _, tag, err := repoutils.GetRepoAndRef(image)
    if err != nil {
        return err
    }

    // Create the local repository.
    repo, err := local.Repository(ctx, name)
    if err != nil {
        return fmt.Errorf("creating local repository for %q failed: %v", reference.Path(name), err)
    }

    // Create the manifest service.
    ms, err := repo.Manifests(ctx)
    if err != nil {
        return fmt.Errorf("creating manifest service failed: %v", err)
    }

    // Get the specific tag.
    td, err := repo.Tags(ctx).Get(ctx, tag)
    // Check if we got an unknown error, that means the tag does not exist.
    if err != nil && strings.Contains(err.Error(), "unknown") {
        log.Println("image not found locally, pulling the image")

        // Pull the image.
        if err := pull(ctx, local, name, tag); err != nil {
            return fmt.Errorf("pulling failed: %v", err)
        }

        // Try to get the tag again.
        td, err = repo.Tags(ctx).Get(ctx, tag)
    }
    if err != nil {
        return fmt.Errorf("getting local repository tag %q failed: %v", tag, err)
    }

    // Get the specific manifest for the tag.
    manifest, err := ms.Get(ctx, td.Digest)
    if err != nil {
        return fmt.Errorf("getting local manifest for digest %q failed: %v", td.Digest.String(), err)
    }

    blobStore := repo.Blobs(ctx)
    for i, ref := range manifest.References() {
        if i == 0 {
            fmt.Printf("skipping config %v\n", ref.Digest.String())
            continue
        }
        fmt.Printf("unpacking %v\n", ref.Digest.String())
        layer, err := blobStore.Open(ctx, ref.Digest)
        if err != nil {
            return fmt.Errorf("getting blob %q failed: %v", ref.Digest.String(), err)
        }

        // Unpack the tarfile to the mount path.
        // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
        if err := archive.Untar(layer, rootfs, &archive.TarOptions{
            NoLchown: true,
        }); err != nil {
            return fmt.Errorf("error extracting tar for %q: %v", ref.Digest.String(), err)
        }
    }

    return nil
}
container/pull.go (new file, 196 lines):

@@ -0,0 +1,196 @@
package container

import (
    "context"
    "errors"
    "fmt"
    "io"
    "log"
    "runtime"

    "github.com/docker/distribution"
    "github.com/docker/distribution/manifest/manifestlist"
    "github.com/docker/distribution/manifest/schema1"
    "github.com/docker/distribution/manifest/schema2"
    "github.com/docker/distribution/reference"
    "github.com/genuinetools/reg/registry"
    "github.com/genuinetools/reg/repoutils"
    digest "github.com/opencontainers/go-digest"
)

func pull(ctx context.Context, dst distribution.Namespace, name reference.Named, tag string) error {
    // Get the auth config.
    auth, err := repoutils.GetAuthConfig("", "", reference.Domain(name))
    if err != nil {
        return err
    }

    // TODO: add flag to flip switch for turning off SSL verification
    // Create a new registry client.
    src, err := registry.New(auth, false)
    if err != nil {
        return fmt.Errorf("creating new registry api client failed: %v", err)
    }

    fmt.Println("pulling", name.String())

    imgPath := reference.Path(name)

    // Get the manifest.
    manifest, err := src.Manifest(imgPath, tag)
    if err != nil {
        return fmt.Errorf("getting manifest for '%s:%s' failed: %v", imgPath, tag, err)
    }

    switch v := manifest.(type) {
    case *schema1.SignedManifest:
        return pullV1()
    case *schema2.DeserializedManifest:
        return pullV2(ctx, dst, src, v, name, imgPath, tag)
    case *manifestlist.DeserializedManifestList:
        return pullManifestList(ctx, dst, src, v, name, imgPath, tag)
    }

    return errors.New("unsupported manifest format")
}

func pullV1() error {
    return errors.New("schema1 manifest not supported")
}

func pullV2(ctx context.Context, dst distribution.Namespace, src *registry.Registry, manifest *schema2.DeserializedManifest, name reference.Named, imgPath, tag string) error {
    dstRepo, err := dst.Repository(ctx, name)
    if err != nil {
        return fmt.Errorf("creating the destination repository failed: %v", err)
    }

    dstBlobStore := dstRepo.Blobs(ctx)
    for _, ref := range manifest.References() {
        // TODO: make a progress bar
        fmt.Printf("pulling layer %s\n", ref.Digest.String())

        blob, err := src.DownloadLayer(imgPath, ref.Digest)
        if err != nil {
            return fmt.Errorf("getting remote blob %q failed failed: %v", ref.Digest.String(), err)
        }

        upload, err := dstBlobStore.Create(ctx)
        if err != nil {
            return fmt.Errorf("creating the local blob writer failed: %v", err)
        }

        if _, err := io.Copy(upload, blob); err != nil {
            return fmt.Errorf("writing to the local blob failed: %v", err)
        }

        if _, err := upload.Commit(ctx, ref); err != nil {
            return fmt.Errorf("commiting %q locally failed: %v", ref.Digest.String(), err)
        }

        upload.Close()
    }

    // Create the manifest service locally.
    dms, err := dstRepo.Manifests(ctx)
    if err != nil {
        return fmt.Errorf("creating manifest service locally failed: %v", err)
    }

    // Put the manifest locally.
    manDst, err := dms.Put(ctx, manifest, distribution.WithTag(tag))
    if err != nil {
        return fmt.Errorf("putting the manifest with tag %q locally failed: %v", tag, err)
    }

    // TODO: find a better way to get the manifest descriptor locally.
    // Get the manifest descriptor.
    mf, err := dms.Get(ctx, manDst)
    if err != nil {
        return fmt.Errorf("getting the manifest with digest %q locally failed: %v", manDst.String(), err)
    }
    mediatype, pl, err := mf.Payload()
    if err != nil {
        return fmt.Errorf("payload failed: %v", err)
    }
    _, desc, err := distribution.UnmarshalManifest(mediatype, pl)
    if err != nil {
        return fmt.Errorf("umarshal failed: %v", err)
    }

    // Put the tag locally.
    if err := dstRepo.Tags(ctx).Tag(ctx, tag, desc); err != nil {
        return fmt.Errorf("establishing a relationship between the tag %q and digest %q locally failed: %v", tag, manDst.String(), err)
    }

    return nil
}

func pullManifestList(ctx context.Context, dst distribution.Namespace, src *registry.Registry, mfstList *manifestlist.DeserializedManifestList, name reference.Named, imgPath, tag string) error {
    if _, err := schema2ManifestDigest(name, mfstList); err != nil {
        return err
    }

    log.Printf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", name, len(mfstList.Manifests), runtime.GOOS, runtime.GOARCH)

    manifestMatches := filterManifests(mfstList.Manifests, runtime.GOOS)

    if len(manifestMatches) == 0 {
        return fmt.Errorf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH)
    }

    if len(manifestMatches) > 1 {
        log.Printf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
    }
    manifestDigest := manifestMatches[0].Digest

    // Get the manifest.
    manifest, err := src.Manifest(imgPath, manifestDigest.String())
    if err != nil {
        return fmt.Errorf("getting manifest for %s@%s failed: %v", imgPath, manifestDigest.String(), err)
    }

    switch v := manifest.(type) {
    case *schema1.SignedManifest:
        return pullV1()
    case *schema2.DeserializedManifest:
        return pullV2(ctx, dst, src, v, name, imgPath, tag)
    }

    return errors.New("unsupported manifest format")
}

// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
    _, canonical, err := mfst.Payload()
    if err != nil {
        return "", err
    }

    // If pull by digest, then verify the manifest digest.
    if digested, isDigested := ref.(reference.Canonical); isDigested {
        verifier := digested.Digest().Verifier()
        if _, err := verifier.Write(canonical); err != nil {
            return "", err
        }
        if !verifier.Verified() {
            return "", fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
        }
        return digested.Digest(), nil
    }

    return digest.FromBytes(canonical), nil
}

func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor {
    var matches []manifestlist.ManifestDescriptor
    for _, manifestDescriptor := range manifests {
        if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os {
            matches = append(matches, manifestDescriptor)

            log.Printf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
        }
    }
    return matches
}
container/rootfs.go (new file, 43 lines):

@@ -0,0 +1,43 @@
package container

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/archive"
)

const (
    // DefaultTarballPath holds the default path for the embedded tarball.
    DefaultTarballPath = "image.tar"
)

// UnpackRootfs unpacks the embedded tarball to the rootfs.
func (c *Container) UnpackRootfs(rootfsDir string, asset func(string) ([]byte, error)) error {
    // Make the rootfs directory.
    if err := os.MkdirAll(rootfsDir, 0755); err != nil {
        return err
    }

    // Get the embedded tarball.
    data, err := asset(DefaultTarballPath)
    if err != nil {
        return fmt.Errorf("getting bindata asset image.tar failed: %v", err)
    }

    // Unpack the tarball.
    r := bytes.NewReader(data)
    if err := archive.Untar(r, rootfsDir, &archive.TarOptions{NoLchown: true}); err != nil {
        return err
    }

    // Write a resolv.conf.
    if err := ioutil.WriteFile(filepath.Join(rootfsDir, "etc", "resolv.conf"), []byte("nameserver 8.8.8.8\nnameserver 8.8.4.4"), 0755); err != nil {
        return err
    }

    return nil
}
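For illustration, a minimal sketch of how UnpackRootfs is meant to be fed a go-bindata-style asset function. The fakeAsset helper and the on-disk image.tar path below are hypothetical stand-ins for the generated Asset function; they are not part of this commit.

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/genuinetools/binctr/container"
)

// fakeAsset mimics the generated Asset function by reading the tarball from
// disk instead of returning embedded bytes (illustration only).
func fakeAsset(name string) ([]byte, error) {
    if name != container.DefaultTarballPath {
        return nil, fmt.Errorf("unknown asset %q", name)
    }
    return ioutil.ReadFile("image.tar")
}

func main() {
    c := &container.Container{ID: "demo"}
    if err := c.UnpackRootfs("rootfs", fakeAsset); err != nil {
        panic(err)
    }
}

In the real binaries the second argument is simply Asset from the bindata.go file that EmbedImage writes, as the busybox example later in this commit shows.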
seccomp profile file (moved from package main to package container; file header not captured):

@@ -1,6 +1,6 @@
 // +build seccomp

-package main
+package container

 import (
     "syscall"
@@ -33,8 +33,8 @@ func arches() []specs.Arch {
     }
 }

-// defaultProfile defines the whitelist for the default seccomp profile.
-var defaultSeccompProfile = &specs.LinuxSeccomp{
+// DefaultSeccompProfile defines the whitelist for the default seccomp profile.
+var DefaultSeccompProfile = &specs.LinuxSeccomp{
     DefaultAction: specs.ActErrno,
     Architectures: arches(),
     Syscalls: []specs.LinuxSyscall{
container/seccomp_unsupported.go (new file, 10 lines):

@@ -0,0 +1,10 @@
// +build !seccomp

package container

import (
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// DefaultSeccompProfile defines the whitelist for the default seccomp profile.
var DefaultSeccompProfile = &specs.LinuxSeccomp{}
container/spec.go (new file, 54 lines):

@@ -0,0 +1,54 @@
package container

import (
    aaprofile "github.com/docker/docker/profiles/apparmor"
    "github.com/opencontainers/runc/libcontainer/apparmor"
    "github.com/opencontainers/runc/libcontainer/specconv"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

const (
    // DefaultApparmorProfile is the default apparmor profile for the containers.
    DefaultApparmorProfile = "docker-default"
)

// SpecOpts defines the options available for a spec.
type SpecOpts struct {
    Rootless bool
    Readonly bool
    Terminal bool
    Hooks    *specs.Hooks
}

// Spec returns a default oci spec with some options being passed.
func Spec(opts SpecOpts) *specs.Spec {
    // Initialize the spec.
    spec := specconv.Example()

    // Set the spec to be rootless.
    if opts.Rootless {
        specconv.ToRootless(spec)
    }

    // Setup readonly fs in spec.
    spec.Root.Readonly = opts.Readonly

    // Setup tty in spec.
    spec.Process.Terminal = opts.Terminal

    // Pass in any hooks to the spec.
    spec.Hooks = opts.Hooks

    // Set the default seccomp profile.
    spec.Linux.Seccomp = DefaultSeccompProfile

    // Install the default apparmor profile.
    if apparmor.IsEnabled() {
        // Check if we have the docker-default apparmor profile loaded.
        if _, err := aaprofile.IsLoaded(DefaultApparmorProfile); err == nil {
            spec.Process.ApparmorProfile = DefaultApparmorProfile
        }
    }

    return spec
}
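For illustration, a minimal sketch of building a spec through the SpecOpts API above. The prestart hook binary path is a made-up example, not something this commit ships.

package main

import (
    "fmt"

    "github.com/genuinetools/binctr/container"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    // Hypothetical hook: run a helper binary before the container starts.
    hooks := &specs.Hooks{
        Prestart: []specs.Hook{
            {Path: "/usr/local/bin/setup-netns"}, // made-up path, illustration only
        },
    }

    spec := container.Spec(container.SpecOpts{
        Rootless: true,
        Readonly: true,
        Terminal: false,
        Hooks:    hooks,
    })

    fmt.Println(spec.Root.Readonly, spec.Process.Terminal)
}

The busybox example added below is the commit's own, fuller demonstration of the same API.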
examples/busybox/generate.go (new file, 18 lines):

@@ -0,0 +1,18 @@
// +build ignore

package main

import (
    "fmt"
    "os"

    "github.com/genuinetools/binctr/container"
)

// Pulls an image and saves the binary data in the container package bindata.go.
func main() {
    if err := container.EmbedImage("busybox"); err != nil {
        fmt.Fprintf(os.Stderr, "embed image failed: %v\n", err)
        os.Exit(1)
    }
}
examples/busybox/main.go (new file, 88 lines):

@@ -0,0 +1,88 @@
package main

import (
    "flag"
    "os"
    "runtime"

    "github.com/genuinetools/binctr/container"
    "github.com/opencontainers/runc/libcontainer"
    _ "github.com/opencontainers/runc/libcontainer/nsenter"
    "github.com/sirupsen/logrus"
)

const (
    defaultRoot      = "/tmp/binctr-busybox"
    defaultRootfsDir = "rootfs"
)

var (
    containerID string
    root        string
)

func init() {
    // Parse flags
    flag.StringVar(&containerID, "id", "busybox", "container ID")
    flag.StringVar(&root, "root", defaultRoot, "root directory of container state, should be tmpfs")

    flag.Usage = func() {
        flag.PrintDefaults()
    }

    flag.Parse()
}

//go:generate go run generate.go
func main() {
    if len(os.Args) > 1 && os.Args[1] == "init" {
        runInit()
        return
    }

    // Create a new container spec with the following options.
    opts := container.SpecOpts{
        Rootless: true,
        Terminal: true,
    }
    spec := container.Spec(opts)

    // Initialize the container object.
    c := &container.Container{
        ID:       containerID,
        Spec:     spec,
        Root:     root,
        Rootless: true,
    }

    // Unpack the rootfs.
    if err := c.UnpackRootfs(defaultRootfsDir, Asset); err != nil {
        logrus.Fatal(err)
    }

    // Run the container.
    status, err := c.Run()
    if err != nil {
        logrus.Fatal(err)
    }

    // Remove the rootfs after the container has exited.
    if err := os.RemoveAll(defaultRootfsDir); err != nil {
        logrus.Warnf("removing rootfs failed: %v", err)
    }

    // Exit with the container's exit status.
    os.Exit(status)
}

func runInit() {
    runtime.GOMAXPROCS(1)
    runtime.LockOSThread()
    factory, _ := libcontainer.New("")
    if err := factory.StartInitialization(); err != nil {
        // as the error is sent back to the parent there is no need to log
        // or write it to stderr because the parent process will handle this
        os.Exit(1)
    }
    panic("libcontainer: container init failed to exec")
}
generate.go, 30 lines changed:

@@ -5,36 +5,14 @@ package main
 import (
     "fmt"
     "os"
-    "path/filepath"

-    bindata "github.com/jteeuwen/go-bindata"
+    "github.com/genuinetools/binctr/container"
 )

-// Reads image.tar and saves the binary data in image/bindata.go.
+// Pulls an image and saves the binary data in the container package bindata.go.
 func main() {
-    wd, err := os.Getwd()
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "os.Getwd: %v\n", err)
-        os.Exit(1)
-    }
-
-    tarPath := filepath.Join(wd, "image.tar")
-
-    // Create the bindata config.
-    bc := bindata.NewConfig()
-    bc.Input = []bindata.InputConfig{
-        {
-            Path:      tarPath,
-            Recursive: false,
-        },
-    }
-    bc.Output = filepath.Join(wd, "image", "bindata.go")
-    bc.Package = "image"
-    bc.NoMetadata = true
-    bc.Prefix = wd
-
-    if err := bindata.Translate(bc); err != nil {
-        fmt.Fprintf(os.Stderr, "bindata: %v\n", err)
+    if err := container.EmbedImage("alpine"); err != nil {
+        fmt.Fprintf(os.Stderr, "embed image failed: %v\n", err)
         os.Exit(1)
     }
 }
image/bindata.go, 235 lines changed: file diff suppressed because one or more lines are too long.

deleted file in the image package (filename not captured, 21 lines removed):

@@ -1,21 +0,0 @@
package image

import (
    "fmt"
)

// NAME is the name of the image that is embedded at compile time.
var NAME string

// SHA is the sha digest of the image that is embedded at compile time.
var SHA string

// Data returns the tarball image data that is embedded at compile time.
func Data() ([]byte, error) {
    data, err := Asset("image.tar")
    if err != nil {
        return nil, fmt.Errorf("getting bindata asset image.tar failed: %v", err)
    }

    return data, nil
}
main.go, 59 lines changed:

@@ -8,14 +8,10 @@ import (
     "runtime"
     "strings"

-    aaprofile "github.com/docker/docker/profiles/apparmor"
     "github.com/genuinetools/binctr/container"
-    "github.com/genuinetools/binctr/image"
     "github.com/genuinetools/binctr/version"
     "github.com/opencontainers/runc/libcontainer"
-    "github.com/opencontainers/runc/libcontainer/apparmor"
     _ "github.com/opencontainers/runc/libcontainer/nsenter"
-    "github.com/opencontainers/runc/libcontainer/specconv"
     specs "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/sirupsen/logrus"
 )
@@ -31,15 +27,13 @@ const (
  Fully static, self-contained container including the rootfs
  that can be run by an unprivileged user.

- Embedded Image: %s - %s
  Version: %s
  Build: %s

 `

     defaultRoot      = "/tmp/binctr"
     defaultRootfsDir = "rootfs"
-    defaultApparmorProfile = "docker-default"
 )

 var (
@@ -103,7 +97,7 @@ func (s stringSlice) ParseHooks() (hooks specs.Hooks, err error) {

 func init() {
     // Parse flags
-    flag.StringVar(&containerID, "id", image.NAME, "container ID")
+    flag.StringVar(&containerID, "id", "binctr", "container ID")
     flag.StringVar(&pidFile, "pid-file", "", "specify the file to write the process id to")
     flag.StringVar(&root, "root", defaultRoot, "root directory of container state, should be tmpfs")

@@ -119,14 +113,14 @@ func init() {
     flag.BoolVar(&debug, "D", false, "run in debug mode")

     flag.Usage = func() {
-        fmt.Fprint(os.Stderr, fmt.Sprintf(BANNER, image.NAME, image.SHA, version.VERSION, version.GITCOMMIT))
+        fmt.Fprint(os.Stderr, fmt.Sprintf(BANNER, version.VERSION, version.GITCOMMIT))
         flag.PrintDefaults()
     }

     flag.Parse()

     if vrsn {
-        fmt.Printf("%s, commit: %s, image: %s, image digest: %s", version.VERSION, version.GITCOMMIT, image.NAME, image.SHA)
+        fmt.Printf("%s, commit: %s", version.VERSION, version.GITCOMMIT)
         os.Exit(0)
     }

@@ -151,38 +145,14 @@ func main() {
         return
     }

-    // Initialize the spec.
-    spec := specconv.Example()
-
-    // Set the spec to be rootless.
-    specconv.ToRootless(spec)
-
-    // Setup readonly fs in spec.
-    spec.Root.Readonly = readonly
-
-    // Setup tty in spec.
-    spec.Process.Terminal = allocateTty
-
-    // Pass in any hooks to the spec.
-    spec.Hooks = &hooks
-
-    // Set the default seccomp profile.
-    spec.Linux.Seccomp = defaultSeccompProfile
-
-    // Install the default apparmor profile.
-    if apparmor.IsEnabled() {
-        // Check if we have the docker-default apparmor profile loaded.
-        if _, err := aaprofile.IsLoaded(defaultApparmorProfile); err != nil {
-            logrus.Warnf("AppArmor enabled on system but the %s profile is not loaded. apparmor_parser needs root to load a profile so we can't do it for you.", defaultApparmorProfile)
-        } else {
-            spec.Process.ApparmorProfile = defaultApparmorProfile
-        }
-    }
-
-    // Unpack the rootfs.
-    if err := unpackRootfs(spec); err != nil {
-        logrus.Fatal(err)
+    // Create a new container spec with the following options.
+    opts := container.SpecOpts{
+        Rootless: true,
+        Readonly: readonly,
+        Terminal: allocateTty,
+        Hooks:    &hooks,
     }
+    spec := container.Spec(opts)

     // Initialize the container object.
     c := &container.Container{
@@ -195,6 +165,11 @@ func main() {
         Rootless: true,
     }

+    // Unpack the rootfs.
+    if err := c.UnpackRootfs(defaultRootfsDir, Asset); err != nil {
+        logrus.Fatal(err)
+    }
+
     // Run the container.
     status, err := c.Run()
     if err != nil {
rootfs.go (deleted, 38 lines):

@@ -1,38 +0,0 @@
package main

import (
    "bytes"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/archive"
    "github.com/genuinetools/binctr/image"
    "github.com/opencontainers/runtime-spec/specs-go"
)

func unpackRootfs(spec *specs.Spec) error {
    // Make the rootfs directory.
    if err := os.MkdirAll(defaultRootfsDir, 0755); err != nil {
        return err
    }

    // Get the embedded tarball.
    data, err := image.Data()
    if err != nil {
        return err
    }

    // Unpack the tarball.
    r := bytes.NewReader(data)
    if err := archive.Untar(r, defaultRootfsDir, &archive.TarOptions{NoLchown: true}); err != nil {
        return err
    }

    // Write a resolv.conf.
    if err := ioutil.WriteFile(filepath.Join(defaultRootfsDir, "etc", "resolv.conf"), []byte("nameserver 8.8.8.8\nnameserver 8.8.4.4"), 0755); err != nil {
        return err
    }

    return nil
}
deleted file: the old package-main !seccomp profile (filename not captured, 10 lines removed):

@@ -1,10 +0,0 @@
// +build !seccomp

package main

import (
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// defaultProfile defines the whitelist for the default seccomp profile.
var defaultSeccompProfile = &specs.LinuxSeccomp{}
vendor/github.com/Azure/go-ansiterm/LICENSE (new file, generated, vendored, 21 lines):

The MIT License (MIT)

Copyright (c) 2015 Microsoft Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/github.com/Azure/go-ansiterm/README.md (new file, generated, vendored, 12 lines):

# go-ansiterm

This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.

For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.

The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).

See parser_test.go for examples exercising the state machine and generating appropriate function calls.

-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
188
vendor/github.com/Azure/go-ansiterm/constants.go
generated
vendored
Normal file
188
vendor/github.com/Azure/go-ansiterm/constants.go
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
||||||
|
package ansiterm
|
||||||
|
|
||||||
|
const LogEnv = "DEBUG_TERMINAL"
|
||||||
|
|
||||||
|
// ANSI constants
|
||||||
|
// References:
|
||||||
|
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
|
||||||
|
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||||
|
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
|
||||||
|
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
|
||||||
|
// -- http://vt100.net/emu/dec_ansi_parser
|
||||||
|
// -- http://vt100.net/emu/vt500_parser.svg
|
||||||
|
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
|
||||||
|
// -- http://www.inwap.com/pdp10/ansicode.txt
|
||||||
|
const (
|
||||||
|
// ECMA-48 Set Graphics Rendition
|
||||||
|
// Note:
|
||||||
|
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
|
||||||
|
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
|
||||||
|
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
|
||||||
|
ANSI_SGR_RESET = 0
|
||||||
|
ANSI_SGR_BOLD = 1
|
||||||
|
ANSI_SGR_DIM = 2
|
||||||
|
_ANSI_SGR_ITALIC = 3
|
||||||
|
ANSI_SGR_UNDERLINE = 4
|
||||||
|
_ANSI_SGR_BLINKSLOW = 5
|
||||||
|
_ANSI_SGR_BLINKFAST = 6
|
||||||
|
ANSI_SGR_REVERSE = 7
|
||||||
|
_ANSI_SGR_INVISIBLE = 8
|
||||||
|
_ANSI_SGR_LINETHROUGH = 9
|
||||||
|
_ANSI_SGR_FONT_00 = 10
|
||||||
|
_ANSI_SGR_FONT_01 = 11
|
||||||
|
_ANSI_SGR_FONT_02 = 12
|
||||||
|
_ANSI_SGR_FONT_03 = 13
|
||||||
|
_ANSI_SGR_FONT_04 = 14
|
||||||
|
_ANSI_SGR_FONT_05 = 15
|
||||||
|
_ANSI_SGR_FONT_06 = 16
|
||||||
|
_ANSI_SGR_FONT_07 = 17
|
||||||
|
_ANSI_SGR_FONT_08 = 18
|
||||||
|
_ANSI_SGR_FONT_09 = 19
|
||||||
|
_ANSI_SGR_FONT_10 = 20
|
||||||
|
_ANSI_SGR_DOUBLEUNDERLINE = 21
|
||||||
|
ANSI_SGR_BOLD_DIM_OFF = 22
|
||||||
|
_ANSI_SGR_ITALIC_OFF = 23
|
||||||
|
ANSI_SGR_UNDERLINE_OFF = 24
|
||||||
|
_ANSI_SGR_BLINK_OFF = 25
|
||||||
|
_ANSI_SGR_RESERVED_00 = 26
|
||||||
|
ANSI_SGR_REVERSE_OFF = 27
|
||||||
|
_ANSI_SGR_INVISIBLE_OFF = 28
|
||||||
|
_ANSI_SGR_LINETHROUGH_OFF = 29
|
||||||
|
ANSI_SGR_FOREGROUND_BLACK = 30
|
||||||
|
ANSI_SGR_FOREGROUND_RED = 31
|
||||||
|
ANSI_SGR_FOREGROUND_GREEN = 32
|
||||||
|
ANSI_SGR_FOREGROUND_YELLOW = 33
|
||||||
|
ANSI_SGR_FOREGROUND_BLUE = 34
|
||||||
|
ANSI_SGR_FOREGROUND_MAGENTA = 35
|
||||||
|
ANSI_SGR_FOREGROUND_CYAN = 36
|
||||||
|
ANSI_SGR_FOREGROUND_WHITE = 37
|
||||||
|
_ANSI_SGR_RESERVED_01 = 38
|
||||||
|
ANSI_SGR_FOREGROUND_DEFAULT = 39
|
||||||
|
ANSI_SGR_BACKGROUND_BLACK = 40
|
||||||
|
ANSI_SGR_BACKGROUND_RED = 41
|
||||||
|
	ANSI_SGR_BACKGROUND_GREEN   = 42
	ANSI_SGR_BACKGROUND_YELLOW  = 43
	ANSI_SGR_BACKGROUND_BLUE    = 44
	ANSI_SGR_BACKGROUND_MAGENTA = 45
	ANSI_SGR_BACKGROUND_CYAN    = 46
	ANSI_SGR_BACKGROUND_WHITE   = 47
	_ANSI_SGR_RESERVED_02       = 48
	ANSI_SGR_BACKGROUND_DEFAULT = 49

	// 50 - 65: Unsupported

	ANSI_MAX_CMD_LENGTH = 4096

	MAX_INPUT_EVENTS = 128
	DEFAULT_WIDTH    = 80
	DEFAULT_HEIGHT   = 24

	ANSI_BEL              = 0x07
	ANSI_BACKSPACE        = 0x08
	ANSI_TAB              = 0x09
	ANSI_LINE_FEED        = 0x0A
	ANSI_VERTICAL_TAB     = 0x0B
	ANSI_FORM_FEED        = 0x0C
	ANSI_CARRIAGE_RETURN  = 0x0D
	ANSI_ESCAPE_PRIMARY   = 0x1B
	ANSI_ESCAPE_SECONDARY = 0x5B
	ANSI_OSC_STRING_ENTRY = 0x5D
	ANSI_COMMAND_FIRST    = 0x40
	ANSI_COMMAND_LAST     = 0x7E
	DCS_ENTRY             = 0x90
	CSI_ENTRY             = 0x9B
	OSC_STRING            = 0x9D
	ANSI_PARAMETER_SEP    = ";"
	ANSI_CMD_G0           = '('
	ANSI_CMD_G1           = ')'
	ANSI_CMD_G2           = '*'
	ANSI_CMD_G3           = '+'
	ANSI_CMD_DECPNM       = '>'
	ANSI_CMD_DECPAM       = '='
	ANSI_CMD_OSC          = ']'
	ANSI_CMD_STR_TERM     = '\\'

	KEY_CONTROL_PARAM_2 = ";2"
	KEY_CONTROL_PARAM_3 = ";3"
	KEY_CONTROL_PARAM_4 = ";4"
	KEY_CONTROL_PARAM_5 = ";5"
	KEY_CONTROL_PARAM_6 = ";6"
	KEY_CONTROL_PARAM_7 = ";7"
	KEY_CONTROL_PARAM_8 = ";8"
	KEY_ESC_CSI         = "\x1B["
	KEY_ESC_N           = "\x1BN"
	KEY_ESC_O           = "\x1BO"

	FILL_CHARACTER = ' '
)

func getByteRange(start byte, end byte) []byte {
	bytes := make([]byte, 0, 32)
	for i := start; i <= end; i++ {
		bytes = append(bytes, byte(i))
	}

	return bytes
}

var toGroundBytes = getToGroundBytes()
var executors = getExecuteBytes()

// SPACE 20+A0 hex  Always and everywhere a blank space
// Intermediate 20-2F hex  !"#$%&'()*+,-./
var intermeds = getByteRange(0x20, 0x2F)

// Parameters 30-3F hex  0123456789:;<=>?
// CSI Parameters 30-39, 3B hex  0123456789;
var csiParams = getByteRange(0x30, 0x3F)

var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)

// Uppercase 40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
var upperCase = getByteRange(0x40, 0x5F)

// Lowercase 60-7E hex  `abcdefghijlkmnopqrstuvwxyz{|}~
var lowerCase = getByteRange(0x60, 0x7E)

// Alphabetics 40-7E hex (all of upper and lower case)
var alphabetics = append(upperCase, lowerCase...)

var printables = getByteRange(0x20, 0x7F)

var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
var escapeToGroundBytes = getEscapeToGroundBytes()

// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
// byte ranges below

func getEscapeToGroundBytes() []byte {
	escapeToGroundBytes := getByteRange(0x30, 0x4F)
	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
	return escapeToGroundBytes
}

func getExecuteBytes() []byte {
	executeBytes := getByteRange(0x00, 0x17)
	executeBytes = append(executeBytes, 0x19)
	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
	return executeBytes
}

func getToGroundBytes() []byte {
	groundBytes := []byte{0x18}
	groundBytes = append(groundBytes, 0x1A)
	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
	groundBytes = append(groundBytes, 0x99)
	groundBytes = append(groundBytes, 0x9A)
	groundBytes = append(groundBytes, 0x9C)
	return groundBytes
}

// Delete 7F hex  Always and everywhere ignored
// C1 Control 80-9F hex  32 additional control characters
// G1 Displayable A1-FE hex  94 additional displayable characters
// Special A0+FF hex  Same as SPACE and DELETE
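The byte-range tables above drive the state machine that follows: each incoming byte is classified by scanning these slices. A minimal package-internal sketch for orientation (the demo function is hypothetical and not part of the vendored code; sliceContains is the helper added in utilities.go later in this commit):

package ansiterm

import "fmt"

// demoByteClasses is a hypothetical illustration of the byte-class tables.
func demoByteClasses() {
	fmt.Println(sliceContains(alphabetics, 'm'))    // true: 'm' is a valid final byte of a CSI sequence (SGR)
	fmt.Println(sliceContains(executors, ANSI_BEL)) // true: BEL is executed rather than printed
	fmt.Println(sliceContains(printables, 0x1B))    // false: ESC starts an escape sequence instead
}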
7 vendor/github.com/Azure/go-ansiterm/context.go generated vendored Normal file
@@ -0,0 +1,7 @@
package ansiterm

type ansiContext struct {
	currentChar byte
	paramBuffer []byte
	interBuffer []byte
}
49 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go generated vendored Normal file
@@ -0,0 +1,49 @@
package ansiterm

type csiEntryState struct {
	baseState
}

func (csiState csiEntryState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiEntry::Handle %#x", b)

	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		return csiState.parser.csiParam, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}

	return csiState, nil
}

func (csiState csiEntryState) Transition(s state) error {
	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)

	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	case csiState.parser.csiParam:
		switch {
		case sliceContains(csiParams, csiState.parser.context.currentChar):
			csiState.parser.collectParam()
		case sliceContains(intermeds, csiState.parser.context.currentChar):
			csiState.parser.collectInter()
		}
	}

	return nil
}

func (csiState csiEntryState) Enter() error {
	csiState.parser.clear()
	return nil
}
38 vendor/github.com/Azure/go-ansiterm/csi_param_state.go generated vendored Normal file
@@ -0,0 +1,38 @@
package ansiterm

type csiParamState struct {
	baseState
}

func (csiState csiParamState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiParam::Handle %#x", b)

	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		csiState.parser.collectParam()
		return csiState, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}

	return csiState, nil
}

func (csiState csiParamState) Transition(s state) error {
	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)

	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	}

	return nil
}
36 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go generated vendored Normal file
@@ -0,0 +1,36 @@
package ansiterm

type escapeIntermediateState struct {
	baseState
}

func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(intermeds, b):
		return escState, escState.parser.collectInter()
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeIntermediateToGroundBytes, b):
		return escState.parser.ground, nil
	}

	return escState, nil
}

func (escState escapeIntermediateState) Transition(s state) error {
	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)

	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	}

	return nil
}
47 vendor/github.com/Azure/go-ansiterm/escape_state.go generated vendored Normal file
@@ -0,0 +1,47 @@
package ansiterm

type escapeState struct {
	baseState
}

func (escState escapeState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case b == ANSI_ESCAPE_SECONDARY:
		return escState.parser.csiEntry, nil
	case b == ANSI_OSC_STRING_ENTRY:
		return escState.parser.oscString, nil
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeToGroundBytes, b):
		return escState.parser.ground, nil
	case sliceContains(intermeds, b):
		return escState.parser.escapeIntermediate, nil
	}

	return escState, nil
}

func (escState escapeState) Transition(s state) error {
	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)

	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	case escState.parser.escapeIntermediate:
		return escState.parser.collectInter()
	}

	return nil
}

func (escState escapeState) Enter() error {
	escState.parser.clear()
	return nil
}
90 vendor/github.com/Azure/go-ansiterm/event_handler.go generated vendored Normal file
@@ -0,0 +1,90 @@
package ansiterm

type AnsiEventHandler interface {
	// Print
	Print(b byte) error

	// Execute C0 commands
	Execute(b byte) error

	// CUrsor Up
	CUU(int) error

	// CUrsor Down
	CUD(int) error

	// CUrsor Forward
	CUF(int) error

	// CUrsor Backward
	CUB(int) error

	// Cursor to Next Line
	CNL(int) error

	// Cursor to Previous Line
	CPL(int) error

	// Cursor Horizontal position Absolute
	CHA(int) error

	// Vertical line Position Absolute
	VPA(int) error

	// CUrsor Position
	CUP(int, int) error

	// Horizontal and Vertical Position (depends on PUM)
	HVP(int, int) error

	// Text Cursor Enable Mode
	DECTCEM(bool) error

	// Origin Mode
	DECOM(bool) error

	// 132 Column Mode
	DECCOLM(bool) error

	// Erase in Display
	ED(int) error

	// Erase in Line
	EL(int) error

	// Insert Line
	IL(int) error

	// Delete Line
	DL(int) error

	// Insert Character
	ICH(int) error

	// Delete Character
	DCH(int) error

	// Set Graphics Rendition
	SGR([]int) error

	// Pan Down
	SU(int) error

	// Pan Up
	SD(int) error

	// Device Attributes
	DA([]string) error

	// Set Top and Bottom Margins
	DECSTBM(int, int) error

	// Index
	IND() error

	// Reverse Index
	RI() error

	// Flush updates from previous commands
	Flush() error
}
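AnsiEventHandler is the full callback surface a consumer has to supply. As a minimal sketch of a conforming implementation (the package and type names below are hypothetical, not part of this commit), a handler that accepts every event and does nothing is a convenient starting point for a custom screen writer:

package ansidemo

import ansiterm "github.com/Azure/go-ansiterm"

// discardHandler accepts every event the parser reports and ignores it.
type discardHandler struct{}

func (discardHandler) Print(byte) error       { return nil }
func (discardHandler) Execute(byte) error     { return nil }
func (discardHandler) CUU(int) error          { return nil }
func (discardHandler) CUD(int) error          { return nil }
func (discardHandler) CUF(int) error          { return nil }
func (discardHandler) CUB(int) error          { return nil }
func (discardHandler) CNL(int) error          { return nil }
func (discardHandler) CPL(int) error          { return nil }
func (discardHandler) CHA(int) error          { return nil }
func (discardHandler) VPA(int) error          { return nil }
func (discardHandler) CUP(int, int) error     { return nil }
func (discardHandler) HVP(int, int) error     { return nil }
func (discardHandler) DECTCEM(bool) error     { return nil }
func (discardHandler) DECOM(bool) error       { return nil }
func (discardHandler) DECCOLM(bool) error     { return nil }
func (discardHandler) ED(int) error           { return nil }
func (discardHandler) EL(int) error           { return nil }
func (discardHandler) IL(int) error           { return nil }
func (discardHandler) DL(int) error           { return nil }
func (discardHandler) ICH(int) error          { return nil }
func (discardHandler) DCH(int) error          { return nil }
func (discardHandler) SGR([]int) error        { return nil }
func (discardHandler) SU(int) error           { return nil }
func (discardHandler) SD(int) error           { return nil }
func (discardHandler) DA([]string) error      { return nil }
func (discardHandler) DECSTBM(int, int) error { return nil }
func (discardHandler) IND() error             { return nil }
func (discardHandler) RI() error              { return nil }
func (discardHandler) Flush() error           { return nil }

// Compile-time check that every method of the interface is covered.
var _ ansiterm.AnsiEventHandler = discardHandler{}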
24 vendor/github.com/Azure/go-ansiterm/ground_state.go generated vendored Normal file
@@ -0,0 +1,24 @@
package ansiterm

type groundState struct {
	baseState
}

func (gs groundState) Handle(b byte) (s state, e error) {
	gs.parser.context.currentChar = b

	nextState, err := gs.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case sliceContains(printables, b):
		return gs, gs.parser.print()

	case sliceContains(executors, b):
		return gs, gs.parser.execute()
	}

	return gs, nil
}
31 vendor/github.com/Azure/go-ansiterm/osc_string_state.go generated vendored Normal file
@@ -0,0 +1,31 @@
package ansiterm

type oscStringState struct {
	baseState
}

func (oscState oscStringState) Handle(b byte) (s state, e error) {
	oscState.parser.logf("OscString::Handle %#x", b)
	nextState, err := oscState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}

	switch {
	case isOscStringTerminator(b):
		return oscState.parser.ground, nil
	}

	return oscState, nil
}

// See below for OSC string terminators for linux
// http://man7.org/linux/man-pages/man4/console_codes.4.html
func isOscStringTerminator(b byte) bool {

	if b == ANSI_BEL || b == 0x5C {
		return true
	}

	return false
}
151 vendor/github.com/Azure/go-ansiterm/parser.go generated vendored Normal file
@@ -0,0 +1,151 @@
package ansiterm

import (
	"errors"
	"log"
	"os"
)

type AnsiParser struct {
	currState          state
	eventHandler       AnsiEventHandler
	context            *ansiContext
	csiEntry           state
	csiParam           state
	dcsEntry           state
	escape             state
	escapeIntermediate state
	error              state
	ground             state
	oscString          state
	stateMap           []state

	logf func(string, ...interface{})
}

type Option func(*AnsiParser)

func WithLogf(f func(string, ...interface{})) Option {
	return func(ap *AnsiParser) {
		ap.logf = f
	}
}

func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
	ap := &AnsiParser{
		eventHandler: evtHandler,
		context:      &ansiContext{},
	}
	for _, o := range opts {
		o(ap)
	}

	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
		logFile, _ := os.Create("ansiParser.log")
		logger := log.New(logFile, "", log.LstdFlags)
		if ap.logf != nil {
			l := ap.logf
			ap.logf = func(s string, v ...interface{}) {
				l(s, v...)
				logger.Printf(s, v...)
			}
		} else {
			ap.logf = logger.Printf
		}
	}

	if ap.logf == nil {
		ap.logf = func(string, ...interface{}) {}
	}

	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
	ap.error = errorState{baseState{name: "Error", parser: ap}}
	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}

	ap.stateMap = []state{
		ap.csiEntry,
		ap.csiParam,
		ap.dcsEntry,
		ap.escape,
		ap.escapeIntermediate,
		ap.error,
		ap.ground,
		ap.oscString,
	}

	ap.currState = getState(initialState, ap.stateMap)

	ap.logf("CreateParser: parser %p", ap)
	return ap
}

func getState(name string, states []state) state {
	for _, el := range states {
		if el.Name() == name {
			return el
		}
	}

	return nil
}

func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
	for i, b := range bytes {
		if err := ap.handle(b); err != nil {
			return i, err
		}
	}

	return len(bytes), ap.eventHandler.Flush()
}

func (ap *AnsiParser) handle(b byte) error {
	ap.context.currentChar = b
	newState, err := ap.currState.Handle(b)
	if err != nil {
		return err
	}

	if newState == nil {
		ap.logf("WARNING: newState is nil")
		return errors.New("New state of 'nil' is invalid.")
	}

	if newState != ap.currState {
		if err := ap.changeState(newState); err != nil {
			return err
		}
	}

	return nil
}

func (ap *AnsiParser) changeState(newState state) error {
	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())

	// Exit old state
	if err := ap.currState.Exit(); err != nil {
		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
		return err
	}

	// Perform transition action
	if err := ap.currState.Transition(newState); err != nil {
		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
		return err
	}

	// Enter new state
	if err := newState.Enter(); err != nil {
		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
		return err
	}

	ap.currState = newState
	return nil
}
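A short test-style sketch of driving the parser end to end (the test name is hypothetical; it assumes it sits next to the package's own *_test.go files so it can reuse the TestAnsiEventHandler added later in this commit):

package ansiterm

import "testing"

func TestParseRedHi(t *testing.T) {
	handler := CreateTestAnsiEventHandler()
	parser := CreateParser("Ground", handler, WithLogf(t.Logf))

	// "\x1b[31m" selects a red foreground (SGR 31), then "hi" is printed.
	n, err := parser.Parse([]byte("\x1b[31mhi"))
	if err != nil || n != 7 {
		t.Fatalf("Parse returned (%d, %v)", n, err)
	}

	// Expected recorded calls: SGR([31]), Print([h]), Print([i]).
	t.Log(handler.FunctionCalls)
}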
99 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go generated vendored Normal file
@@ -0,0 +1,99 @@
package ansiterm

import (
	"strconv"
)

func parseParams(bytes []byte) ([]string, error) {
	paramBuff := make([]byte, 0, 0)
	params := []string{}

	for _, v := range bytes {
		if v == ';' {
			if len(paramBuff) > 0 {
				// Completed parameter, append it to the list
				s := string(paramBuff)
				params = append(params, s)
				paramBuff = make([]byte, 0, 0)
			}
		} else {
			paramBuff = append(paramBuff, v)
		}
	}

	// Last parameter may not be terminated with ';'
	if len(paramBuff) > 0 {
		s := string(paramBuff)
		params = append(params, s)
	}

	return params, nil
}

func parseCmd(context ansiContext) (string, error) {
	return string(context.currentChar), nil
}

func getInt(params []string, dflt int) int {
	i := getInts(params, 1, dflt)[0]
	return i
}

func getInts(params []string, minCount int, dflt int) []int {
	ints := []int{}

	for _, v := range params {
		i, _ := strconv.Atoi(v)
		// Zero is mapped to the default value in VT100.
		if i == 0 {
			i = dflt
		}
		ints = append(ints, i)
	}

	if len(ints) < minCount {
		remaining := minCount - len(ints)
		for i := 0; i < remaining; i++ {
			ints = append(ints, dflt)
		}
	}

	return ints
}

func (ap *AnsiParser) modeDispatch(param string, set bool) error {
	switch param {
	case "?3":
		return ap.eventHandler.DECCOLM(set)
	case "?6":
		return ap.eventHandler.DECOM(set)
	case "?25":
		return ap.eventHandler.DECTCEM(set)
	}
	return nil
}

func (ap *AnsiParser) hDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], true)
	}

	return nil
}

func (ap *AnsiParser) lDispatch(params []string) error {
	if len(params) == 1 {
		return ap.modeDispatch(params[0], false)
	}

	return nil
}

func getEraseParam(params []string) int {
	param := getInt(params, 0)
	if param < 0 || 3 < param {
		param = 0
	}

	return param
}
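For orientation, a package-internal sketch of how the helpers above normalize CSI parameters (the demo function is hypothetical, not part of the vendored code): empty fields are dropped, zero or missing values fall back to the VT100 default, and erase parameters are clamped to 0..3.

package ansiterm

import "fmt"

// demoParamHandling is a hypothetical illustration of the parameter helpers.
func demoParamHandling() {
	params, _ := parseParams([]byte("1;;5"))
	fmt.Println(params) // [1 5] - the empty middle field is dropped

	// Ask for at least two ints with default 1: "0" maps to 1, the missing slot is padded with 1.
	fmt.Println(getInts([]string{"0"}, 2, 1)) // [1 1]

	// Erase parameters outside 0..3 fall back to 0.
	fmt.Println(getEraseParam([]string{"7"})) // 0
}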
119 vendor/github.com/Azure/go-ansiterm/parser_actions.go generated vendored Normal file
@@ -0,0 +1,119 @@
package ansiterm

func (ap *AnsiParser) collectParam() error {
	currChar := ap.context.currentChar
	ap.logf("collectParam %#x", currChar)
	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
	return nil
}

func (ap *AnsiParser) collectInter() error {
	currChar := ap.context.currentChar
	ap.logf("collectInter %#x", currChar)
	// Intermediate bytes accumulate in their own buffer.
	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
	return nil
}

func (ap *AnsiParser) escDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	intermeds := ap.context.interBuffer
	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
	ap.logf("escDispatch: %v(%v)", cmd, intermeds)

	switch cmd {
	case "D": // IND
		return ap.eventHandler.IND()
	case "E": // NEL, equivalent to CRLF
		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
		if err == nil {
			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
		}
		return err
	case "M": // RI
		return ap.eventHandler.RI()
	}

	return nil
}

func (ap *AnsiParser) csiDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	params, _ := parseParams(ap.context.paramBuffer)
	ap.logf("Parsed params: %v with length: %d", params, len(params))

	ap.logf("csiDispatch: %v(%v)", cmd, params)

	switch cmd {
	case "@":
		return ap.eventHandler.ICH(getInt(params, 1))
	case "A":
		return ap.eventHandler.CUU(getInt(params, 1))
	case "B":
		return ap.eventHandler.CUD(getInt(params, 1))
	case "C":
		return ap.eventHandler.CUF(getInt(params, 1))
	case "D":
		return ap.eventHandler.CUB(getInt(params, 1))
	case "E":
		return ap.eventHandler.CNL(getInt(params, 1))
	case "F":
		return ap.eventHandler.CPL(getInt(params, 1))
	case "G":
		return ap.eventHandler.CHA(getInt(params, 1))
	case "H":
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.CUP(x, y)
	case "J":
		param := getEraseParam(params)
		return ap.eventHandler.ED(param)
	case "K":
		param := getEraseParam(params)
		return ap.eventHandler.EL(param)
	case "L":
		return ap.eventHandler.IL(getInt(params, 1))
	case "M":
		return ap.eventHandler.DL(getInt(params, 1))
	case "P":
		return ap.eventHandler.DCH(getInt(params, 1))
	case "S":
		return ap.eventHandler.SU(getInt(params, 1))
	case "T":
		return ap.eventHandler.SD(getInt(params, 1))
	case "c":
		return ap.eventHandler.DA(params)
	case "d":
		return ap.eventHandler.VPA(getInt(params, 1))
	case "f":
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.HVP(x, y)
	case "h":
		return ap.hDispatch(params)
	case "l":
		return ap.lDispatch(params)
	case "m":
		return ap.eventHandler.SGR(getInts(params, 1, 0))
	case "r":
		ints := getInts(params, 2, 1)
		top, bottom := ints[0], ints[1]
		return ap.eventHandler.DECSTBM(top, bottom)
	default:
		ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
		return nil
	}

}

func (ap *AnsiParser) print() error {
	return ap.eventHandler.Print(ap.context.currentChar)
}

func (ap *AnsiParser) clear() error {
	ap.context = &ansiContext{}
	return nil
}

func (ap *AnsiParser) execute() error {
	return ap.eventHandler.Execute(ap.context.currentChar)
}
141 vendor/github.com/Azure/go-ansiterm/parser_test.go generated vendored Normal file
@@ -0,0 +1,141 @@
package ansiterm

import (
	"fmt"
	"testing"
)

func TestStateTransitions(t *testing.T) {
	stateTransitionHelper(t, "CsiEntry", "Ground", alphabetics)
	stateTransitionHelper(t, "CsiEntry", "CsiParam", csiCollectables)
	stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY})
	stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D})
	stateTransitionHelper(t, "Escape", "Ground", escapeToGroundBytes)
	stateTransitionHelper(t, "Escape", "EscapeIntermediate", intermeds)
	stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", intermeds)
	stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", executors)
	stateTransitionHelper(t, "EscapeIntermediate", "Ground", escapeIntermediateToGroundBytes)
	stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL})
	stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C})
	stateTransitionHelper(t, "Ground", "Ground", executors)
}

func TestAnyToX(t *testing.T) {
	anyToXHelper(t, []byte{ANSI_ESCAPE_PRIMARY}, "Escape")
	anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry")
	anyToXHelper(t, []byte{OSC_STRING}, "OscString")
	anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry")
	anyToXHelper(t, toGroundBytes, "Ground")
}

func TestCollectCsiParams(t *testing.T) {
	parser, _ := createTestParser("CsiEntry")
	parser.Parse(csiCollectables)

	buffer := parser.context.paramBuffer
	bufferCount := len(buffer)

	if bufferCount != len(csiCollectables) {
		t.Errorf("Buffer: %v", buffer)
		t.Errorf("CsiParams: %v", csiCollectables)
		t.Errorf("Buffer count failure: %d != %d", bufferCount, len(csiParams))
		return
	}

	for i, v := range csiCollectables {
		if v != buffer[i] {
			t.Errorf("Buffer: %v", buffer)
			t.Errorf("CsiParams: %v", csiParams)
			t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i])
		}
	}
}

func TestParseParams(t *testing.T) {
	parseParamsHelper(t, []byte{}, []string{})
	parseParamsHelper(t, []byte{';'}, []string{})
	parseParamsHelper(t, []byte{';', ';'}, []string{})
	parseParamsHelper(t, []byte{'7'}, []string{"7"})
	parseParamsHelper(t, []byte{'7', ';'}, []string{"7"})
	parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"})
	parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"})
	parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"})
	parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"})
	parseParamsHelper(t, []byte{'7', '8'}, []string{"78"})
	parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"})
	parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"})
	parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"})
	parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"})
	parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"})
}

func TestCursor(t *testing.T) {
	cursorSingleParamHelper(t, 'A', "CUU")
	cursorSingleParamHelper(t, 'B', "CUD")
	cursorSingleParamHelper(t, 'C', "CUF")
	cursorSingleParamHelper(t, 'D', "CUB")
	cursorSingleParamHelper(t, 'E', "CNL")
	cursorSingleParamHelper(t, 'F', "CPL")
	cursorSingleParamHelper(t, 'G', "CHA")
	cursorTwoParamHelper(t, 'H', "CUP")
	cursorTwoParamHelper(t, 'f', "HVP")
	funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"})
	funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"})
}

func TestErase(t *testing.T) {
	// Erase in Display
	eraseHelper(t, 'J', "ED")

	// Erase in Line
	eraseHelper(t, 'K', "EL")
}

func TestSelectGraphicRendition(t *testing.T) {
	funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"})
	funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"})
	funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"})
	funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"})
}

func TestScroll(t *testing.T) {
	scrollHelper(t, 'S', "SU")
	scrollHelper(t, 'T', "SD")
}

func TestPrint(t *testing.T) {
	parser, evtHandler := createTestParser("Ground")
	parser.Parse(printables)
	validateState(t, parser.currState, "Ground")

	for i, v := range printables {
		expectedCall := fmt.Sprintf("Print([%s])", string(v))
		actualCall := evtHandler.FunctionCalls[i]
		if actualCall != expectedCall {
			t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i)
		}
	}
}

func TestClear(t *testing.T) {
	p, _ := createTestParser("Ground")
	fillContext(p.context)
	p.clear()
	validateEmptyContext(t, p.context)
}

func TestClearOnStateChange(t *testing.T) {
	clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY})
	clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY})
}

func TestC0(t *testing.T) {
	expectedCall := "Execute([" + string(ANSI_LINE_FEED) + "])"
	c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall})
	expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])"
	c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall})
}

func TestEscDispatch(t *testing.T) {
	funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"})
}
114 vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go generated vendored Normal file
@@ -0,0 +1,114 @@
package ansiterm

import (
	"fmt"
	"testing"
)

func getStateNames() []string {
	parser, _ := createTestParser("Ground")

	stateNames := []string{}
	for _, state := range parser.stateMap {
		stateNames = append(stateNames, state.Name())
	}

	return stateNames
}

func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) {
	for _, b := range bytes {
		bytes := []byte{byte(b)}
		parser, _ := createTestParser(start)
		parser.Parse(bytes)
		validateState(t, parser.currState, end)
	}
}

func anyToXHelper(t *testing.T, bytes []byte, expectedState string) {
	for _, s := range getStateNames() {
		stateTransitionHelper(t, s, expectedState, bytes)
	}
}

func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) {
	parser, evtHandler := createTestParser(start)
	parser.Parse(bytes)
	validateState(t, parser.currState, expected)
	validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}

func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) {
	params, err := parseParams(bytes)

	if err != nil {
		t.Errorf("Parameter parse error: %v", err)
		return
	}

	if len(params) != len(expectedParams) {
		t.Errorf("Parsed parameters: %v", params)
		t.Errorf("Expected parameters: %v", expectedParams)
		t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams))
		return
	}

	for i, v := range expectedParams {
		if v != params[i] {
			t.Errorf("Parsed parameters: %v", params)
			t.Errorf("Expected parameters: %v", expectedParams)
			t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i)
		}
	}
}

func cursorSingleParamHelper(t *testing.T, command byte, funcName string) {
	funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
	funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)})
	funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
	funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
}

func cursorTwoParamHelper(t *testing.T, command byte, funcName string) {
	funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
	funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)})
	funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)})
	funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)})
	funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
	funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)})
}

func eraseHelper(t *testing.T, command byte, funcName string) {
	funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
	funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
	funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)})
	funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)})
	funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)})
	funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
}

func scrollHelper(t *testing.T, command byte, funcName string) {
	funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)})
	funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)})
	funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)})
}

func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) {
	p, _ := createTestParser(start)
	fillContext(p.context)
	p.Parse(bytes)
	validateState(t, p.currState, end)
	validateEmptyContext(t, p.context)
}

func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) {
	parser, evtHandler := createTestParser("Ground")
	parser.Parse(bytes)
	validateState(t, parser.currState, expectedState)
	validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls)
}
66 vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go generated vendored Normal file
@@ -0,0 +1,66 @@
package ansiterm

import (
	"testing"
)

func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) {
	evtHandler := CreateTestAnsiEventHandler()
	parser := CreateParser(s, evtHandler)

	return parser, evtHandler
}

func validateState(t *testing.T, actualState state, expectedStateName string) {
	actualName := "Nil"

	if actualState != nil {
		actualName = actualState.Name()
	}

	if actualName != expectedStateName {
		t.Errorf("Invalid state: '%s' != '%s'", actualName, expectedStateName)
	}
}

func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) {
	actualCount := len(actualCalls)
	expectedCount := len(expectedCalls)

	if actualCount != expectedCount {
		t.Errorf("Actual calls: %v", actualCalls)
		t.Errorf("Expected calls: %v", expectedCalls)
		t.Errorf("Call count error: %d != %d", actualCount, expectedCount)
		return
	}

	for i, v := range actualCalls {
		if v != expectedCalls[i] {
			t.Errorf("Actual calls: %v", actualCalls)
			t.Errorf("Expected calls: %v", expectedCalls)
			t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i]))
		}
	}
}

func fillContext(context *ansiContext) {
	context.currentChar = 'A'
	context.paramBuffer = []byte{'C', 'D', 'E'}
	context.interBuffer = []byte{'F', 'G', 'H'}
}

func validateEmptyContext(t *testing.T, context *ansiContext) {
	var expectedCurrChar byte = 0x0
	if context.currentChar != expectedCurrChar {
		t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar)
	}

	if len(context.paramBuffer) != 0 {
		t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer)
	}

	if len(context.interBuffer) != 0 {
		t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer)
	}

}
71 vendor/github.com/Azure/go-ansiterm/states.go generated vendored Normal file
@@ -0,0 +1,71 @@
package ansiterm

type stateID int

type state interface {
	Enter() error
	Exit() error
	Handle(byte) (state, error)
	Name() string
	Transition(state) error
}

type baseState struct {
	name   string
	parser *AnsiParser
}

func (base baseState) Enter() error {
	return nil
}

func (base baseState) Exit() error {
	return nil
}

func (base baseState) Handle(b byte) (s state, e error) {

	switch {
	case b == CSI_ENTRY:
		return base.parser.csiEntry, nil
	case b == DCS_ENTRY:
		return base.parser.dcsEntry, nil
	case b == ANSI_ESCAPE_PRIMARY:
		return base.parser.escape, nil
	case b == OSC_STRING:
		return base.parser.oscString, nil
	case sliceContains(toGroundBytes, b):
		return base.parser.ground, nil
	}

	return nil, nil
}

func (base baseState) Name() string {
	return base.name
}

func (base baseState) Transition(s state) error {
	if s == base.parser.ground {
		execBytes := []byte{0x18}
		execBytes = append(execBytes, 0x1A)
		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
		execBytes = append(execBytes, 0x99)
		execBytes = append(execBytes, 0x9A)

		if sliceContains(execBytes, base.parser.context.currentChar) {
			return base.parser.execute()
		}
	}

	return nil
}

type dcsEntryState struct {
	baseState
}

type errorState struct {
	baseState
}
173 vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go generated vendored Normal file
@@ -0,0 +1,173 @@
package ansiterm

import (
	"fmt"
	"strconv"
)

type TestAnsiEventHandler struct {
	FunctionCalls []string
}

func CreateTestAnsiEventHandler() *TestAnsiEventHandler {
	evtHandler := TestAnsiEventHandler{}
	evtHandler.FunctionCalls = make([]string, 0)
	return &evtHandler
}

func (h *TestAnsiEventHandler) recordCall(call string, params []string) {
	s := fmt.Sprintf("%s(%v)", call, params)
	h.FunctionCalls = append(h.FunctionCalls, s)
}

func (h *TestAnsiEventHandler) Print(b byte) error {
	h.recordCall("Print", []string{string(b)})
	return nil
}

func (h *TestAnsiEventHandler) Execute(b byte) error {
	h.recordCall("Execute", []string{string(b)})
	return nil
}

func (h *TestAnsiEventHandler) CUU(param int) error {
	h.recordCall("CUU", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CUD(param int) error {
	h.recordCall("CUD", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CUF(param int) error {
	h.recordCall("CUF", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CUB(param int) error {
	h.recordCall("CUB", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CNL(param int) error {
	h.recordCall("CNL", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CPL(param int) error {
	h.recordCall("CPL", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CHA(param int) error {
	h.recordCall("CHA", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) VPA(param int) error {
	h.recordCall("VPA", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) CUP(x int, y int) error {
	xS, yS := strconv.Itoa(x), strconv.Itoa(y)
	h.recordCall("CUP", []string{xS, yS})
	return nil
}

func (h *TestAnsiEventHandler) HVP(x int, y int) error {
	xS, yS := strconv.Itoa(x), strconv.Itoa(y)
	h.recordCall("HVP", []string{xS, yS})
	return nil
}

func (h *TestAnsiEventHandler) DECTCEM(visible bool) error {
	h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)})
	return nil
}

func (h *TestAnsiEventHandler) DECOM(visible bool) error {
	h.recordCall("DECOM", []string{strconv.FormatBool(visible)})
	return nil
}

func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error {
	h.recordCall("DECCOLM", []string{strconv.FormatBool(use132)})
	return nil
}

func (h *TestAnsiEventHandler) ED(param int) error {
	h.recordCall("ED", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) EL(param int) error {
	h.recordCall("EL", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) IL(param int) error {
	h.recordCall("IL", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) DL(param int) error {
	h.recordCall("DL", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) ICH(param int) error {
	h.recordCall("ICH", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) DCH(param int) error {
	h.recordCall("DCH", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) SGR(params []int) error {
	strings := []string{}
	for _, v := range params {
		strings = append(strings, strconv.Itoa(v))
	}

	h.recordCall("SGR", strings)
	return nil
}

func (h *TestAnsiEventHandler) SU(param int) error {
	h.recordCall("SU", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) SD(param int) error {
	h.recordCall("SD", []string{strconv.Itoa(param)})
	return nil
}

func (h *TestAnsiEventHandler) DA(params []string) error {
	h.recordCall("DA", params)
	return nil
}

func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error {
	topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom)
	h.recordCall("DECSTBM", []string{topS, bottomS})
	return nil
}

func (h *TestAnsiEventHandler) RI() error {
	h.recordCall("RI", nil)
	return nil
}

func (h *TestAnsiEventHandler) IND() error {
	h.recordCall("IND", nil)
	return nil
}

func (h *TestAnsiEventHandler) Flush() error {
	return nil
}
21 vendor/github.com/Azure/go-ansiterm/utilities.go generated vendored Normal file
@@ -0,0 +1,21 @@
package ansiterm

import (
	"strconv"
)

func sliceContains(bytes []byte, b byte) bool {
	for _, v := range bytes {
		if v == b {
			return true
		}
	}

	return false
}

func convertBytesToInteger(bytes []byte) int {
	s := string(bytes)
	i, _ := strconv.Atoi(s)
	return i
}
182 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go generated vendored Normal file
@@ -0,0 +1,182 @@
// +build windows

package winterm

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"

	"github.com/Azure/go-ansiterm"
)

// Windows keyboard constants
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
const (
	VK_PRIOR    = 0x21 // PAGE UP key
	VK_NEXT     = 0x22 // PAGE DOWN key
	VK_END      = 0x23 // END key
	VK_HOME     = 0x24 // HOME key
	VK_LEFT     = 0x25 // LEFT ARROW key
	VK_UP       = 0x26 // UP ARROW key
	VK_RIGHT    = 0x27 // RIGHT ARROW key
	VK_DOWN     = 0x28 // DOWN ARROW key
	VK_SELECT   = 0x29 // SELECT key
	VK_PRINT    = 0x2A // PRINT key
	VK_EXECUTE  = 0x2B // EXECUTE key
	VK_SNAPSHOT = 0x2C // PRINT SCREEN key
	VK_INSERT   = 0x2D // INS key
	VK_DELETE   = 0x2E // DEL key
	VK_HELP     = 0x2F // HELP key
	VK_F1       = 0x70 // F1 key
	VK_F2       = 0x71 // F2 key
	VK_F3       = 0x72 // F3 key
	VK_F4       = 0x73 // F4 key
	VK_F5       = 0x74 // F5 key
	VK_F6       = 0x75 // F6 key
	VK_F7       = 0x76 // F7 key
	VK_F8       = 0x77 // F8 key
	VK_F9       = 0x78 // F9 key
	VK_F10      = 0x79 // F10 key
	VK_F11      = 0x7A // F11 key
	VK_F12      = 0x7B // F12 key

	RIGHT_ALT_PRESSED  = 0x0001
	LEFT_ALT_PRESSED   = 0x0002
	RIGHT_CTRL_PRESSED = 0x0004
	LEFT_CTRL_PRESSED  = 0x0008
	SHIFT_PRESSED      = 0x0010
	NUMLOCK_ON         = 0x0020
	SCROLLLOCK_ON      = 0x0040
	CAPSLOCK_ON        = 0x0080
	ENHANCED_KEY       = 0x0100
)

type ansiCommand struct {
	CommandBytes []byte
	Command      string
	Parameters   []string
	IsSpecial    bool
}

func newAnsiCommand(command []byte) *ansiCommand {

	if isCharacterSelectionCmdChar(command[1]) {
		// Is Character Set Selection commands
		return &ansiCommand{
			CommandBytes: command,
			Command:      string(command),
			IsSpecial:    true,
		}
	}

	// last char is command character
	lastCharIndex := len(command) - 1

	ac := &ansiCommand{
		CommandBytes: command,
		Command:      string(command[lastCharIndex]),
		IsSpecial:    false,
	}

	// more than a single escape
	if lastCharIndex != 0 {
		start := 1
		// skip if double char escape sequence
		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
			start++
		}
		// convert this to GetNextParam method
		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
	}

	return ac
}

func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
	if index < 0 || index >= len(ac.Parameters) {
		return defaultValue
	}

	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
	if err != nil {
		return defaultValue
	}

	return int16(param)
}

func (ac *ansiCommand) String() string {
	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
		bytesToHex(ac.CommandBytes),
		ac.Command,
		strings.Join(ac.Parameters, "\",\""))
}

// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
func isAnsiCommandChar(b byte) bool {
	switch {
	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
		return true
	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
		// non-CSI escape sequence terminator
		return true
	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
		// String escape sequence terminator
		return true
	}
	return false
}

func isXtermOscSequence(command []byte, current byte) bool {
	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
}

func isCharacterSelectionCmdChar(b byte) bool {
	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
}

// bytesToHex converts a slice of bytes to a human-readable string.
func bytesToHex(b []byte) string {
	hex := make([]string, len(b))
	for i, ch := range b {
		hex[i] = fmt.Sprintf("%X", ch)
	}
	return strings.Join(hex, "")
}

// ensureInRange adjusts the passed value, if necessary, to ensure it is within
// the passed min / max range.
func ensureInRange(n int16, min int16, max int16) int16 {
	if n < min {
		return min
	} else if n > max {
		return max
	} else {
		return n
	}
}

func GetStdFile(nFile int) (*os.File, uintptr) {
	var file *os.File
	switch nFile {
	case syscall.STD_INPUT_HANDLE:
		file = os.Stdin
	case syscall.STD_OUTPUT_HANDLE:
		file = os.Stdout
	case syscall.STD_ERROR_HANDLE:
		file = os.Stderr
	default:
		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
	}

	fd, err := syscall.GetStdHandle(nFile)
	if err != nil {
		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
	}

	return file, uintptr(fd)
}
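A package-internal sketch of what newAnsiCommand produces for a typical CSI sequence (the demo function is hypothetical and, like the rest of the package, Windows-only):

// +build windows

package winterm

import "fmt"

// demoAnsiCommand is a hypothetical illustration of command parsing.
func demoAnsiCommand() {
	ac := newAnsiCommand([]byte("\x1b[1;31m"))
	fmt.Println(ac.Command)    // m
	fmt.Println(ac.Parameters) // [1 31]
}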
327
vendor/github.com/Azure/go-ansiterm/winterm/api.go
generated
vendored
Normal file
|
@@ -0,0 +1,327 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winterm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
//===========================================================================================================
|
||||||
|
// IMPORTANT NOTE:
|
||||||
|
//
|
||||||
|
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
|
||||||
|
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
|
||||||
|
// variables) the pointers reference *before* the API completes.
|
||||||
|
//
|
||||||
|
// As a result, in those cases, the code must hint that the variables remain in active by invoking the
|
||||||
|
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
|
||||||
|
// require unsafe pointers.
|
||||||
|
//
|
||||||
|
// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
|
||||||
|
// the garbage collector the variables remain in use if:
|
||||||
|
//
|
||||||
|
// -- The value is not a pointer (e.g., int32, struct)
|
||||||
|
// -- The value is not referenced by the method after passing the pointer to Windows
|
||||||
|
//
|
||||||
|
// See http://golang.org/doc/go1.3.
|
||||||
|
//===========================================================================================================
|
||||||
|
|
||||||
|
var (
|
||||||
|
kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
|
||||||
|
|
||||||
|
getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
|
||||||
|
setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
|
||||||
|
setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
|
||||||
|
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
|
||||||
|
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
|
||||||
|
setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
|
||||||
|
scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
|
||||||
|
setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
|
||||||
|
setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
|
||||||
|
writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
|
||||||
|
readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
|
||||||
|
waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Windows Console constants
|
||||||
|
const (
|
||||||
|
// Console modes
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||||
|
ENABLE_PROCESSED_INPUT = 0x0001
|
||||||
|
ENABLE_LINE_INPUT = 0x0002
|
||||||
|
ENABLE_ECHO_INPUT = 0x0004
|
||||||
|
ENABLE_WINDOW_INPUT = 0x0008
|
||||||
|
ENABLE_MOUSE_INPUT = 0x0010
|
||||||
|
ENABLE_INSERT_MODE = 0x0020
|
||||||
|
ENABLE_QUICK_EDIT_MODE = 0x0040
|
||||||
|
ENABLE_EXTENDED_FLAGS = 0x0080
|
||||||
|
ENABLE_AUTO_POSITION = 0x0100
|
||||||
|
ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
|
||||||
|
|
||||||
|
ENABLE_PROCESSED_OUTPUT = 0x0001
|
||||||
|
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
|
||||||
|
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
|
||||||
|
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
|
||||||
|
ENABLE_LVB_GRID_WORLDWIDE = 0x0010
|
||||||
|
|
||||||
|
// Character attributes
|
||||||
|
// Note:
|
||||||
|
// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
|
||||||
|
// Clearing all foreground or background colors results in black; setting all creates white.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
|
||||||
|
FOREGROUND_BLUE uint16 = 0x0001
|
||||||
|
FOREGROUND_GREEN uint16 = 0x0002
|
||||||
|
FOREGROUND_RED uint16 = 0x0004
|
||||||
|
FOREGROUND_INTENSITY uint16 = 0x0008
|
||||||
|
FOREGROUND_MASK uint16 = 0x000F
|
||||||
|
|
||||||
|
BACKGROUND_BLUE uint16 = 0x0010
|
||||||
|
BACKGROUND_GREEN uint16 = 0x0020
|
||||||
|
BACKGROUND_RED uint16 = 0x0040
|
||||||
|
BACKGROUND_INTENSITY uint16 = 0x0080
|
||||||
|
BACKGROUND_MASK uint16 = 0x00F0
|
||||||
|
|
||||||
|
COMMON_LVB_MASK uint16 = 0xFF00
|
||||||
|
COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
|
||||||
|
COMMON_LVB_UNDERSCORE uint16 = 0x8000
|
||||||
|
|
||||||
|
// Input event types
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||||
|
KEY_EVENT = 0x0001
|
||||||
|
MOUSE_EVENT = 0x0002
|
||||||
|
WINDOW_BUFFER_SIZE_EVENT = 0x0004
|
||||||
|
MENU_EVENT = 0x0008
|
||||||
|
FOCUS_EVENT = 0x0010
|
||||||
|
|
||||||
|
// WaitForSingleObject return codes
|
||||||
|
WAIT_ABANDONED = 0x00000080
|
||||||
|
WAIT_FAILED = 0xFFFFFFFF
|
||||||
|
WAIT_SIGNALED = 0x0000000
|
||||||
|
WAIT_TIMEOUT = 0x00000102
|
||||||
|
|
||||||
|
// WaitForSingleObject wait duration
|
||||||
|
WAIT_INFINITE = 0xFFFFFFFF
|
||||||
|
WAIT_ONE_SECOND = 1000
|
||||||
|
WAIT_HALF_SECOND = 500
|
||||||
|
WAIT_QUARTER_SECOND = 250
|
||||||
|
)
|
||||||
|
|
||||||
|
// Windows API Console types
|
||||||
|
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
|
||||||
|
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
|
||||||
|
type (
|
||||||
|
CHAR_INFO struct {
|
||||||
|
UnicodeChar uint16
|
||||||
|
Attributes uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
CONSOLE_CURSOR_INFO struct {
|
||||||
|
Size uint32
|
||||||
|
Visible int32
|
||||||
|
}
|
||||||
|
|
||||||
|
CONSOLE_SCREEN_BUFFER_INFO struct {
|
||||||
|
Size COORD
|
||||||
|
CursorPosition COORD
|
||||||
|
Attributes uint16
|
||||||
|
Window SMALL_RECT
|
||||||
|
MaximumWindowSize COORD
|
||||||
|
}
|
||||||
|
|
||||||
|
COORD struct {
|
||||||
|
X int16
|
||||||
|
Y int16
|
||||||
|
}
|
||||||
|
|
||||||
|
SMALL_RECT struct {
|
||||||
|
Left int16
|
||||||
|
Top int16
|
||||||
|
Right int16
|
||||||
|
Bottom int16
|
||||||
|
}
|
||||||
|
|
||||||
|
// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||||
|
INPUT_RECORD struct {
|
||||||
|
EventType uint16
|
||||||
|
KeyEvent KEY_EVENT_RECORD
|
||||||
|
}
|
||||||
|
|
||||||
|
KEY_EVENT_RECORD struct {
|
||||||
|
KeyDown int32
|
||||||
|
RepeatCount uint16
|
||||||
|
VirtualKeyCode uint16
|
||||||
|
VirtualScanCode uint16
|
||||||
|
UnicodeChar uint16
|
||||||
|
ControlKeyState uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
WINDOW_BUFFER_SIZE struct {
|
||||||
|
Size COORD
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// boolToBOOL converts a Go bool into a Windows int32.
|
||||||
|
func boolToBOOL(f bool) int32 {
|
||||||
|
if f {
|
||||||
|
return int32(1)
|
||||||
|
} else {
|
||||||
|
return int32(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
|
||||||
|
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||||
|
r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
|
||||||
|
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||||
|
r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleCursorPosition location of the console cursor.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
|
||||||
|
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
|
||||||
|
r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
|
||||||
|
use(coord)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConsoleMode gets the console mode for given file descriptor
|
||||||
|
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
|
||||||
|
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
|
||||||
|
err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
|
||||||
|
return mode, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleMode sets the console mode for given file descriptor
|
||||||
|
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||||
|
func SetConsoleMode(handle uintptr, mode uint32) error {
|
||||||
|
r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
|
||||||
|
use(mode)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
|
||||||
|
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
|
||||||
|
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||||
|
info := CONSOLE_SCREEN_BUFFER_INFO{}
|
||||||
|
err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
|
||||||
|
r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
|
||||||
|
use(scrollRect)
|
||||||
|
use(clipRect)
|
||||||
|
use(destOrigin)
|
||||||
|
use(char)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleScreenBufferSize sets the size of the console screen buffer.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
|
||||||
|
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
|
||||||
|
r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
|
||||||
|
use(coord)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleTextAttribute sets the attributes of characters written to the
|
||||||
|
// console screen buffer by the WriteFile or WriteConsole function.
|
||||||
|
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
|
||||||
|
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
|
||||||
|
r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
|
||||||
|
use(attribute)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
|
||||||
|
// Note that the size and location must be within and no larger than the backing console screen buffer.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
|
||||||
|
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
|
||||||
|
r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
|
||||||
|
use(isAbsolute)
|
||||||
|
use(rect)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
|
||||||
|
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
|
||||||
|
r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
|
||||||
|
use(buffer)
|
||||||
|
use(bufferSize)
|
||||||
|
use(bufferCoord)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadConsoleInput reads (and removes) data from the console input buffer.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
|
||||||
|
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
|
||||||
|
r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
|
||||||
|
use(buffer)
|
||||||
|
return checkError(r1, r2, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForSingleObject waits for the passed handle to be signaled.
|
||||||
|
// It returns true if the handle was signaled; false otherwise.
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
|
||||||
|
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
|
||||||
|
r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
|
||||||
|
switch r1 {
|
||||||
|
case WAIT_ABANDONED, WAIT_TIMEOUT:
|
||||||
|
return false, nil
|
||||||
|
case WAIT_SIGNALED:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
use(msWait)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// String helpers
|
||||||
|
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
|
||||||
|
return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (coord COORD) String() string {
|
||||||
|
return fmt.Sprintf("%v,%v", coord.X, coord.Y)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rect SMALL_RECT) String() string {
|
||||||
|
return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkError evaluates the results of a Windows API call and returns the error if it failed.
|
||||||
|
func checkError(r1, r2 uintptr, err error) error {
|
||||||
|
// Windows APIs return non-zero to indicate success
|
||||||
|
if r1 != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the error if provided, otherwise default to EINVAL
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return syscall.EINVAL
|
||||||
|
}
|
||||||
|
|
||||||
|
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
|
||||||
|
func coordToPointer(c COORD) uintptr {
|
||||||
|
// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
|
||||||
|
return uintptr(*((*uint32)(unsafe.Pointer(&c))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// use is a no-op, but the compiler cannot see that it is.
|
||||||
|
// Calling use(p) ensures that p is kept live until that point.
|
||||||
|
func use(p interface{}) {}
|
100
vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
generated
vendored
Normal file
|
@@ -0,0 +1,100 @@
// +build windows

package winterm

import "github.com/Azure/go-ansiterm"

const (
	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
)

// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
// request represented by the passed ANSI mode.
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
	switch ansiMode {

	// Mode styles
	case ansiterm.ANSI_SGR_BOLD:
		windowsMode = windowsMode | FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
		windowsMode &^= FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_UNDERLINE:
		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE

	case ansiterm.ANSI_SGR_REVERSE:
		inverted = true

	case ansiterm.ANSI_SGR_REVERSE_OFF:
		inverted = false

	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
		windowsMode &^= COMMON_LVB_UNDERSCORE

	// Foreground colors
	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_RED:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED

	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE

	// Background colors
	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
		// Black with no intensity
		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_RED:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED

	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
	}

	return windowsMode, inverted
}

// invertAttributes inverts the foreground and background colors of a Windows attributes value
func invertAttributes(windowsMode uint16) uint16 {
	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
}
|
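Editor's note: the following hypothetical test is a sketch of how the translation above behaves; it is not part of the vendored file. It assumes it would sit inside the winterm package (the functions are unexported) and folds the SGR parameters "1;31" (bold, then red foreground) into a Windows attribute word.

// +build windows

package winterm

import (
	"testing"

	"github.com/Azure/go-ansiterm"
)

// Editor's sketch: starting from a plain white-on-black base attribute, apply
// bold and then a red foreground, as the SGR handler in win_event_handler.go does.
func TestCollectAnsiIntoWindowsAttributesSketch(t *testing.T) {
	base := FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
	attrs, inverted := base, false

	for _, sgr := range []int16{ansiterm.ANSI_SGR_BOLD, ansiterm.ANSI_SGR_FOREGROUND_RED} {
		attrs, inverted = collectAnsiIntoWindowsAttributes(attrs, inverted, base, sgr)
	}

	// Expect a red foreground with the intensity (bold) bit set and no reverse video.
	want := FOREGROUND_RED | FOREGROUND_INTENSITY
	if attrs != want || inverted {
		t.Fatalf("got attrs=0x%04x inverted=%v, want attrs=0x%04x inverted=false", attrs, inverted, want)
	}
}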
101
vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
generated
vendored
Normal file
|
@@ -0,0 +1,101 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winterm
|
||||||
|
|
||||||
|
const (
|
||||||
|
horizontal = iota
|
||||||
|
vertical
|
||||||
|
)
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
|
||||||
|
if h.originMode {
|
||||||
|
sr := h.effectiveSr(info.Window)
|
||||||
|
return SMALL_RECT{
|
||||||
|
Top: sr.top,
|
||||||
|
Bottom: sr.bottom,
|
||||||
|
Left: 0,
|
||||||
|
Right: info.Size.X - 1,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return SMALL_RECT{
|
||||||
|
Top: info.Window.Top,
|
||||||
|
Bottom: info.Window.Bottom,
|
||||||
|
Left: 0,
|
||||||
|
Right: info.Size.X - 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// setCursorPosition sets the cursor to the specified position, bounded to the screen size
|
||||||
|
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
|
||||||
|
position.X = ensureInRange(position.X, window.Left, window.Right)
|
||||||
|
position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
|
||||||
|
err := SetConsoleCursorPosition(h.fd, position)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
|
||||||
|
return h.moveCursor(vertical, param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
|
||||||
|
return h.moveCursor(horizontal, param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
position := info.CursorPosition
|
||||||
|
switch moveMode {
|
||||||
|
case horizontal:
|
||||||
|
position.X += int16(param)
|
||||||
|
case vertical:
|
||||||
|
position.Y += int16(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
position := info.CursorPosition
|
||||||
|
position.X = 0
|
||||||
|
position.Y += int16(param)
|
||||||
|
|
||||||
|
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
position := info.CursorPosition
|
||||||
|
position.X = int16(param) - 1
|
||||||
|
|
||||||
|
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
84
vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
generated
vendored
Normal file
|
@@ -0,0 +1,84 @@
// +build windows

package winterm

import "github.com/Azure/go-ansiterm"

func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
	// Ignore an invalid (negative area) request
	if toCoord.Y < fromCoord.Y {
		return nil
	}

	var err error

	var coordStart = COORD{}
	var coordEnd = COORD{}

	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
	xEnd, yEnd := toCoord.X, toCoord.Y

	// Clear any partial initial line
	if xCurrent > 0 {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yCurrent

		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}

		xCurrent = 0
		yCurrent += 1
	}

	// Clear intervening rectangular section
	if yCurrent < yEnd {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yEnd-1

		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}

		xCurrent = 0
		yCurrent = yEnd
	}

	// Clear remaining partial ending line
	coordStart.X, coordStart.Y = xCurrent, yCurrent
	coordEnd.X, coordEnd.Y = xEnd, yEnd

	err = h.clearRect(attributes, coordStart, coordEnd)
	if err != nil {
		return err
	}

	return nil
}

func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
	width := toCoord.X - fromCoord.X + 1
	height := toCoord.Y - fromCoord.Y + 1
	size := uint32(width) * uint32(height)

	if size <= 0 {
		return nil
	}

	buffer := make([]CHAR_INFO, size)

	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
	for i := 0; i < int(size); i++ {
		buffer[i] = char
	}

	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
	if err != nil {
		return err
	}

	return nil
}
|
118
vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
generated
vendored
Normal file
|
@@ -0,0 +1,118 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winterm
|
||||||
|
|
||||||
|
// effectiveSr gets the current effective scroll region in buffer coordinates
|
||||||
|
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
|
||||||
|
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
|
||||||
|
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
|
||||||
|
if top >= bottom {
|
||||||
|
top = window.Top
|
||||||
|
bottom = window.Bottom
|
||||||
|
}
|
||||||
|
return scrollRegion{top: top, bottom: bottom}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sr := h.effectiveSr(info.Window)
|
||||||
|
return h.scroll(param, sr, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
|
||||||
|
return h.scrollUp(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
start := info.CursorPosition.Y
|
||||||
|
sr := h.effectiveSr(info.Window)
|
||||||
|
// Lines cannot be inserted or deleted outside the scrolling region.
|
||||||
|
if start >= sr.top && start <= sr.bottom {
|
||||||
|
sr.top = start
|
||||||
|
return h.scroll(param, sr, info)
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) insertLines(param int) error {
|
||||||
|
return h.deleteLines(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
|
||||||
|
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||||
|
h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
|
||||||
|
h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
|
||||||
|
|
||||||
|
// Copy from and clip to the scroll region (full buffer width)
|
||||||
|
scrollRect := SMALL_RECT{
|
||||||
|
Top: sr.top,
|
||||||
|
Bottom: sr.bottom,
|
||||||
|
Left: 0,
|
||||||
|
Right: info.Size.X - 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Origin to which area should be copied
|
||||||
|
destOrigin := COORD{
|
||||||
|
X: 0,
|
||||||
|
Y: sr.top - int16(param),
|
||||||
|
}
|
||||||
|
|
||||||
|
char := CHAR_INFO{
|
||||||
|
UnicodeChar: ' ',
|
||||||
|
Attributes: h.attributes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return h.scrollLine(param, info.CursorPosition, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
|
||||||
|
return h.deleteCharacters(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
|
||||||
|
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||||
|
// Copy from and clip to the scroll region (full buffer width)
|
||||||
|
scrollRect := SMALL_RECT{
|
||||||
|
Top: position.Y,
|
||||||
|
Bottom: position.Y,
|
||||||
|
Left: position.X,
|
||||||
|
Right: info.Size.X - 1,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Origin to which area should be copied
|
||||||
|
destOrigin := COORD{
|
||||||
|
X: position.X - int16(columns),
|
||||||
|
Y: position.Y,
|
||||||
|
}
|
||||||
|
|
||||||
|
char := CHAR_INFO{
|
||||||
|
UnicodeChar: ' ',
|
||||||
|
Attributes: h.attributes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
9
vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
generated
vendored
Normal file
|
@@ -0,0 +1,9 @@
// +build windows

package winterm

// AddInRange increments a value by the passed quantity while ensuring the values
// always remain within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
	return ensureInRange(n+increment, min, max)
}
|
743
vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
generated
vendored
Normal file
|
@@ -0,0 +1,743 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winterm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/Azure/go-ansiterm"
|
||||||
|
)
|
||||||
|
|
||||||
|
type windowsAnsiEventHandler struct {
|
||||||
|
fd uintptr
|
||||||
|
file *os.File
|
||||||
|
infoReset *CONSOLE_SCREEN_BUFFER_INFO
|
||||||
|
sr scrollRegion
|
||||||
|
buffer bytes.Buffer
|
||||||
|
attributes uint16
|
||||||
|
inverted bool
|
||||||
|
wrapNext bool
|
||||||
|
drewMarginByte bool
|
||||||
|
originMode bool
|
||||||
|
marginByte byte
|
||||||
|
curInfo *CONSOLE_SCREEN_BUFFER_INFO
|
||||||
|
curPos COORD
|
||||||
|
logf func(string, ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Option func(*windowsAnsiEventHandler)
|
||||||
|
|
||||||
|
func WithLogf(f func(string, ...interface{})) Option {
|
||||||
|
return func(w *windowsAnsiEventHandler) {
|
||||||
|
w.logf = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
|
||||||
|
infoReset, err := GetConsoleScreenBufferInfo(fd)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
h := &windowsAnsiEventHandler{
|
||||||
|
fd: fd,
|
||||||
|
file: file,
|
||||||
|
infoReset: infoReset,
|
||||||
|
attributes: infoReset.Attributes,
|
||||||
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
o(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
|
||||||
|
logFile, _ := os.Create("winEventHandler.log")
|
||||||
|
logger := log.New(logFile, "", log.LstdFlags)
|
||||||
|
if h.logf != nil {
|
||||||
|
l := h.logf
|
||||||
|
h.logf = func(s string, v ...interface{}) {
|
||||||
|
l(s, v...)
|
||||||
|
logger.Printf(s, v...)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
h.logf = logger.Printf
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if h.logf == nil {
|
||||||
|
h.logf = func(string, ...interface{}) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
type scrollRegion struct {
|
||||||
|
top int16
|
||||||
|
bottom int16
|
||||||
|
}
|
||||||
|
|
||||||
|
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
|
||||||
|
// current cursor position and scroll region settings, in which case it returns
|
||||||
|
// true. If no special handling is necessary, then it does nothing and returns
|
||||||
|
// false.
|
||||||
|
//
|
||||||
|
// In the false case, the caller should ensure that a carriage return
|
||||||
|
// and line feed are inserted or that the text is otherwise wrapped.
|
||||||
|
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
|
||||||
|
if h.wrapNext {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
h.clearWrap()
|
||||||
|
}
|
||||||
|
pos, info, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
sr := h.effectiveSr(info.Window)
|
||||||
|
if pos.Y == sr.bottom {
|
||||||
|
// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
|
||||||
|
// is the full window.
|
||||||
|
if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
|
||||||
|
if includeCR {
|
||||||
|
pos.X = 0
|
||||||
|
h.updatePos(pos)
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A custom scroll region is active. Scroll the window manually to simulate
|
||||||
|
// the LF.
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
h.logf("Simulating LF inside scroll region")
|
||||||
|
if err := h.scrollUp(1); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if includeCR {
|
||||||
|
pos.X = 0
|
||||||
|
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
|
||||||
|
} else if pos.Y < info.Window.Bottom {
|
||||||
|
// Let Windows handle the LF.
|
||||||
|
pos.Y++
|
||||||
|
if includeCR {
|
||||||
|
pos.X = 0
|
||||||
|
}
|
||||||
|
h.updatePos(pos)
|
||||||
|
return false, nil
|
||||||
|
} else {
|
||||||
|
// The cursor is at the bottom of the screen but outside the scroll
|
||||||
|
// region. Skip the LF.
|
||||||
|
h.logf("Simulating LF outside scroll region")
|
||||||
|
if includeCR {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
pos.X = 0
|
||||||
|
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeLF executes a LF without a CR.
|
||||||
|
func (h *windowsAnsiEventHandler) executeLF() error {
|
||||||
|
handled, err := h.simulateLF(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !handled {
|
||||||
|
// Windows LF will reset the cursor column position. Write the LF
|
||||||
|
// and restore the cursor position.
|
||||||
|
pos, _, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||||
|
if pos.X != 0 {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("Resetting cursor position for LF without CR")
|
||||||
|
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) Print(b byte) error {
|
||||||
|
if h.wrapNext {
|
||||||
|
h.buffer.WriteByte(h.marginByte)
|
||||||
|
h.clearWrap()
|
||||||
|
if _, err := h.simulateLF(true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pos, info, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pos.X == info.Size.X-1 {
|
||||||
|
h.wrapNext = true
|
||||||
|
h.marginByte = b
|
||||||
|
} else {
|
||||||
|
pos.X++
|
||||||
|
h.updatePos(pos)
|
||||||
|
h.buffer.WriteByte(b)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) Execute(b byte) error {
|
||||||
|
switch b {
|
||||||
|
case ansiterm.ANSI_TAB:
|
||||||
|
h.logf("Execute(TAB)")
|
||||||
|
// Move to the next tab stop, but preserve auto-wrap if already set.
|
||||||
|
if !h.wrapNext {
|
||||||
|
pos, info, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pos.X = (pos.X + 8) - pos.X%8
|
||||||
|
if pos.X >= info.Size.X {
|
||||||
|
pos.X = info.Size.X - 1
|
||||||
|
}
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ansiterm.ANSI_BEL:
|
||||||
|
h.buffer.WriteByte(ansiterm.ANSI_BEL)
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ansiterm.ANSI_BACKSPACE:
|
||||||
|
if h.wrapNext {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.clearWrap()
|
||||||
|
}
|
||||||
|
pos, _, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pos.X > 0 {
|
||||||
|
pos.X--
|
||||||
|
h.updatePos(pos)
|
||||||
|
h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
|
||||||
|
// Treat as true LF.
|
||||||
|
return h.executeLF()
|
||||||
|
|
||||||
|
case ansiterm.ANSI_LINE_FEED:
|
||||||
|
// Simulate a CR and LF for now since there is no way in go-ansiterm
|
||||||
|
// to tell if the LF should include CR (and more things break when it's
|
||||||
|
// missing than when it's incorrectly added).
|
||||||
|
handled, err := h.simulateLF(true)
|
||||||
|
if handled || err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||||
|
|
||||||
|
case ansiterm.ANSI_CARRIAGE_RETURN:
|
||||||
|
if h.wrapNext {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.clearWrap()
|
||||||
|
}
|
||||||
|
pos, _, err := h.getCurrentInfo()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pos.X != 0 {
|
||||||
|
pos.X = 0
|
||||||
|
h.updatePos(pos)
|
||||||
|
h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CUU(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorVertical(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CUD(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorVertical(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CUF(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorHorizontal(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CUB(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorHorizontal(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CNL(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorLine(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CPL(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorLine(-param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CHA(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.moveCursorColumn(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) VPA(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("VPA: [[%d]]", param)
|
||||||
|
h.clearWrap()
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
window := h.getCursorWindow(info)
|
||||||
|
position := info.CursorPosition
|
||||||
|
position.Y = window.Top + int16(param) - 1
|
||||||
|
return h.setCursorPosition(position, window)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("CUP: [[%d %d]]", row, col)
|
||||||
|
h.clearWrap()
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
window := h.getCursorWindow(info)
|
||||||
|
position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
|
||||||
|
return h.setCursorPosition(position, window)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("HVP: [[%d %d]]", row, col)
|
||||||
|
h.clearWrap()
|
||||||
|
return h.CUP(row, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
|
||||||
|
h.clearWrap()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
|
||||||
|
h.clearWrap()
|
||||||
|
h.originMode = enable
|
||||||
|
return h.CUP(1, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
|
||||||
|
h.clearWrap()
|
||||||
|
if err := h.ED(2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
targetWidth := int16(80)
|
||||||
|
if use132 {
|
||||||
|
targetWidth = 132
|
||||||
|
}
|
||||||
|
if info.Size.X < targetWidth {
|
||||||
|
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||||
|
h.logf("set buffer failed: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
window := info.Window
|
||||||
|
window.Left = 0
|
||||||
|
window.Right = targetWidth - 1
|
||||||
|
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||||
|
h.logf("set window failed: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if info.Size.X > targetWidth {
|
||||||
|
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||||
|
h.logf("set buffer failed: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return SetConsoleCursorPosition(h.fd, COORD{0, 0})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) ED(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("ED: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
|
||||||
|
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
|
||||||
|
// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
|
||||||
|
// [2J -- Erases the complete display. The cursor does not move.
|
||||||
|
// Notes:
|
||||||
|
// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
|
||||||
|
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var start COORD
|
||||||
|
var end COORD
|
||||||
|
|
||||||
|
switch param {
|
||||||
|
case 0:
|
||||||
|
start = info.CursorPosition
|
||||||
|
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||||
|
|
||||||
|
case 1:
|
||||||
|
start = COORD{0, 0}
|
||||||
|
end = info.CursorPosition
|
||||||
|
|
||||||
|
case 2:
|
||||||
|
start = COORD{0, 0}
|
||||||
|
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = h.clearRange(h.attributes, start, end)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the whole buffer was cleared, move the window to the top while preserving
|
||||||
|
// the window-relative cursor position.
|
||||||
|
if param == 2 {
|
||||||
|
pos := info.CursorPosition
|
||||||
|
window := info.Window
|
||||||
|
pos.Y -= window.Top
|
||||||
|
window.Bottom -= window.Top
|
||||||
|
window.Top = 0
|
||||||
|
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) EL(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("EL: [%v]", strconv.Itoa(param))
|
||||||
|
h.clearWrap()
|
||||||
|
|
||||||
|
// [K -- Erases from the cursor to the end of the line, including the cursor position.
|
||||||
|
// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
|
||||||
|
// [2K -- Erases the complete line.
|
||||||
|
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var start COORD
|
||||||
|
var end COORD
|
||||||
|
|
||||||
|
switch param {
|
||||||
|
case 0:
|
||||||
|
start = info.CursorPosition
|
||||||
|
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||||
|
|
||||||
|
case 1:
|
||||||
|
start = COORD{0, info.CursorPosition.Y}
|
||||||
|
end = info.CursorPosition
|
||||||
|
|
||||||
|
case 2:
|
||||||
|
start = COORD{0, info.CursorPosition.Y}
|
||||||
|
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = h.clearRange(h.attributes, start, end)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) IL(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("IL: [%v]", strconv.Itoa(param))
|
||||||
|
h.clearWrap()
|
||||||
|
return h.insertLines(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DL(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DL: [%v]", strconv.Itoa(param))
|
||||||
|
h.clearWrap()
|
||||||
|
return h.deleteLines(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) ICH(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("ICH: [%v]", strconv.Itoa(param))
|
||||||
|
h.clearWrap()
|
||||||
|
return h.insertCharacters(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DCH(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DCH: [%v]", strconv.Itoa(param))
|
||||||
|
h.clearWrap()
|
||||||
|
return h.deleteCharacters(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) SGR(params []int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
strings := []string{}
|
||||||
|
for _, v := range params {
|
||||||
|
strings = append(strings, strconv.Itoa(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
h.logf("SGR: [%v]", strings)
|
||||||
|
|
||||||
|
if len(params) <= 0 {
|
||||||
|
h.attributes = h.infoReset.Attributes
|
||||||
|
h.inverted = false
|
||||||
|
} else {
|
||||||
|
for _, attr := range params {
|
||||||
|
|
||||||
|
if attr == ansiterm.ANSI_SGR_RESET {
|
||||||
|
h.attributes = h.infoReset.Attributes
|
||||||
|
h.inverted = false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes := h.attributes
|
||||||
|
if h.inverted {
|
||||||
|
attributes = invertAttributes(attributes)
|
||||||
|
}
|
||||||
|
err := SetConsoleTextAttribute(h.fd, attributes)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) SU(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("SU: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.scrollUp(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) SD(param int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("SD: [%v]", []string{strconv.Itoa(param)})
|
||||||
|
h.clearWrap()
|
||||||
|
return h.scrollDown(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DA(params []string) error {
|
||||||
|
h.logf("DA: [%v]", params)
|
||||||
|
// DA cannot be implemented because it must send data on the VT100 input stream,
|
||||||
|
// which is not available to go-ansiterm.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("DECSTBM: [%d, %d]", top, bottom)
|
||||||
|
|
||||||
|
// Windows is 0 indexed, Linux is 1 indexed
|
||||||
|
h.sr.top = int16(top - 1)
|
||||||
|
h.sr.bottom = int16(bottom - 1)
|
||||||
|
|
||||||
|
// This command also moves the cursor to the origin.
|
||||||
|
h.clearWrap()
|
||||||
|
return h.CUP(1, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) RI() error {
|
||||||
|
if err := h.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.logf("RI: []")
|
||||||
|
h.clearWrap()
|
||||||
|
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sr := h.effectiveSr(info.Window)
|
||||||
|
if info.CursorPosition.Y == sr.top {
|
||||||
|
return h.scrollDown(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return h.moveCursorVertical(-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) IND() error {
|
||||||
|
h.logf("IND: []")
|
||||||
|
return h.executeLF()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) Flush() error {
|
||||||
|
h.curInfo = nil
|
||||||
|
if h.buffer.Len() > 0 {
|
||||||
|
h.logf("Flush: [%s]", h.buffer.Bytes())
|
||||||
|
if _, err := h.buffer.WriteTo(h.file); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if h.wrapNext && !h.drewMarginByte {
|
||||||
|
h.logf("Flush: drawing margin byte '%c'", h.marginByte)
|
||||||
|
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
|
||||||
|
size := COORD{1, 1}
|
||||||
|
position := COORD{0, 0}
|
||||||
|
region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
|
||||||
|
if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.drewMarginByte = true
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheConsoleInfo ensures that the current console screen information has been queried
|
||||||
|
// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
|
||||||
|
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||||
|
if h.curInfo == nil {
|
||||||
|
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||||
|
if err != nil {
|
||||||
|
return COORD{}, nil, err
|
||||||
|
}
|
||||||
|
h.curInfo = info
|
||||||
|
h.curPos = info.CursorPosition
|
||||||
|
}
|
||||||
|
return h.curPos, h.curInfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
|
||||||
|
if h.curInfo == nil {
|
||||||
|
panic("failed to call getCurrentInfo before calling updatePos")
|
||||||
|
}
|
||||||
|
h.curPos = pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// clearWrap clears the state where the cursor is in the margin
|
||||||
|
// waiting for the next character before wrapping the line. This must
|
||||||
|
// be done before most operations that act on the cursor.
|
||||||
|
func (h *windowsAnsiEventHandler) clearWrap() {
|
||||||
|
h.wrapNext = false
|
||||||
|
h.drewMarginByte = false
|
||||||
|
}
|
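Editor's note: a minimal wiring sketch, not part of the vendored code. It uses GetStdFile, CreateWinEventHandler and WithLogf as added in this commit; the ansiterm.CreateParser / Parse entry points that normally drive the handler are not shown in this diff and are assumed here.

// +build windows

package main

import (
	"log"
	"os"
	"syscall"

	"github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// Resolve stdout to its *os.File and raw console handle (helper added in this commit).
	file, fd := winterm.GetStdFile(syscall.STD_OUTPUT_HANDLE)

	// Build the Windows ANSI event handler; WithLogf routes its debug output to a logger.
	handler := winterm.CreateWinEventHandler(fd, file, winterm.WithLogf(log.New(os.Stderr, "winterm: ", 0).Printf))
	if handler == nil {
		log.Fatal("stdout is not attached to a Windows console")
	}

	// Assumption: go-ansiterm's parser (not part of this diff) feeds VT100 bytes to the
	// handler, which turns them into console API calls such as ED, CUP and Print.
	parser := ansiterm.CreateParser("Ground", handler)
	if _, err := parser.Parse([]byte("\x1b[2J\x1b[1;1Hhello, console\r\n")); err != nil {
		log.Fatal(err)
	}
}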
26
vendor/github.com/Nvveen/Gotty/LICENSE
generated
vendored
Normal file
|
@@ -0,0 +1,26 @@
Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
|
5 vendor/github.com/Nvveen/Gotty/README generated vendored Normal file
@@ -0,0 +1,5 @@
Gotty is a library written in Go that determines and reads termcap database
files to produce an interface for interacting with the capabilities of a
terminal.
See the godoc documentation or the source code for more information about
function usage.
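A minimal usage sketch of the API declared in gotty.go below; the capability names are standard terminfo long names, and the snippet assumes the machine has a terminfo entry for $TERM:

package main

import (
	"fmt"

	gotty "github.com/Nvveen/Gotty"
)

func main() {
	// Load the terminfo entry named by $TERM, searching the usual locations.
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		fmt.Println("no terminfo entry:", err)
		return
	}

	// Look up capabilities by their long names; GetAttributeName converts
	// "max_colors" to its termcap form ("colors") before the lookup.
	colors, err := ti.GetAttributeName("max_colors")
	fmt.Println("max_colors:", colors, err)

	clear, err := ti.GetAttributeName("clear_screen")
	fmt.Println("clear_screen:", clear, err)
}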
3 vendor/github.com/Nvveen/Gotty/TODO generated vendored Normal file
@@ -0,0 +1,3 @@
gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
all:// TODO add more documentation, with function usage in a doc.go file.
all:// TODO add more testing/benchmarking with go test.
514 vendor/github.com/Nvveen/Gotty/attributes.go generated vendored Normal file
@@ -0,0 +1,514 @@
|
||||||
|
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||||
|
// Usage of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package gotty
|
||||||
|
|
||||||
|
// Boolean capabilities
|
||||||
|
var BoolAttr = [...]string{
|
||||||
|
"auto_left_margin", "bw",
|
||||||
|
"auto_right_margin", "am",
|
||||||
|
"no_esc_ctlc", "xsb",
|
||||||
|
"ceol_standout_glitch", "xhp",
|
||||||
|
"eat_newline_glitch", "xenl",
|
||||||
|
"erase_overstrike", "eo",
|
||||||
|
"generic_type", "gn",
|
||||||
|
"hard_copy", "hc",
|
||||||
|
"has_meta_key", "km",
|
||||||
|
"has_status_line", "hs",
|
||||||
|
"insert_null_glitch", "in",
|
||||||
|
"memory_above", "da",
|
||||||
|
"memory_below", "db",
|
||||||
|
"move_insert_mode", "mir",
|
||||||
|
"move_standout_mode", "msgr",
|
||||||
|
"over_strike", "os",
|
||||||
|
"status_line_esc_ok", "eslok",
|
||||||
|
"dest_tabs_magic_smso", "xt",
|
||||||
|
"tilde_glitch", "hz",
|
||||||
|
"transparent_underline", "ul",
|
||||||
|
"xon_xoff", "nxon",
|
||||||
|
"needs_xon_xoff", "nxon",
|
||||||
|
"prtr_silent", "mc5i",
|
||||||
|
"hard_cursor", "chts",
|
||||||
|
"non_rev_rmcup", "nrrmc",
|
||||||
|
"no_pad_char", "npc",
|
||||||
|
"non_dest_scroll_region", "ndscr",
|
||||||
|
"can_change", "ccc",
|
||||||
|
"back_color_erase", "bce",
|
||||||
|
"hue_lightness_saturation", "hls",
|
||||||
|
"col_addr_glitch", "xhpa",
|
||||||
|
"cr_cancels_micro_mode", "crxm",
|
||||||
|
"has_print_wheel", "daisy",
|
||||||
|
"row_addr_glitch", "xvpa",
|
||||||
|
"semi_auto_right_margin", "sam",
|
||||||
|
"cpi_changes_res", "cpix",
|
||||||
|
"lpi_changes_res", "lpix",
|
||||||
|
"backspaces_with_bs", "",
|
||||||
|
"crt_no_scrolling", "",
|
||||||
|
"no_correctly_working_cr", "",
|
||||||
|
"gnu_has_meta_key", "",
|
||||||
|
"linefeed_is_newline", "",
|
||||||
|
"has_hardware_tabs", "",
|
||||||
|
"return_does_clr_eol", "",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Numerical capabilities
|
||||||
|
var NumAttr = [...]string{
|
||||||
|
"columns", "cols",
|
||||||
|
"init_tabs", "it",
|
||||||
|
"lines", "lines",
|
||||||
|
"lines_of_memory", "lm",
|
||||||
|
"magic_cookie_glitch", "xmc",
|
||||||
|
"padding_baud_rate", "pb",
|
||||||
|
"virtual_terminal", "vt",
|
||||||
|
"width_status_line", "wsl",
|
||||||
|
"num_labels", "nlab",
|
||||||
|
"label_height", "lh",
|
||||||
|
"label_width", "lw",
|
||||||
|
"max_attributes", "ma",
|
||||||
|
"maximum_windows", "wnum",
|
||||||
|
"max_colors", "colors",
|
||||||
|
"max_pairs", "pairs",
|
||||||
|
"no_color_video", "ncv",
|
||||||
|
"buffer_capacity", "bufsz",
|
||||||
|
"dot_vert_spacing", "spinv",
|
||||||
|
"dot_horz_spacing", "spinh",
|
||||||
|
"max_micro_address", "maddr",
|
||||||
|
"max_micro_jump", "mjump",
|
||||||
|
"micro_col_size", "mcs",
|
||||||
|
"micro_line_size", "mls",
|
||||||
|
"number_of_pins", "npins",
|
||||||
|
"output_res_char", "orc",
|
||||||
|
"output_res_line", "orl",
|
||||||
|
"output_res_horz_inch", "orhi",
|
||||||
|
"output_res_vert_inch", "orvi",
|
||||||
|
"print_rate", "cps",
|
||||||
|
"wide_char_size", "widcs",
|
||||||
|
"buttons", "btns",
|
||||||
|
"bit_image_entwining", "bitwin",
|
||||||
|
"bit_image_type", "bitype",
|
||||||
|
"magic_cookie_glitch_ul", "",
|
||||||
|
"carriage_return_delay", "",
|
||||||
|
"new_line_delay", "",
|
||||||
|
"backspace_delay", "",
|
||||||
|
"horizontal_tab_delay", "",
|
||||||
|
"number_of_function_keys", "",
|
||||||
|
}
|
||||||
|
|
||||||
|
// String capabilities
|
||||||
|
var StrAttr = [...]string{
|
||||||
|
"back_tab", "cbt",
|
||||||
|
"bell", "bel",
|
||||||
|
"carriage_return", "cr",
|
||||||
|
"change_scroll_region", "csr",
|
||||||
|
"clear_all_tabs", "tbc",
|
||||||
|
"clear_screen", "clear",
|
||||||
|
"clr_eol", "el",
|
||||||
|
"clr_eos", "ed",
|
||||||
|
"column_address", "hpa",
|
||||||
|
"command_character", "cmdch",
|
||||||
|
"cursor_address", "cup",
|
||||||
|
"cursor_down", "cud1",
|
||||||
|
"cursor_home", "home",
|
||||||
|
"cursor_invisible", "civis",
|
||||||
|
"cursor_left", "cub1",
|
||||||
|
"cursor_mem_address", "mrcup",
|
||||||
|
"cursor_normal", "cnorm",
|
||||||
|
"cursor_right", "cuf1",
|
||||||
|
"cursor_to_ll", "ll",
|
||||||
|
"cursor_up", "cuu1",
|
||||||
|
"cursor_visible", "cvvis",
|
||||||
|
"delete_character", "dch1",
|
||||||
|
"delete_line", "dl1",
|
||||||
|
"dis_status_line", "dsl",
|
||||||
|
"down_half_line", "hd",
|
||||||
|
"enter_alt_charset_mode", "smacs",
|
||||||
|
"enter_blink_mode", "blink",
|
||||||
|
"enter_bold_mode", "bold",
|
||||||
|
"enter_ca_mode", "smcup",
|
||||||
|
"enter_delete_mode", "smdc",
|
||||||
|
"enter_dim_mode", "dim",
|
||||||
|
"enter_insert_mode", "smir",
|
||||||
|
"enter_secure_mode", "invis",
|
||||||
|
"enter_protected_mode", "prot",
|
||||||
|
"enter_reverse_mode", "rev",
|
||||||
|
"enter_standout_mode", "smso",
|
||||||
|
"enter_underline_mode", "smul",
|
||||||
|
"erase_chars", "ech",
|
||||||
|
"exit_alt_charset_mode", "rmacs",
|
||||||
|
"exit_attribute_mode", "sgr0",
|
||||||
|
"exit_ca_mode", "rmcup",
|
||||||
|
"exit_delete_mode", "rmdc",
|
||||||
|
"exit_insert_mode", "rmir",
|
||||||
|
"exit_standout_mode", "rmso",
|
||||||
|
"exit_underline_mode", "rmul",
|
||||||
|
"flash_screen", "flash",
|
||||||
|
"form_feed", "ff",
|
||||||
|
"from_status_line", "fsl",
|
||||||
|
"init_1string", "is1",
|
||||||
|
"init_2string", "is2",
|
||||||
|
"init_3string", "is3",
|
||||||
|
"init_file", "if",
|
||||||
|
"insert_character", "ich1",
|
||||||
|
"insert_line", "il1",
|
||||||
|
"insert_padding", "ip",
|
||||||
|
"key_backspace", "kbs",
|
||||||
|
"key_catab", "ktbc",
|
||||||
|
"key_clear", "kclr",
|
||||||
|
"key_ctab", "kctab",
|
||||||
|
"key_dc", "kdch1",
|
||||||
|
"key_dl", "kdl1",
|
||||||
|
"key_down", "kcud1",
|
||||||
|
"key_eic", "krmir",
|
||||||
|
"key_eol", "kel",
|
||||||
|
"key_eos", "ked",
|
||||||
|
"key_f0", "kf0",
|
||||||
|
"key_f1", "kf1",
|
||||||
|
"key_f10", "kf10",
|
||||||
|
"key_f2", "kf2",
|
||||||
|
"key_f3", "kf3",
|
||||||
|
"key_f4", "kf4",
|
||||||
|
"key_f5", "kf5",
|
||||||
|
"key_f6", "kf6",
|
||||||
|
"key_f7", "kf7",
|
||||||
|
"key_f8", "kf8",
|
||||||
|
"key_f9", "kf9",
|
||||||
|
"key_home", "khome",
|
||||||
|
"key_ic", "kich1",
|
||||||
|
"key_il", "kil1",
|
||||||
|
"key_left", "kcub1",
|
||||||
|
"key_ll", "kll",
|
||||||
|
"key_npage", "knp",
|
||||||
|
"key_ppage", "kpp",
|
||||||
|
"key_right", "kcuf1",
|
||||||
|
"key_sf", "kind",
|
||||||
|
"key_sr", "kri",
|
||||||
|
"key_stab", "khts",
|
||||||
|
"key_up", "kcuu1",
|
||||||
|
"keypad_local", "rmkx",
|
||||||
|
"keypad_xmit", "smkx",
|
||||||
|
"lab_f0", "lf0",
|
||||||
|
"lab_f1", "lf1",
|
||||||
|
"lab_f10", "lf10",
|
||||||
|
"lab_f2", "lf2",
|
||||||
|
"lab_f3", "lf3",
|
||||||
|
"lab_f4", "lf4",
|
||||||
|
"lab_f5", "lf5",
|
||||||
|
"lab_f6", "lf6",
|
||||||
|
"lab_f7", "lf7",
|
||||||
|
"lab_f8", "lf8",
|
||||||
|
"lab_f9", "lf9",
|
||||||
|
"meta_off", "rmm",
|
||||||
|
"meta_on", "smm",
|
||||||
|
"newline", "_glitch",
|
||||||
|
"pad_char", "npc",
|
||||||
|
"parm_dch", "dch",
|
||||||
|
"parm_delete_line", "dl",
|
||||||
|
"parm_down_cursor", "cud",
|
||||||
|
"parm_ich", "ich",
|
||||||
|
"parm_index", "indn",
|
||||||
|
"parm_insert_line", "il",
|
||||||
|
"parm_left_cursor", "cub",
|
||||||
|
"parm_right_cursor", "cuf",
|
||||||
|
"parm_rindex", "rin",
|
||||||
|
"parm_up_cursor", "cuu",
|
||||||
|
"pkey_key", "pfkey",
|
||||||
|
"pkey_local", "pfloc",
|
||||||
|
"pkey_xmit", "pfx",
|
||||||
|
"print_screen", "mc0",
|
||||||
|
"prtr_off", "mc4",
|
||||||
|
"prtr_on", "mc5",
|
||||||
|
"repeat_char", "rep",
|
||||||
|
"reset_1string", "rs1",
|
||||||
|
"reset_2string", "rs2",
|
||||||
|
"reset_3string", "rs3",
|
||||||
|
"reset_file", "rf",
|
||||||
|
"restore_cursor", "rc",
|
||||||
|
"row_address", "mvpa",
|
||||||
|
"save_cursor", "row_address",
|
||||||
|
"scroll_forward", "ind",
|
||||||
|
"scroll_reverse", "ri",
|
||||||
|
"set_attributes", "sgr",
|
||||||
|
"set_tab", "hts",
|
||||||
|
"set_window", "wind",
|
||||||
|
"tab", "s_magic_smso",
|
||||||
|
"to_status_line", "tsl",
|
||||||
|
"underline_char", "uc",
|
||||||
|
"up_half_line", "hu",
|
||||||
|
"init_prog", "iprog",
|
||||||
|
"key_a1", "ka1",
|
||||||
|
"key_a3", "ka3",
|
||||||
|
"key_b2", "kb2",
|
||||||
|
"key_c1", "kc1",
|
||||||
|
"key_c3", "kc3",
|
||||||
|
"prtr_non", "mc5p",
|
||||||
|
"char_padding", "rmp",
|
||||||
|
"acs_chars", "acsc",
|
||||||
|
"plab_norm", "pln",
|
||||||
|
"key_btab", "kcbt",
|
||||||
|
"enter_xon_mode", "smxon",
|
||||||
|
"exit_xon_mode", "rmxon",
|
||||||
|
"enter_am_mode", "smam",
|
||||||
|
"exit_am_mode", "rmam",
|
||||||
|
"xon_character", "xonc",
|
||||||
|
"xoff_character", "xoffc",
|
||||||
|
"ena_acs", "enacs",
|
||||||
|
"label_on", "smln",
|
||||||
|
"label_off", "rmln",
|
||||||
|
"key_beg", "kbeg",
|
||||||
|
"key_cancel", "kcan",
|
||||||
|
"key_close", "kclo",
|
||||||
|
"key_command", "kcmd",
|
||||||
|
"key_copy", "kcpy",
|
||||||
|
"key_create", "kcrt",
|
||||||
|
"key_end", "kend",
|
||||||
|
"key_enter", "kent",
|
||||||
|
"key_exit", "kext",
|
||||||
|
"key_find", "kfnd",
|
||||||
|
"key_help", "khlp",
|
||||||
|
"key_mark", "kmrk",
|
||||||
|
"key_message", "kmsg",
|
||||||
|
"key_move", "kmov",
|
||||||
|
"key_next", "knxt",
|
||||||
|
"key_open", "kopn",
|
||||||
|
"key_options", "kopt",
|
||||||
|
"key_previous", "kprv",
|
||||||
|
"key_print", "kprt",
|
||||||
|
"key_redo", "krdo",
|
||||||
|
"key_reference", "kref",
|
||||||
|
"key_refresh", "krfr",
|
||||||
|
"key_replace", "krpl",
|
||||||
|
"key_restart", "krst",
|
||||||
|
"key_resume", "kres",
|
||||||
|
"key_save", "ksav",
|
||||||
|
"key_suspend", "kspd",
|
||||||
|
"key_undo", "kund",
|
||||||
|
"key_sbeg", "kBEG",
|
||||||
|
"key_scancel", "kCAN",
|
||||||
|
"key_scommand", "kCMD",
|
||||||
|
"key_scopy", "kCPY",
|
||||||
|
"key_screate", "kCRT",
|
||||||
|
"key_sdc", "kDC",
|
||||||
|
"key_sdl", "kDL",
|
||||||
|
"key_select", "kslt",
|
||||||
|
"key_send", "kEND",
|
||||||
|
"key_seol", "kEOL",
|
||||||
|
"key_sexit", "kEXT",
|
||||||
|
"key_sfind", "kFND",
|
||||||
|
"key_shelp", "kHLP",
|
||||||
|
"key_shome", "kHOM",
|
||||||
|
"key_sic", "kIC",
|
||||||
|
"key_sleft", "kLFT",
|
||||||
|
"key_smessage", "kMSG",
|
||||||
|
"key_smove", "kMOV",
|
||||||
|
"key_snext", "kNXT",
|
||||||
|
"key_soptions", "kOPT",
|
||||||
|
"key_sprevious", "kPRV",
|
||||||
|
"key_sprint", "kPRT",
|
||||||
|
"key_sredo", "kRDO",
|
||||||
|
"key_sreplace", "kRPL",
|
||||||
|
"key_sright", "kRIT",
|
||||||
|
"key_srsume", "kRES",
|
||||||
|
"key_ssave", "kSAV",
|
||||||
|
"key_ssuspend", "kSPD",
|
||||||
|
"key_sundo", "kUND",
|
||||||
|
"req_for_input", "rfi",
|
||||||
|
"key_f11", "kf11",
|
||||||
|
"key_f12", "kf12",
|
||||||
|
"key_f13", "kf13",
|
||||||
|
"key_f14", "kf14",
|
||||||
|
"key_f15", "kf15",
|
||||||
|
"key_f16", "kf16",
|
||||||
|
"key_f17", "kf17",
|
||||||
|
"key_f18", "kf18",
|
||||||
|
"key_f19", "kf19",
|
||||||
|
"key_f20", "kf20",
|
||||||
|
"key_f21", "kf21",
|
||||||
|
"key_f22", "kf22",
|
||||||
|
"key_f23", "kf23",
|
||||||
|
"key_f24", "kf24",
|
||||||
|
"key_f25", "kf25",
|
||||||
|
"key_f26", "kf26",
|
||||||
|
"key_f27", "kf27",
|
||||||
|
"key_f28", "kf28",
|
||||||
|
"key_f29", "kf29",
|
||||||
|
"key_f30", "kf30",
|
||||||
|
"key_f31", "kf31",
|
||||||
|
"key_f32", "kf32",
|
||||||
|
"key_f33", "kf33",
|
||||||
|
"key_f34", "kf34",
|
||||||
|
"key_f35", "kf35",
|
||||||
|
"key_f36", "kf36",
|
||||||
|
"key_f37", "kf37",
|
||||||
|
"key_f38", "kf38",
|
||||||
|
"key_f39", "kf39",
|
||||||
|
"key_f40", "kf40",
|
||||||
|
"key_f41", "kf41",
|
||||||
|
"key_f42", "kf42",
|
||||||
|
"key_f43", "kf43",
|
||||||
|
"key_f44", "kf44",
|
||||||
|
"key_f45", "kf45",
|
||||||
|
"key_f46", "kf46",
|
||||||
|
"key_f47", "kf47",
|
||||||
|
"key_f48", "kf48",
|
||||||
|
"key_f49", "kf49",
|
||||||
|
"key_f50", "kf50",
|
||||||
|
"key_f51", "kf51",
|
||||||
|
"key_f52", "kf52",
|
||||||
|
"key_f53", "kf53",
|
||||||
|
"key_f54", "kf54",
|
||||||
|
"key_f55", "kf55",
|
||||||
|
"key_f56", "kf56",
|
||||||
|
"key_f57", "kf57",
|
||||||
|
"key_f58", "kf58",
|
||||||
|
"key_f59", "kf59",
|
||||||
|
"key_f60", "kf60",
|
||||||
|
"key_f61", "kf61",
|
||||||
|
"key_f62", "kf62",
|
||||||
|
"key_f63", "kf63",
|
||||||
|
"clr_bol", "el1",
|
||||||
|
"clear_margins", "mgc",
|
||||||
|
"set_left_margin", "smgl",
|
||||||
|
"set_right_margin", "smgr",
|
||||||
|
"label_format", "fln",
|
||||||
|
"set_clock", "sclk",
|
||||||
|
"display_clock", "dclk",
|
||||||
|
"remove_clock", "rmclk",
|
||||||
|
"create_window", "cwin",
|
||||||
|
"goto_window", "wingo",
|
||||||
|
"hangup", "hup",
|
||||||
|
"dial_phone", "dial",
|
||||||
|
"quick_dial", "qdial",
|
||||||
|
"tone", "tone",
|
||||||
|
"pulse", "pulse",
|
||||||
|
"flash_hook", "hook",
|
||||||
|
"fixed_pause", "pause",
|
||||||
|
"wait_tone", "wait",
|
||||||
|
"user0", "u0",
|
||||||
|
"user1", "u1",
|
||||||
|
"user2", "u2",
|
||||||
|
"user3", "u3",
|
||||||
|
"user4", "u4",
|
||||||
|
"user5", "u5",
|
||||||
|
"user6", "u6",
|
||||||
|
"user7", "u7",
|
||||||
|
"user8", "u8",
|
||||||
|
"user9", "u9",
|
||||||
|
"orig_pair", "op",
|
||||||
|
"orig_colors", "oc",
|
||||||
|
"initialize_color", "initc",
|
||||||
|
"initialize_pair", "initp",
|
||||||
|
"set_color_pair", "scp",
|
||||||
|
"set_foreground", "setf",
|
||||||
|
"set_background", "setb",
|
||||||
|
"change_char_pitch", "cpi",
|
||||||
|
"change_line_pitch", "lpi",
|
||||||
|
"change_res_horz", "chr",
|
||||||
|
"change_res_vert", "cvr",
|
||||||
|
"define_char", "defc",
|
||||||
|
"enter_doublewide_mode", "swidm",
|
||||||
|
"enter_draft_quality", "sdrfq",
|
||||||
|
"enter_italics_mode", "sitm",
|
||||||
|
"enter_leftward_mode", "slm",
|
||||||
|
"enter_micro_mode", "smicm",
|
||||||
|
"enter_near_letter_quality", "snlq",
|
||||||
|
"enter_normal_quality", "snrmq",
|
||||||
|
"enter_shadow_mode", "sshm",
|
||||||
|
"enter_subscript_mode", "ssubm",
|
||||||
|
"enter_superscript_mode", "ssupm",
|
||||||
|
"enter_upward_mode", "sum",
|
||||||
|
"exit_doublewide_mode", "rwidm",
|
||||||
|
"exit_italics_mode", "ritm",
|
||||||
|
"exit_leftward_mode", "rlm",
|
||||||
|
"exit_micro_mode", "rmicm",
|
||||||
|
"exit_shadow_mode", "rshm",
|
||||||
|
"exit_subscript_mode", "rsubm",
|
||||||
|
"exit_superscript_mode", "rsupm",
|
||||||
|
"exit_upward_mode", "rum",
|
||||||
|
"micro_column_address", "mhpa",
|
||||||
|
"micro_down", "mcud1",
|
||||||
|
"micro_left", "mcub1",
|
||||||
|
"micro_right", "mcuf1",
|
||||||
|
"micro_row_address", "mvpa",
|
||||||
|
"micro_up", "mcuu1",
|
||||||
|
"order_of_pins", "porder",
|
||||||
|
"parm_down_micro", "mcud",
|
||||||
|
"parm_left_micro", "mcub",
|
||||||
|
"parm_right_micro", "mcuf",
|
||||||
|
"parm_up_micro", "mcuu",
|
||||||
|
"select_char_set", "scs",
|
||||||
|
"set_bottom_margin", "smgb",
|
||||||
|
"set_bottom_margin_parm", "smgbp",
|
||||||
|
"set_left_margin_parm", "smglp",
|
||||||
|
"set_right_margin_parm", "smgrp",
|
||||||
|
"set_top_margin", "smgt",
|
||||||
|
"set_top_margin_parm", "smgtp",
|
||||||
|
"start_bit_image", "sbim",
|
||||||
|
"start_char_set_def", "scsd",
|
||||||
|
"stop_bit_image", "rbim",
|
||||||
|
"stop_char_set_def", "rcsd",
|
||||||
|
"subscript_characters", "subcs",
|
||||||
|
"superscript_characters", "supcs",
|
||||||
|
"these_cause_cr", "docr",
|
||||||
|
"zero_motion", "zerom",
|
||||||
|
"char_set_names", "csnm",
|
||||||
|
"key_mouse", "kmous",
|
||||||
|
"mouse_info", "minfo",
|
||||||
|
"req_mouse_pos", "reqmp",
|
||||||
|
"get_mouse", "getm",
|
||||||
|
"set_a_foreground", "setaf",
|
||||||
|
"set_a_background", "setab",
|
||||||
|
"pkey_plab", "pfxl",
|
||||||
|
"device_type", "devt",
|
||||||
|
"code_set_init", "csin",
|
||||||
|
"set0_des_seq", "s0ds",
|
||||||
|
"set1_des_seq", "s1ds",
|
||||||
|
"set2_des_seq", "s2ds",
|
||||||
|
"set3_des_seq", "s3ds",
|
||||||
|
"set_lr_margin", "smglr",
|
||||||
|
"set_tb_margin", "smgtb",
|
||||||
|
"bit_image_repeat", "birep",
|
||||||
|
"bit_image_newline", "binel",
|
||||||
|
"bit_image_carriage_return", "bicr",
|
||||||
|
"color_names", "colornm",
|
||||||
|
"define_bit_image_region", "defbi",
|
||||||
|
"end_bit_image_region", "endbi",
|
||||||
|
"set_color_band", "setcolor",
|
||||||
|
"set_page_length", "slines",
|
||||||
|
"display_pc_char", "dispc",
|
||||||
|
"enter_pc_charset_mode", "smpch",
|
||||||
|
"exit_pc_charset_mode", "rmpch",
|
||||||
|
"enter_scancode_mode", "smsc",
|
||||||
|
"exit_scancode_mode", "rmsc",
|
||||||
|
"pc_term_options", "pctrm",
|
||||||
|
"scancode_escape", "scesc",
|
||||||
|
"alt_scancode_esc", "scesa",
|
||||||
|
"enter_horizontal_hl_mode", "ehhlm",
|
||||||
|
"enter_left_hl_mode", "elhlm",
|
||||||
|
"enter_low_hl_mode", "elohlm",
|
||||||
|
"enter_right_hl_mode", "erhlm",
|
||||||
|
"enter_top_hl_mode", "ethlm",
|
||||||
|
"enter_vertical_hl_mode", "evhlm",
|
||||||
|
"set_a_attributes", "sgr1",
|
||||||
|
"set_pglen_inch", "slength",
|
||||||
|
"termcap_init2", "",
|
||||||
|
"termcap_reset", "",
|
||||||
|
"linefeed_if_not_lf", "",
|
||||||
|
"backspace_if_not_bs", "",
|
||||||
|
"other_non_function_keys", "",
|
||||||
|
"arrow_key_map", "",
|
||||||
|
"acs_ulcorner", "",
|
||||||
|
"acs_llcorner", "",
|
||||||
|
"acs_urcorner", "",
|
||||||
|
"acs_lrcorner", "",
|
||||||
|
"acs_ltee", "",
|
||||||
|
"acs_rtee", "",
|
||||||
|
"acs_btee", "",
|
||||||
|
"acs_ttee", "",
|
||||||
|
"acs_hline", "",
|
||||||
|
"acs_vline", "",
|
||||||
|
"acs_plus", "",
|
||||||
|
"memory_lock", "",
|
||||||
|
"memory_unlock", "",
|
||||||
|
"box_chars_1", "",
|
||||||
|
}
|
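The three tables above are flat arrays of name pairs: even indices hold the long terminfo name and the following odd index holds its termcap abbreviation (an empty string where none exists). A small sketch of reading them directly, equivalent to what GetTermcapName in gotty.go does:

package main

import (
	"fmt"

	gotty "github.com/Nvveen/Gotty"
)

func main() {
	// Equivalent to gotty.GetTermcapName("cursor_address"): scan the
	// pair-wise table for the long name and print its neighbour, "cup".
	for i := 0; i < len(gotty.StrAttr)-1; i += 2 {
		if gotty.StrAttr[i] == "cursor_address" {
			fmt.Println(gotty.StrAttr[i+1])
		}
	}
}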
238 vendor/github.com/Nvveen/Gotty/gotty.go generated vendored Normal file
@@ -0,0 +1,238 @@
|
||||||
|
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||||
|
// Usage of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// Gotty is a Go-package for reading and parsing the terminfo database
|
||||||
|
package gotty
|
||||||
|
|
||||||
|
// TODO add more concurrency to name lookup, look for more opportunities.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Open a terminfo file by the name given and construct a TermInfo object.
|
||||||
|
// If something went wrong reading the terminfo database file, an error is
|
||||||
|
// returned.
|
||||||
|
func OpenTermInfo(termName string) (*TermInfo, error) {
|
||||||
|
var term *TermInfo
|
||||||
|
var err error
|
||||||
|
// Find the environment variables
|
||||||
|
termloc := os.Getenv("TERMINFO")
|
||||||
|
if len(termloc) == 0 {
|
||||||
|
// Search like ncurses
|
||||||
|
locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
|
||||||
|
"/lib/terminfo/", "/usr/share/terminfo/"}
|
||||||
|
var path string
|
||||||
|
for _, str := range locations {
|
||||||
|
// Construct path
|
||||||
|
path = str + string(termName[0]) + "/" + termName
|
||||||
|
// Check if path can be opened
|
||||||
|
file, _ := os.Open(path)
|
||||||
|
if file != nil {
|
||||||
|
// Path can open, fall out and use current path
|
||||||
|
file.Close()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(path) > 0 {
|
||||||
|
term, err = readTermInfo(path)
|
||||||
|
} else {
|
||||||
|
err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return term, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open a terminfo file from the environment variable containing the current
|
||||||
|
// terminal name and construct a TermInfo object. If something went wrong
|
||||||
|
// reading the terminfo database file, an error is returned.
|
||||||
|
func OpenTermInfoEnv() (*TermInfo, error) {
|
||||||
|
termenv := os.Getenv("TERM")
|
||||||
|
return OpenTermInfo(termenv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an attribute by the name attr provided. If none can be found,
|
||||||
|
// an error is returned.
|
||||||
|
func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
|
||||||
|
// Channel to store the main value in.
|
||||||
|
var value stacker
|
||||||
|
// Add a blocking WaitGroup
|
||||||
|
var block sync.WaitGroup
|
||||||
|
// Keep track of variable being written.
|
||||||
|
written := false
|
||||||
|
// Function to put into goroutine.
|
||||||
|
f := func(ats interface{}) {
|
||||||
|
var ok bool
|
||||||
|
var v stacker
|
||||||
|
// Switch on type of map to use and assign value to it.
|
||||||
|
switch reflect.TypeOf(ats).Elem().Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
v, ok = ats.(map[string]bool)[attr]
|
||||||
|
case reflect.Int16:
|
||||||
|
v, ok = ats.(map[string]int16)[attr]
|
||||||
|
case reflect.String:
|
||||||
|
v, ok = ats.(map[string]string)[attr]
|
||||||
|
}
|
||||||
|
// If ok, a value is found, so we can write.
|
||||||
|
if ok {
|
||||||
|
value = v
|
||||||
|
written = true
|
||||||
|
}
|
||||||
|
// Goroutine is done
|
||||||
|
block.Done()
|
||||||
|
}
|
||||||
|
block.Add(3)
|
||||||
|
// Go for all 3 attribute lists.
|
||||||
|
go f(term.boolAttributes)
|
||||||
|
go f(term.numAttributes)
|
||||||
|
go f(term.strAttributes)
|
||||||
|
// Wait until every goroutine is done.
|
||||||
|
block.Wait()
|
||||||
|
// If a value has been written, return it.
|
||||||
|
if written {
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
// Otherwise, error.
|
||||||
|
return nil, fmt.Errorf("Erorr finding attribute")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an attribute by the name attr provided. If none can be found,
|
||||||
|
// an error is returned. A name is first converted to its termcap value.
|
||||||
|
func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
|
||||||
|
tc := GetTermcapName(name)
|
||||||
|
return term.GetAttribute(tc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A utility function that finds and returns the termcap equivalent of a
|
||||||
|
// variable name.
|
||||||
|
func GetTermcapName(name string) string {
|
||||||
|
// Termcap name
|
||||||
|
var tc string
|
||||||
|
// Blocking group
|
||||||
|
var wait sync.WaitGroup
|
||||||
|
// Function to put into a goroutine
|
||||||
|
f := func(attrs []string) {
|
||||||
|
// Find the string corresponding to the name
|
||||||
|
for i, s := range attrs {
|
||||||
|
if s == name {
|
||||||
|
tc = attrs[i+1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Goroutine is finished
|
||||||
|
wait.Done()
|
||||||
|
}
|
||||||
|
wait.Add(3)
|
||||||
|
// Go for all 3 attribute lists
|
||||||
|
go f(BoolAttr[:])
|
||||||
|
go f(NumAttr[:])
|
||||||
|
go f(StrAttr[:])
|
||||||
|
// Wait until every goroutine is done
|
||||||
|
wait.Wait()
|
||||||
|
// Return the termcap name
|
||||||
|
return tc
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function takes a path to a terminfo file and reads it in binary
|
||||||
|
// form to construct the actual TermInfo file.
|
||||||
|
func readTermInfo(path string) (*TermInfo, error) {
|
||||||
|
// Open the terminfo file
|
||||||
|
file, err := os.Open(path)
|
||||||
|
defer file.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
|
||||||
|
// Header is composed of the magic 0432 octal number, size of the name
|
||||||
|
// section, size of the boolean section, the amount of number values,
|
||||||
|
// the number of offsets of strings, and the size of the string section.
|
||||||
|
var header [6]int16
|
||||||
|
// Byte array is used to read in byte values
|
||||||
|
var byteArray []byte
|
||||||
|
// Short array is used to read in short values
|
||||||
|
var shArray []int16
|
||||||
|
// TermInfo object to store values
|
||||||
|
var term TermInfo
|
||||||
|
|
||||||
|
// Read in the header
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &header)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// If magic number isn't there or isn't correct, we have the wrong filetype
|
||||||
|
if header[0] != 0432 {
|
||||||
|
return nil, errors.New(fmt.Sprintf("Wrong filetype"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read in the names
|
||||||
|
byteArray = make([]byte, header[1])
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
term.Names = strings.Split(string(byteArray), "|")
|
||||||
|
|
||||||
|
// Read in the booleans
|
||||||
|
byteArray = make([]byte, header[2])
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
term.boolAttributes = make(map[string]bool)
|
||||||
|
for i, b := range byteArray {
|
||||||
|
if b == 1 {
|
||||||
|
term.boolAttributes[BoolAttr[i*2+1]] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If the number of bytes read is not even, a byte for alignment is added
|
||||||
|
if len(byteArray)%2 != 0 {
|
||||||
|
err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read in shorts
|
||||||
|
shArray = make([]int16, header[3])
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &shArray)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
term.numAttributes = make(map[string]int16)
|
||||||
|
for i, n := range shArray {
|
||||||
|
if n != 0377 && n > -1 {
|
||||||
|
term.numAttributes[NumAttr[i*2+1]] = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the offsets into the short array
|
||||||
|
shArray = make([]int16, header[4])
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &shArray)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Read the actual strings in the byte array
|
||||||
|
byteArray = make([]byte, header[5])
|
||||||
|
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
term.strAttributes = make(map[string]string)
|
||||||
|
// We get an offset, and then iterate until the string is null-terminated
|
||||||
|
for i, offset := range shArray {
|
||||||
|
if offset > -1 {
|
||||||
|
r := offset
|
||||||
|
for ; byteArray[r] != 0; r++ {
|
||||||
|
}
|
||||||
|
term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &term, nil
|
||||||
|
}
|
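For reference, the six little-endian int16 fields that readTermInfo pulls out of the legacy terminfo header, written here as a struct purely for illustration (the vendored code reads them into a [6]int16 instead; the type and helper below are hypothetical names, not part of the package):

package gottyexample

import (
	"encoding/binary"
	"os"
)

// terminfoHeader mirrors the 12-byte legacy terminfo header that
// readTermInfo decodes: six little-endian int16 values.
type terminfoHeader struct {
	Magic        int16 // 0432 (octal) identifies the legacy format
	NameSize     int16 // bytes in the "name1|name2|..." section
	BoolCount    int16 // number of boolean capability bytes
	NumCount     int16 // number of int16 numeric capabilities
	StrOffsets   int16 // number of string-capability offsets
	StrTableSize int16 // bytes in the string table
}

// readHeader shows the same decode step as readTermInfo's binary.Read call.
func readHeader(f *os.File) (terminfoHeader, error) {
	var h terminfoHeader
	err := binary.Read(f, binary.LittleEndian, &h)
	return h, err
}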
362 vendor/github.com/Nvveen/Gotty/parser.go generated vendored Normal file
@@ -0,0 +1,362 @@
|
||||||
|
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||||
|
// Usage of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package gotty
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var exp = [...]string{
|
||||||
|
"%%",
|
||||||
|
"%c",
|
||||||
|
"%s",
|
||||||
|
"%p(\\d)",
|
||||||
|
"%P([A-z])",
|
||||||
|
"%g([A-z])",
|
||||||
|
"%'(.)'",
|
||||||
|
"%{([0-9]+)}",
|
||||||
|
"%l",
|
||||||
|
"%\\+|%-|%\\*|%/|%m",
|
||||||
|
"%&|%\\||%\\^",
|
||||||
|
"%=|%>|%<",
|
||||||
|
"%A|%O",
|
||||||
|
"%!|%~",
|
||||||
|
"%i",
|
||||||
|
"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
|
||||||
|
"%\\?(.*?);",
|
||||||
|
}
|
||||||
|
|
||||||
|
var regex *regexp.Regexp
|
||||||
|
var staticVar map[byte]stacker
|
||||||
|
|
||||||
|
// Parses the attribute that is received with name attr and parameters params.
|
||||||
|
func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
|
||||||
|
// Get the attribute name first.
|
||||||
|
iface, err := term.GetAttribute(attr)
|
||||||
|
str, ok := iface.(string)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
return str, errors.New("Only string capabilities can be parsed.")
|
||||||
|
}
|
||||||
|
// Construct the hidden parser struct so we can use a recursive stack based
|
||||||
|
// parser.
|
||||||
|
ps := &parser{}
|
||||||
|
// Dynamic variables only exist in this context.
|
||||||
|
ps.dynamicVar = make(map[byte]stacker, 26)
|
||||||
|
ps.parameters = make([]stacker, len(params))
|
||||||
|
// Convert the parameters to insert them into the parser struct.
|
||||||
|
for i, x := range params {
|
||||||
|
ps.parameters[i] = x
|
||||||
|
}
|
||||||
|
// Recursively walk and return.
|
||||||
|
result, err := ps.walk(str)
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parses the attribute that is received with name attr and parameters params.
|
||||||
|
// Only works on full name of a capability that is given, which it uses to
|
||||||
|
// search for the termcap name.
|
||||||
|
func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
|
||||||
|
tc := GetTermcapName(attr)
|
||||||
|
return term.Parse(tc, params)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Identify each token in a stack based manner and do the actual parsing.
|
||||||
|
func (ps *parser) walk(attr string) (string, error) {
|
||||||
|
// We use a buffer to get the modified string.
|
||||||
|
var buf bytes.Buffer
|
||||||
|
// Next, find and identify all tokens by their indices and strings.
|
||||||
|
tokens := regex.FindAllStringSubmatch(attr, -1)
|
||||||
|
if len(tokens) == 0 {
|
||||||
|
return attr, nil
|
||||||
|
}
|
||||||
|
indices := regex.FindAllStringIndex(attr, -1)
|
||||||
|
q := 0 // q counts the matches of one token
|
||||||
|
// Iterate through the string per character.
|
||||||
|
for i := 0; i < len(attr); i++ {
|
||||||
|
// If the current position is an identified token, execute the following
|
||||||
|
// steps.
|
||||||
|
if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
|
||||||
|
// Switch on token.
|
||||||
|
switch {
|
||||||
|
case tokens[q][0][:2] == "%%":
|
||||||
|
// Literal percentage character.
|
||||||
|
buf.WriteByte('%')
|
||||||
|
case tokens[q][0][:2] == "%c":
|
||||||
|
// Pop a character.
|
||||||
|
c, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
buf.WriteByte(c.(byte))
|
||||||
|
case tokens[q][0][:2] == "%s":
|
||||||
|
// Pop a string.
|
||||||
|
str, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
if _, ok := str.(string); !ok {
|
||||||
|
return buf.String(), errors.New("Stack head is not a string")
|
||||||
|
}
|
||||||
|
buf.WriteString(str.(string))
|
||||||
|
case tokens[q][0][:2] == "%p":
|
||||||
|
// Push a parameter on the stack.
|
||||||
|
index, err := strconv.ParseInt(tokens[q][1], 10, 8)
|
||||||
|
index--
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
if int(index) >= len(ps.parameters) {
|
||||||
|
return buf.String(), errors.New("Parameters index out of bound")
|
||||||
|
}
|
||||||
|
ps.st.push(ps.parameters[index])
|
||||||
|
case tokens[q][0][:2] == "%P":
|
||||||
|
// Pop a variable from the stack as a dynamic or static variable.
|
||||||
|
val, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
index := tokens[q][2]
|
||||||
|
if len(index) > 1 {
|
||||||
|
errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
|
||||||
|
index)
|
||||||
|
return buf.String(), errors.New(errorStr)
|
||||||
|
}
|
||||||
|
// Specify either dynamic or static.
|
||||||
|
if index[0] >= 'a' && index[0] <= 'z' {
|
||||||
|
ps.dynamicVar[index[0]] = val
|
||||||
|
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
||||||
|
staticVar[index[0]] = val
|
||||||
|
}
|
||||||
|
case tokens[q][0][:2] == "%g":
|
||||||
|
// Push a variable from the stack as a dynamic or static variable.
|
||||||
|
index := tokens[q][3]
|
||||||
|
if len(index) > 1 {
|
||||||
|
errorStr := fmt.Sprintf("%s is not a valid static variables index",
|
||||||
|
index)
|
||||||
|
return buf.String(), errors.New(errorStr)
|
||||||
|
}
|
||||||
|
var val stacker
|
||||||
|
if index[0] >= 'a' && index[0] <= 'z' {
|
||||||
|
val = ps.dynamicVar[index[0]]
|
||||||
|
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
||||||
|
val = staticVar[index[0]]
|
||||||
|
}
|
||||||
|
ps.st.push(val)
|
||||||
|
case tokens[q][0][:2] == "%'":
|
||||||
|
// Push a character constant.
|
||||||
|
con := tokens[q][4]
|
||||||
|
if len(con) > 1 {
|
||||||
|
errorStr := fmt.Sprintf("%s is not a valid character constant", con)
|
||||||
|
return buf.String(), errors.New(errorStr)
|
||||||
|
}
|
||||||
|
ps.st.push(con[0])
|
||||||
|
case tokens[q][0][:2] == "%{":
|
||||||
|
// Push an integer constant.
|
||||||
|
con, err := strconv.ParseInt(tokens[q][5], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
ps.st.push(con)
|
||||||
|
case tokens[q][0][:2] == "%l":
|
||||||
|
// Push the length of the string that is popped from the stack.
|
||||||
|
popStr, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
if _, ok := popStr.(string); !ok {
|
||||||
|
errStr := fmt.Sprintf("Stack head is not a string")
|
||||||
|
return buf.String(), errors.New(errStr)
|
||||||
|
}
|
||||||
|
ps.st.push(len(popStr.(string)))
|
||||||
|
case tokens[q][0][:2] == "%?":
|
||||||
|
// If-then-else construct. First, the whole string is identified and
|
||||||
|
// then inside this substring, we can specify which parts to switch on.
|
||||||
|
ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
|
||||||
|
ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
|
||||||
|
var (
|
||||||
|
ifStr string
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
// Parse the if-part to determine if-else.
|
||||||
|
if len(ifTokens[1]) > 0 {
|
||||||
|
ifStr, err = ps.walk(ifTokens[1])
|
||||||
|
} else { // else
|
||||||
|
ifStr, err = ps.walk(ifTokens[4])
|
||||||
|
}
|
||||||
|
// Return any errors
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
} else if len(ifStr) > 0 {
|
||||||
|
// Self-defined limitation, not sure if this is correct, but didn't
|
||||||
|
// seem like it.
|
||||||
|
return buf.String(), errors.New("If-clause cannot print statements")
|
||||||
|
}
|
||||||
|
var thenStr string
|
||||||
|
// Pop the first value that is set by parsing the if-clause.
|
||||||
|
choose, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
// Switch to if or else.
|
||||||
|
if choose.(int) == 0 && len(ifTokens[1]) > 0 {
|
||||||
|
thenStr, err = ps.walk(ifTokens[3])
|
||||||
|
} else if choose.(int) != 0 {
|
||||||
|
if len(ifTokens[1]) > 0 {
|
||||||
|
thenStr, err = ps.walk(ifTokens[2])
|
||||||
|
} else {
|
||||||
|
thenStr, err = ps.walk(ifTokens[5])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
buf.WriteString(thenStr)
|
||||||
|
case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
|
||||||
|
fallthrough
|
||||||
|
case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
|
||||||
|
fallthrough
|
||||||
|
case tokens[q][0][len(tokens[q][0])-1] == 'x':
|
||||||
|
fallthrough
|
||||||
|
case tokens[q][0][len(tokens[q][0])-1] == 'X':
|
||||||
|
fallthrough
|
||||||
|
case tokens[q][0][len(tokens[q][0])-1] == 's':
|
||||||
|
token := tokens[q][0]
|
||||||
|
// Remove the : that comes before a flag.
|
||||||
|
if token[1] == ':' {
|
||||||
|
token = token[:1] + token[2:]
|
||||||
|
}
|
||||||
|
digit, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
// The rest is determined like the normal formatted prints.
|
||||||
|
digitStr := fmt.Sprintf(token, digit.(int))
|
||||||
|
buf.WriteString(digitStr)
|
||||||
|
case tokens[q][0][:2] == "%i":
|
||||||
|
// Increment the parameters by one.
|
||||||
|
if len(ps.parameters) < 2 {
|
||||||
|
return buf.String(), errors.New("Not enough parameters to increment.")
|
||||||
|
}
|
||||||
|
val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
|
||||||
|
val1++
|
||||||
|
val2++
|
||||||
|
ps.parameters[0], ps.parameters[1] = val1, val2
|
||||||
|
default:
|
||||||
|
// The rest of the tokens is a special case, where two values are
|
||||||
|
// popped and then operated on by the token that comes after them.
|
||||||
|
op1, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
op2, err := ps.st.pop()
|
||||||
|
if err != nil {
|
||||||
|
return buf.String(), err
|
||||||
|
}
|
||||||
|
var result stacker
|
||||||
|
switch tokens[q][0][:2] {
|
||||||
|
case "%+":
|
||||||
|
// Addition
|
||||||
|
result = op2.(int) + op1.(int)
|
||||||
|
case "%-":
|
||||||
|
// Subtraction
|
||||||
|
result = op2.(int) - op1.(int)
|
||||||
|
case "%*":
|
||||||
|
// Multiplication
|
||||||
|
result = op2.(int) * op1.(int)
|
||||||
|
case "%/":
|
||||||
|
// Division
|
||||||
|
result = op2.(int) / op1.(int)
|
||||||
|
case "%m":
|
||||||
|
// Modulo
|
||||||
|
result = op2.(int) % op1.(int)
|
||||||
|
case "%&":
|
||||||
|
// Bitwise AND
|
||||||
|
result = op2.(int) & op1.(int)
|
||||||
|
case "%|":
|
||||||
|
// Bitwise OR
|
||||||
|
result = op2.(int) | op1.(int)
|
||||||
|
case "%^":
|
||||||
|
// Bitwise XOR
|
||||||
|
result = op2.(int) ^ op1.(int)
|
||||||
|
case "%=":
|
||||||
|
// Equals
|
||||||
|
result = op2 == op1
|
||||||
|
case "%>":
|
||||||
|
// Greater-than
|
||||||
|
result = op2.(int) > op1.(int)
|
||||||
|
case "%<":
|
||||||
|
// Lesser-than
|
||||||
|
result = op2.(int) < op1.(int)
|
||||||
|
case "%A":
|
||||||
|
// Logical AND
|
||||||
|
result = op2.(bool) && op1.(bool)
|
||||||
|
case "%O":
|
||||||
|
// Logical OR
|
||||||
|
result = op2.(bool) || op1.(bool)
|
||||||
|
case "%!":
|
||||||
|
// Logical complement
|
||||||
|
result = !op1.(bool)
|
||||||
|
case "%~":
|
||||||
|
// Bitwise complement
|
||||||
|
result = ^(op1.(int))
|
||||||
|
}
|
||||||
|
ps.st.push(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
i = indices[q][1] - 1
|
||||||
|
q++
|
||||||
|
} else {
|
||||||
|
// We are not "inside" a token, so just skip until the end or the next
|
||||||
|
// token, and add all characters to the buffer.
|
||||||
|
j := i
|
||||||
|
if q != len(indices) {
|
||||||
|
for !(j >= indices[q][0] && j < indices[q][1]) {
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
j = len(attr)
|
||||||
|
}
|
||||||
|
buf.WriteString(string(attr[i:j]))
|
||||||
|
i = j
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Return the buffer as a string.
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push a stacker-value onto the stack.
|
||||||
|
func (st *stack) push(s stacker) {
|
||||||
|
*st = append(*st, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pop a stacker-value from the stack.
|
||||||
|
func (st *stack) pop() (stacker, error) {
|
||||||
|
if len(*st) == 0 {
|
||||||
|
return nil, errors.New("Stack is empty.")
|
||||||
|
}
|
||||||
|
newStack := make(stack, len(*st)-1)
|
||||||
|
val := (*st)[len(*st)-1]
|
||||||
|
copy(newStack, (*st)[:len(*st)-1])
|
||||||
|
*st = newStack
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize regexes and the static vars (that don't get changed between
|
||||||
|
// calls.
|
||||||
|
func init() {
|
||||||
|
// Initialize the main regex.
|
||||||
|
expStr := strings.Join(exp[:], "|")
|
||||||
|
regex, _ = regexp.Compile(expStr)
|
||||||
|
// Initialize the static variables.
|
||||||
|
staticVar = make(map[byte]stacker, 26)
|
||||||
|
}
|
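To make the stack machine above concrete: expanding a cursor-address capability pushes the two parameters, increments them with %i and prints them with %d. The capability value quoted in the comment is the common xterm one and is only an assumption here, since the real value comes from whichever terminfo entry is loaded:

package main

import (
	"fmt"

	gotty "github.com/Nvveen/Gotty"
)

func main() {
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		fmt.Println(err)
		return
	}

	// Parse takes the termcap name plus parameters. For an xterm-style
	// cup value of "\x1b[%i%p1%d;%p2%dH" this yields "\x1b[5;10H":
	// %i bumps both parameters by one, %p1/%p2 push them, %d pops and prints.
	seq, err := ti.Parse("cup", 4, 9)
	fmt.Printf("%q %v\n", seq, err)
}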
23 vendor/github.com/Nvveen/Gotty/types.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package gotty

type TermInfo struct {
	boolAttributes map[string]bool
	numAttributes  map[string]int16
	strAttributes  map[string]string
	// The various names of the TermInfo file.
	Names []string
}

type stacker interface {
}
type stack []stacker

type parser struct {
	st         stack
	parameters []stacker
	dynamicVar map[byte]stacker
}
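The stack type above is what parser.go's walk routine operates on. A tiny package-internal sketch of its LIFO behaviour (it has to live in package gotty, e.g. in a _test.go file, because push and pop are unexported):

package gotty

import "testing"

// TestStackSketch illustrates the value stack used by walk(): any value
// kind can sit on it via the empty stacker interface, last in, first out.
func TestStackSketch(t *testing.T) {
	var st stack
	st.push(42)
	st.push("cup")
	if v, _ := st.pop(); v != "cup" {
		t.Fatalf("got %v", v)
	}
	if v, _ := st.pop(); v != 42 {
		t.Fatalf("got %v", v)
	}
}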
2 vendor/github.com/beorn7/perks/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
*.test
*.prof
20 vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 vendor/github.com/beorn7/perks/README.md generated vendored Normal file
@@ -0,0 +1,31 @@
# Perks for Go (golang.org)

Perks contains the Go package quantile that computes approximate quantiles over
an unbounded data stream within low memory and CPU bounds.

For more information and examples, see:
http://godoc.org/github.com/bmizerany/perks

A very special thank you and shout out to Graham Cormode (Rutgers University),
Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and
Divesh Srivastava (AT&T Labs–Research) for their research and publication of
[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)

Thank you, also:
* Armon Dadgar (@armon)
* Andrew Gerrand (@nf)
* Brad Fitzpatrick (@bradfitz)
* Keith Rarick (@kr)

FAQ:

Q: Why not move the quantile package into the project root?
A: I want to add more packages to perks later.

Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
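A minimal sketch of the quantile package described above, using only the API that appears in the vendored files below (NewTargeted, Insert, Query, Count); the per-quantile error bounds are illustrative values:

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with per-quantile error bounds.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.0001,
	})
	for i := 0; i < 100000; i++ {
		q.Insert(rand.ExpFloat64())
	}
	fmt.Println("p50:", q.Query(0.50), "p99:", q.Query(0.99), "n:", q.Count())
}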
26 vendor/github.com/beorn7/perks/histogram/bench_test.go generated vendored Normal file
@@ -0,0 +1,26 @@
|
||||||
|
package histogram
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkInsert10Bins(b *testing.B) {
|
||||||
|
b.StopTimer()
|
||||||
|
h := New(10)
|
||||||
|
b.StartTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
f := rand.ExpFloat64()
|
||||||
|
h.Insert(f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkInsert100Bins(b *testing.B) {
|
||||||
|
b.StopTimer()
|
||||||
|
h := New(100)
|
||||||
|
b.StartTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
f := rand.ExpFloat64()
|
||||||
|
h.Insert(f)
|
||||||
|
}
|
||||||
|
}
|
108 vendor/github.com/beorn7/perks/histogram/histogram.go generated vendored Normal file
@@ -0,0 +1,108 @@
|
||||||
|
// Package histogram provides a Go implementation of BigML's histogram package
|
||||||
|
// for Clojure/Java. It is currently experimental.
|
||||||
|
package histogram
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/heap"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Bin struct {
|
||||||
|
Count int
|
||||||
|
Sum float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Bin) Update(x *Bin) {
|
||||||
|
b.Count += x.Count
|
||||||
|
b.Sum += x.Sum
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Bin) Mean() float64 {
|
||||||
|
return b.Sum / float64(b.Count)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Bins []*Bin
|
||||||
|
|
||||||
|
func (bs Bins) Len() int { return len(bs) }
|
||||||
|
func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() }
|
||||||
|
func (bs Bins) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
|
||||||
|
|
||||||
|
func (bs *Bins) Push(x interface{}) {
|
||||||
|
*bs = append(*bs, x.(*Bin))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bs *Bins) Pop() interface{} {
|
||||||
|
return bs.remove(len(*bs) - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bs *Bins) remove(n int) *Bin {
|
||||||
|
if n < 0 || len(*bs) < n {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
x := (*bs)[n]
|
||||||
|
*bs = append((*bs)[:n], (*bs)[n+1:]...)
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
type Histogram struct {
|
||||||
|
res *reservoir
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(maxBins int) *Histogram {
|
||||||
|
return &Histogram{res: newReservoir(maxBins)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Histogram) Insert(f float64) {
|
||||||
|
h.res.insert(&Bin{1, f})
|
||||||
|
h.res.compress()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Histogram) Bins() Bins {
|
||||||
|
return h.res.bins
|
||||||
|
}
|
||||||
|
|
||||||
|
type reservoir struct {
|
||||||
|
n int
|
||||||
|
maxBins int
|
||||||
|
bins Bins
|
||||||
|
}
|
||||||
|
|
||||||
|
func newReservoir(maxBins int) *reservoir {
|
||||||
|
return &reservoir{maxBins: maxBins}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reservoir) insert(bin *Bin) {
|
||||||
|
r.n += bin.Count
|
||||||
|
i := sort.Search(len(r.bins), func(i int) bool {
|
||||||
|
return r.bins[i].Mean() >= bin.Mean()
|
||||||
|
})
|
||||||
|
if i < 0 || i == r.bins.Len() {
|
||||||
|
// TODO(blake): Maybe use an .insert(i, bin) instead of
|
||||||
|
// performing the extra work of a heap.Push.
|
||||||
|
heap.Push(&r.bins, bin)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.bins[i].Update(bin)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reservoir) compress() {
|
||||||
|
for r.bins.Len() > r.maxBins {
|
||||||
|
minGapIndex := -1
|
||||||
|
minGap := math.MaxFloat64
|
||||||
|
for i := 0; i < r.bins.Len()-1; i++ {
|
||||||
|
gap := gapWeight(r.bins[i], r.bins[i+1])
|
||||||
|
if minGap > gap {
|
||||||
|
minGap = gap
|
||||||
|
minGapIndex = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
prev := r.bins[minGapIndex]
|
||||||
|
next := r.bins.remove(minGapIndex + 1)
|
||||||
|
prev.Update(next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func gapWeight(prev, next *Bin) float64 {
|
||||||
|
return next.Mean() - prev.Mean()
|
||||||
|
}
|
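A short usage sketch of the histogram package above: Insert adds a point, and compress() merges the two bins with the smallest gap between means whenever more than maxBins bins exist, so the result below has at most ten bins:

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/histogram"
)

func main() {
	h := histogram.New(10) // keep at most 10 bins
	for i := 0; i < 10000; i++ {
		h.Insert(rand.ExpFloat64())
	}
	for _, b := range h.Bins() {
		fmt.Printf("mean=%.3f count=%d\n", b.Mean(), b.Count)
	}
}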
38 vendor/github.com/beorn7/perks/histogram/histogram_test.go generated vendored Normal file
@@ -0,0 +1,38 @@
|
||||||
|
package histogram
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHistogram(t *testing.T) {
|
||||||
|
const numPoints = 1e6
|
||||||
|
const maxBins = 3
|
||||||
|
|
||||||
|
h := New(maxBins)
|
||||||
|
for i := 0; i < numPoints; i++ {
|
||||||
|
f := rand.ExpFloat64()
|
||||||
|
h.Insert(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
bins := h.Bins()
|
||||||
|
if g := len(bins); g > maxBins {
|
||||||
|
t.Fatalf("got %d bins, wanted <= %d", g, maxBins)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, b := range bins {
|
||||||
|
t.Logf("%+v", b)
|
||||||
|
}
|
||||||
|
|
||||||
|
if g := count(h.Bins()); g != numPoints {
|
||||||
|
t.Fatalf("binned %d points, wanted %d", g, numPoints)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func count(bins Bins) int {
|
||||||
|
binCounts := 0
|
||||||
|
for _, b := range bins {
|
||||||
|
binCounts += b.Count
|
||||||
|
}
|
||||||
|
return binCounts
|
||||||
|
}
|
63 vendor/github.com/beorn7/perks/quantile/bench_test.go generated vendored Normal file
@@ -0,0 +1,63 @@
|
||||||
|
package quantile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkInsertTargeted(b *testing.B) {
|
||||||
|
b.ReportAllocs()
|
||||||
|
|
||||||
|
s := NewTargeted(Targets)
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := float64(0); i < float64(b.N); i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
|
||||||
|
s := NewTargeted(TargetsSmallEpsilon)
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := float64(0); i < float64(b.N); i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkInsertBiased(b *testing.B) {
|
||||||
|
s := NewLowBiased(0.01)
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := float64(0); i < float64(b.N); i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
|
||||||
|
s := NewLowBiased(0.0001)
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := float64(0); i < float64(b.N); i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkQuery(b *testing.B) {
|
||||||
|
s := NewTargeted(Targets)
|
||||||
|
for i := float64(0); i < 1e6; i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
n := float64(b.N)
|
||||||
|
for i := float64(0); i < n; i++ {
|
||||||
|
s.Query(i / n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkQuerySmallEpsilon(b *testing.B) {
|
||||||
|
s := NewTargeted(TargetsSmallEpsilon)
|
||||||
|
for i := float64(0); i < 1e6; i++ {
|
||||||
|
s.Insert(i)
|
||||||
|
}
|
||||||
|
b.ResetTimer()
|
||||||
|
n := float64(b.N)
|
||||||
|
for i := float64(0); i < n; i++ {
|
||||||
|
s.Query(i / n)
|
||||||
|
}
|
||||||
|
}
|
121 vendor/github.com/beorn7/perks/quantile/example_test.go generated vendored Normal file
@@ -0,0 +1,121 @@
|
||||||
|
// +build go1.1

package quantile_test

import (
    "bufio"
    "fmt"
    "log"
    "os"
    "strconv"
    "time"

    "github.com/beorn7/perks/quantile"
)

func Example_simple() {
    ch := make(chan float64)
    go sendFloats(ch)

    // Compute the 50th, 90th, and 99th percentile.
    q := quantile.NewTargeted(map[float64]float64{
        0.50: 0.005,
        0.90: 0.001,
        0.99: 0.0001,
    })
    for v := range ch {
        q.Insert(v)
    }

    fmt.Println("perc50:", q.Query(0.50))
    fmt.Println("perc90:", q.Query(0.90))
    fmt.Println("perc99:", q.Query(0.99))
    fmt.Println("count:", q.Count())
    // Output:
    // perc50: 5
    // perc90: 16
    // perc99: 223
    // count: 2388
}

func Example_mergeMultipleStreams() {
    // Scenario:
    // We have multiple database shards. On each shard, there is a process
    // collecting query response times from the database logs and inserting
    // them into a Stream (created via NewTargeted(0.90)), much like the
    // Simple example. These processes expose a network interface for us to
    // ask them to serialize and send us the results of their
    // Stream.Samples so we may Merge and Query them.
    //
    // NOTES:
    // * These sample sets are small, allowing us to get them
    //   across the network much faster than sending the entire list of data
    //   points.
    //
    // * For this to work correctly, we must supply the same quantiles
    //   a priori that the process collecting the samples supplied to
    //   NewTargeted, even if we do not plan to query them all here.
    ch := make(chan quantile.Samples)
    getDBQuerySamples(ch)
    q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
    for samples := range ch {
        q.Merge(samples)
    }
    fmt.Println("perc90:", q.Query(0.90))
}

func Example_window() {
    // Scenario: We want the 90th, 95th, and 99th percentiles for each
    // minute.

    ch := make(chan float64)
    go sendStreamValues(ch)

    tick := time.NewTicker(1 * time.Minute)
    q := quantile.NewTargeted(map[float64]float64{
        0.90: 0.001,
        0.95: 0.0005,
        0.99: 0.0001,
    })
    for {
        select {
        case t := <-tick.C:
            flushToDB(t, q.Samples())
            q.Reset()
        case v := <-ch:
            q.Insert(v)
        }
    }
}

func sendStreamValues(ch chan float64) {
    // Use your imagination
}

func flushToDB(t time.Time, samples quantile.Samples) {
    // Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}

func sendFloats(ch chan<- float64) {
    f, err := os.Open("exampledata.txt")
    if err != nil {
        log.Fatal(err)
    }
    sc := bufio.NewScanner(f)
    for sc.Scan() {
        b := sc.Bytes()
        v, err := strconv.ParseFloat(string(b), 64)
        if err != nil {
            log.Fatal(err)
        }
        ch <- v
    }
    if sc.Err() != nil {
        log.Fatal(sc.Err())
    }
    close(ch)
}
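Example_mergeMultipleStreams leaves the transport between shards unspecified. Since Sample carries JSON struct tags, one possible way to ship Samples across the network is plain encoding/json; this is an illustrative sketch, not part of the vendored code, and stream.go documents Merge as inaccurate, so merged results should be treated as approximate:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/beorn7/perks/quantile"
)

func main() {
    // On a shard: collect observations and serialize the compressed summary.
    shard := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
    for i := 1; i <= 1000; i++ {
        shard.Insert(float64(i))
    }
    payload, err := json.Marshal(shard.Samples())
    if err != nil {
        log.Fatal(err)
    }

    // On the aggregator: decode and merge into a stream built with the same
    // target quantiles. Merge is documented as approximate in stream.go.
    var samples quantile.Samples
    if err := json.Unmarshal(payload, &samples); err != nil {
        log.Fatal(err)
    }
    agg := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
    agg.Merge(samples)
    fmt.Println("perc90:", agg.Query(0.90))
}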
2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file
File diff suppressed because it is too large
292 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file
@@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
    "math"
    "sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
    Value float64 `json:",string"`
    Width float64 `json:",string"`
    Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
    ƒ := func(s *stream, r float64) float64 {
        return 2 * epsilon * r
    }
    return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
    ƒ := func(s *stream, r float64) float64 {
        return 2 * epsilon * (s.n - r)
    }
    return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
    ƒ := func(s *stream, r float64) float64 {
        var m = math.MaxFloat64
        var f float64
        for quantile, epsilon := range targets {
            if quantile*s.n <= r {
                f = (2 * epsilon * r) / quantile
            } else {
                f = (2 * epsilon * (s.n - r)) / (1 - quantile)
            }
            if f < m {
                m = f
            }
        }
        return m
    }
    return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
    *stream
    b      Samples
    sorted bool
}

func newStream(ƒ invariant) *Stream {
    x := &stream{ƒ: ƒ}
    return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
    s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
    s.b = append(s.b, sample)
    s.sorted = false
    if len(s.b) == cap(s.b) {
        s.flush()
    }
}

// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
    if !s.flushed() {
        // Fast path when there hasn't been enough data for a flush;
        // this also yields better accuracy for small sets of data.
        l := len(s.b)
        if l == 0 {
            return 0
        }
        i := int(math.Ceil(float64(l) * q))
        if i > 0 {
            i -= 1
        }
        s.maybeSort()
        return s.b[i].Value
    }
    s.flush()
    return s.stream.query(q)
}

// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
    sort.Sort(samples)
    s.stream.merge(samples)
}

// Reset reinitializes and clears the list, reusing the samples buffer memory.
func (s *Stream) Reset() {
    s.stream.reset()
    s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
    if !s.flushed() {
        return s.b
    }
    s.flush()
    return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
    return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
    s.maybeSort()
    s.stream.merge(s.b)
    s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
    if !s.sorted {
        s.sorted = true
        sort.Sort(s.b)
    }
}

func (s *Stream) flushed() bool {
    return len(s.stream.l) > 0
}

type stream struct {
    n float64
    l []Sample
    ƒ invariant
}

func (s *stream) reset() {
    s.l = s.l[:0]
    s.n = 0
}

func (s *stream) insert(v float64) {
    s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
    // TODO(beorn7): This tries to merge not only individual samples, but
    // whole summaries. The paper doesn't mention merging summaries at
    // all. Unit tests show that the merging is inaccurate. Find out how to
    // do merges properly.
    var r float64
    i := 0
    for _, sample := range samples {
        for ; i < len(s.l); i++ {
            c := s.l[i]
            if c.Value > sample.Value {
                // Insert at position i.
                s.l = append(s.l, Sample{})
                copy(s.l[i+1:], s.l[i:])
                s.l[i] = Sample{
                    sample.Value,
                    sample.Width,
                    math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
                    // TODO(beorn7): How to calculate delta correctly?
                }
                i++
                goto inserted
            }
            r += c.Width
        }
        s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
        i++
    inserted:
        s.n += sample.Width
        r += sample.Width
    }
    s.compress()
}

func (s *stream) count() int {
    return int(s.n)
}

func (s *stream) query(q float64) float64 {
    t := math.Ceil(q * s.n)
    t += math.Ceil(s.ƒ(s, t) / 2)
    p := s.l[0]
    var r float64
    for _, c := range s.l[1:] {
        r += p.Width
        if r+c.Width+c.Delta > t {
            return p.Value
        }
        p = c
    }
    return p.Value
}

func (s *stream) compress() {
    if len(s.l) < 2 {
        return
    }
    x := s.l[len(s.l)-1]
    xi := len(s.l) - 1
    r := s.n - 1 - x.Width

    for i := len(s.l) - 2; i >= 0; i-- {
        c := s.l[i]
        if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
            x.Width += c.Width
            s.l[xi] = x
            // Remove element at i.
            copy(s.l[i:], s.l[i+1:])
            s.l = s.l[:len(s.l)-1]
            xi -= 1
        } else {
            x = c
            xi = i
        }
        r -= c.Width
    }
}

func (s *stream) samples() Samples {
    samples := make(Samples, len(s.l))
    copy(samples, s.l)
    return samples
}
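As an illustrative sketch (not part of the vendored file) of the absolute-error guarantee that the NewTargeted doc comment describes, inserting a known sequence makes the bound easy to check by hand:

package main

import (
    "fmt"

    "github.com/beorn7/perks/quantile"
)

func main() {
    // Track the 0.95 quantile with an absolute error of ±0.005: per the
    // NewTargeted doc comment, the returned value's true quantile lies
    // within 0.95±0.005, i.e. its rank within (0.95±0.005)*n.
    q := quantile.NewTargeted(map[float64]float64{0.95: 0.005})
    n := 10000
    for i := 1; i <= n; i++ {
        q.Insert(float64(i))
    }
    got := q.Query(0.95)
    lo, hi := float64(n)*(0.95-0.005), float64(n)*(0.95+0.005)
    fmt.Printf("p95=%v (guaranteed within [%v, %v])\n", got, lo, hi)
}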
215 vendor/github.com/beorn7/perks/quantile/stream_test.go generated vendored Normal file
@@ -0,0 +1,215 @@
package quantile

import (
    "math"
    "math/rand"
    "sort"
    "testing"
)

var (
    Targets = map[float64]float64{
        0.01: 0.001,
        0.10: 0.01,
        0.50: 0.05,
        0.90: 0.01,
        0.99: 0.001,
    }
    TargetsSmallEpsilon = map[float64]float64{
        0.01: 0.0001,
        0.10: 0.001,
        0.50: 0.005,
        0.90: 0.001,
        0.99: 0.0001,
    }
    LowQuantiles  = []float64{0.01, 0.1, 0.5}
    HighQuantiles = []float64{0.99, 0.9, 0.5}
)

const RelativeEpsilon = 0.01

func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
    sort.Float64s(a)
    for quantile, epsilon := range Targets {
        n := float64(len(a))
        k := int(quantile * n)
        if k < 1 {
            k = 1
        }
        lower := int((quantile - epsilon) * n)
        if lower < 1 {
            lower = 1
        }
        upper := int(math.Ceil((quantile + epsilon) * n))
        if upper > len(a) {
            upper = len(a)
        }
        w, min, max := a[k-1], a[lower-1], a[upper-1]
        if g := s.Query(quantile); g < min || g > max {
            t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
        }
    }
}

func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
    sort.Float64s(a)
    for _, qu := range LowQuantiles {
        n := float64(len(a))
        k := int(qu * n)

        lowerRank := int((1 - RelativeEpsilon) * qu * n)
        upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
        w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
        if g := s.Query(qu); g < min || g > max {
            t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
        }
    }
}

func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
    sort.Float64s(a)
    for _, qu := range HighQuantiles {
        n := float64(len(a))
        k := int(qu * n)

        lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
        upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
        w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
        if g := s.Query(qu); g < min || g > max {
            t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
        }
    }
}

func populateStream(s *Stream) []float64 {
    a := make([]float64, 0, 1e5+100)
    for i := 0; i < cap(a); i++ {
        v := rand.NormFloat64()
        // Add 5% asymmetric outliers.
        if i%20 == 0 {
            v = v*v + 1
        }
        s.Insert(v)
        a = append(a, v)
    }
    return a
}

func TestTargetedQuery(t *testing.T) {
    rand.Seed(42)
    s := NewTargeted(Targets)
    a := populateStream(s)
    verifyPercsWithAbsoluteEpsilon(t, a, s)
}

func TestTargetedQuerySmallSampleSize(t *testing.T) {
    rand.Seed(42)
    s := NewTargeted(TargetsSmallEpsilon)
    a := []float64{1, 2, 3, 4, 5}
    for _, v := range a {
        s.Insert(v)
    }
    verifyPercsWithAbsoluteEpsilon(t, a, s)
    // If not yet flushed, results should be precise:
    if !s.flushed() {
        for φ, want := range map[float64]float64{
            0.01: 1,
            0.10: 1,
            0.50: 3,
            0.90: 5,
            0.99: 5,
        } {
            if got := s.Query(φ); got != want {
                t.Errorf("want %f for φ=%f, got %f", want, φ, got)
            }
        }
    }
}

func TestLowBiasedQuery(t *testing.T) {
    rand.Seed(42)
    s := NewLowBiased(RelativeEpsilon)
    a := populateStream(s)
    verifyLowPercsWithRelativeEpsilon(t, a, s)
}

func TestHighBiasedQuery(t *testing.T) {
    rand.Seed(42)
    s := NewHighBiased(RelativeEpsilon)
    a := populateStream(s)
    verifyHighPercsWithRelativeEpsilon(t, a, s)
}

// BrokenTestTargetedMerge is broken, see Merge doc comment.
func BrokenTestTargetedMerge(t *testing.T) {
    rand.Seed(42)
    s1 := NewTargeted(Targets)
    s2 := NewTargeted(Targets)
    a := populateStream(s1)
    a = append(a, populateStream(s2)...)
    s1.Merge(s2.Samples())
    verifyPercsWithAbsoluteEpsilon(t, a, s1)
}

// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
func BrokenTestLowBiasedMerge(t *testing.T) {
    rand.Seed(42)
    s1 := NewLowBiased(RelativeEpsilon)
    s2 := NewLowBiased(RelativeEpsilon)
    a := populateStream(s1)
    a = append(a, populateStream(s2)...)
    s1.Merge(s2.Samples())
    verifyLowPercsWithRelativeEpsilon(t, a, s2)
}

// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
func BrokenTestHighBiasedMerge(t *testing.T) {
    rand.Seed(42)
    s1 := NewHighBiased(RelativeEpsilon)
    s2 := NewHighBiased(RelativeEpsilon)
    a := populateStream(s1)
    a = append(a, populateStream(s2)...)
    s1.Merge(s2.Samples())
    verifyHighPercsWithRelativeEpsilon(t, a, s2)
}

func TestUncompressed(t *testing.T) {
    q := NewTargeted(Targets)
    for i := 100; i > 0; i-- {
        q.Insert(float64(i))
    }
    if g := q.Count(); g != 100 {
        t.Errorf("want count 100, got %d", g)
    }
    // Before compression, Query should have 100% accuracy.
    for quantile := range Targets {
        w := quantile * 100
        if g := q.Query(quantile); g != w {
            t.Errorf("want %f, got %f", w, g)
        }
    }
}

func TestUncompressedSamples(t *testing.T) {
    q := NewTargeted(map[float64]float64{0.99: 0.001})
    for i := 1; i <= 100; i++ {
        q.Insert(float64(i))
    }
    if g := q.Samples().Len(); g != 100 {
        t.Errorf("want count 100, got %d", g)
    }
}

func TestUncompressedOne(t *testing.T) {
    q := NewTargeted(map[float64]float64{0.99: 0.01})
    q.Insert(3.14)
    if g := q.Query(0.90); g != 3.14 {
        t.Error("want PI, got", g)
    }
}

func TestDefaults(t *testing.T) {
    if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
        t.Errorf("want 0, got %f", g)
    }
}
90 vendor/github.com/beorn7/perks/topk/topk.go generated vendored Normal file
@@ -0,0 +1,90 @@
package topk

import (
    "sort"
)

// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf

type Element struct {
    Value string
    Count int
}

type Samples []*Element

func (sm Samples) Len() int {
    return len(sm)
}

func (sm Samples) Less(i, j int) bool {
    return sm[i].Count < sm[j].Count
}

func (sm Samples) Swap(i, j int) {
    sm[i], sm[j] = sm[j], sm[i]
}

type Stream struct {
    k   int
    mon map[string]*Element

    // the minimum Element
    min *Element
}

func New(k int) *Stream {
    s := new(Stream)
    s.k = k
    s.mon = make(map[string]*Element)
    s.min = &Element{}

    // Track k+1 so that less frequent items contend for that spot,
    // resulting in the top k being more accurate.
    return s
}

func (s *Stream) Insert(x string) {
    s.insert(&Element{x, 1})
}

func (s *Stream) Merge(sm Samples) {
    for _, e := range sm {
        s.insert(e)
    }
}

func (s *Stream) insert(in *Element) {
    e := s.mon[in.Value]
    if e != nil {
        e.Count++
    } else {
        if len(s.mon) < s.k+1 {
            e = &Element{in.Value, in.Count}
            s.mon[in.Value] = e
        } else {
            // Evict the current minimum and reuse its counter for the
            // incoming value, keeping it tracked in the monitored set.
            e = s.min
            delete(s.mon, e.Value)
            e.Value = in.Value
            e.Count += in.Count
            s.mon[in.Value] = e
            s.min = e
        }
    }
    if e.Count < s.min.Count {
        s.min = e
    }
}

func (s *Stream) Query() Samples {
    var sm Samples
    for _, e := range s.mon {
        sm = append(sm, e)
    }
    sort.Sort(sort.Reverse(sm))

    if len(sm) < s.k {
        return sm
    }

    return sm[:s.k]
}
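A minimal usage sketch for the top-k stream above (illustrative only, not part of the vendored code), following the space-saving approach from the referenced tech report, where at most k+1 counters are kept and evicted counts make the results approximate:

package main

import (
    "fmt"

    "github.com/beorn7/perks/topk"
)

func main() {
    // Track the 3 most frequent values while holding at most k+1 counters.
    s := topk.New(3)
    for _, w := range []string{"a", "b", "a", "c", "a", "b", "d", "a", "b", "c"} {
        s.Insert(w)
    }
    // Query returns up to k elements, most frequent first; counts are
    // approximate once values have been evicted and re-inserted.
    for _, e := range s.Query() {
        fmt.Printf("%s: ~%d\n", e.Value, e.Count)
    }
}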
57 vendor/github.com/beorn7/perks/topk/topk_test.go generated vendored Normal file
@@ -0,0 +1,57 @@
package topk

import (
    "fmt"
    "math/rand"
    "sort"
    "testing"
)

func TestTopK(t *testing.T) {
    stream := New(10)
    ss := []*Stream{New(10), New(10), New(10)}
    m := make(map[string]int)
    for _, s := range ss {
        for i := 0; i < 1e6; i++ {
            v := fmt.Sprintf("%x", int8(rand.ExpFloat64()))
            s.Insert(v)
            m[v]++
        }
        stream.Merge(s.Query())
    }

    var sm Samples
    for x, s := range m {
        sm = append(sm, &Element{x, s})
    }
    sort.Sort(sort.Reverse(sm))

    g := stream.Query()
    if len(g) != 10 {
        t.Fatalf("got %d, want 10", len(g))
    }
    for i, e := range g {
        if sm[i].Value != e.Value {
            t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value)
        }
    }
}

func TestQuery(t *testing.T) {
    queryTests := []struct {
        value    string
        expected int
    }{
        {"a", 1},
        {"b", 2},
        {"c", 2},
    }

    stream := New(2)
    for _, tt := range queryTests {
        stream.Insert(tt.value)
        if n := len(stream.Query()); n != tt.expected {
            t.Errorf("want %d, got %d", tt.expected, n)
        }
    }
}
2 vendor/github.com/docker/cli/.dockerignore generated vendored Normal file
@@ -0,0 +1,2 @@
.git
build
9 vendor/github.com/docker/cli/.github/CODEOWNERS generated vendored Normal file
@@ -0,0 +1,9 @@
# GitHub code owners
# See https://github.com/blog/2392-introducing-code-owners

cli/command/stack/**        @dnephin @vdemeester
cli/compose/**              @dnephin @vdemeester
contrib/completion/bash/**  @albers
contrib/completion/zsh/**   @sdurrheimer
docs/**                     @mistyhacks @vdemeester @thaJeztah
scripts/**                  @dnephin
64 vendor/github.com/docker/cli/.github/ISSUE_TEMPLATE.md generated vendored Normal file
@@ -0,0 +1,64 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.

If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.

For more information about reporting issues, see
https://github.com/docker/cli/blob/master/CONTRIBUTING.md#reporting-other-issues

---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------

The GitHub issue tracker is for bug reports and feature requests.
General support can be found at the following locations:

- Docker Support Forums - https://forums.docker.com
- Docker Community Slack - https://dockr.ly/community
- Post a question on StackOverflow, using the Docker tag

---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->

**Description**

<!--
Briefly describe the problem you are having in a few paragraphs.
-->

**Steps to reproduce the issue:**
1.
2.
3.

**Describe the results you received:**

**Describe the results you expected:**

**Additional information you deem important (e.g. issue happens only occasionally):**

**Output of `docker version`:**

```
(paste your output here)
```

**Output of `docker info`:**

```
(paste your output here)
```

**Additional environment details (AWS, VirtualBox, physical, etc.):**
30 vendor/github.com/docker/cli/.github/PULL_REQUEST_TEMPLATE.md generated vendored Normal file
@@ -0,0 +1,30 @@
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/docker/cli/blob/master/CONTRIBUTING.md

** Make sure all your commits include a signature generated with `git commit -s` **

For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/

If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"

Please provide the following information:
-->

**- What I did**

**- How I did it**

**- How to verify it**

**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->

**- A picture of a cute animal (not mandatory but encouraged)**
18 vendor/github.com/docker/cli/.gitignore generated vendored Normal file
@@ -0,0 +1,18 @@
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.orig
.*.swp
.DS_Store
Thumbs.db
.editorconfig
/build/
cli/winresources/rsrc_386.syso
cli/winresources/rsrc_amd64.syso
/man/man1/
/man/man5/
/man/man8/
/docs/yaml/gen/
coverage.txt
profile.out
475 vendor/github.com/docker/cli/.mailmap generated vendored Normal file
@@ -0,0 +1,475 @@
# Generate AUTHORS: scripts/docs/generate-authors.sh
|
||||||
|
|
||||||
|
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
|
||||||
|
# duplicates that aren't also email duplicates): scan the output of:
|
||||||
|
# git log --format='%aE - %aN' | sort -uf
|
||||||
|
#
|
||||||
|
# For explanation on this file format: man git-shortlog
|
||||||
|
|
||||||
|
Aaron L. Xu <liker.xu@foxmail.com>
|
||||||
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
|
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
|
||||||
|
Ahmed Kamal <email.ahmedkamal@googlemail.com>
|
||||||
|
Ahmet Alp Balkan <ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
|
||||||
|
AJ Bowen <aj@gandi.net>
|
||||||
|
AJ Bowen <aj@gandi.net> <amy@gandi.net>
|
||||||
|
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
|
||||||
|
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||||
|
Aleksa Sarai <asarai@suse.de>
|
||||||
|
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||||
|
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||||
|
Aleksandrs Fadins <aleks@s-ko.net>
|
||||||
|
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
|
||||||
|
Alex Chen <alexchenunix@gmail.com> <root@localhost.localdomain>
|
||||||
|
Alex Ellis <alexellis2@gmail.com>
|
||||||
|
Alexander Larsson <alexl@redhat.com> <alexander.larsson@gmail.com>
|
||||||
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
|
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
|
||||||
|
Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
|
||||||
|
Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
|
||||||
|
Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
||||||
|
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
||||||
|
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
||||||
|
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
|
||||||
|
André Martins <aanm90@gmail.com> <martins@noironetworks.com>
|
||||||
|
Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
|
||||||
|
Andy Smith <github@anarkystic.com>
|
||||||
|
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
|
||||||
|
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
|
||||||
|
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
||||||
|
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
|
||||||
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
|
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
|
||||||
|
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
|
||||||
|
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
|
||||||
|
Ben Bonnefoy <frenchben@docker.com>
|
||||||
|
Ben Golub <ben.golub@dotcloud.com>
|
||||||
|
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
|
||||||
|
Benoit Chesneau <bchesneau@gmail.com>
|
||||||
|
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||||
|
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||||
|
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
|
||||||
|
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
|
||||||
|
Bin Liu <liubin0329@gmail.com>
|
||||||
|
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
|
||||||
|
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||||
|
Boaz Shuster <ripcurld.github@gmail.com>
|
||||||
|
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
|
||||||
|
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
|
||||||
|
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
|
||||||
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||||
|
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
||||||
|
Chander Govindarajan <chandergovind@gmail.com>
|
||||||
|
Chao Wang <wangchao.fnst@cn.fujitsu.com> <chaowang@localhost.localdomain>
|
||||||
|
Charles Hooper <charles.hooper@dotcloud.com> <chooper@plumata.com>
|
||||||
|
Chen Chao <cc272309126@gmail.com>
|
||||||
|
Chen Chuanliang <chen.chuanliang@zte.com.cn>
|
||||||
|
Chen Mingjie <chenmingjie0828@163.com>
|
||||||
|
Chen Qiu <cheney-90@hotmail.com>
|
||||||
|
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
|
||||||
|
Chris Dias <cdias@microsoft.com>
|
||||||
|
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||||
|
Christopher Biscardi <biscarch@sketcht.com>
|
||||||
|
Christopher Latham <sudosurootdev@gmail.com>
|
||||||
|
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||||
|
Corbin Coleman <corbin.coleman@docker.com>
|
||||||
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
|
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
||||||
|
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
|
||||||
|
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
|
||||||
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
|
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
|
||||||
|
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
|
||||||
|
Dan Feldman <danf@jfrog.com>
|
||||||
|
Daniel Dao <dqminh@cloudflare.com>
|
||||||
|
Daniel Dao <dqminh@cloudflare.com> <dqminh89@gmail.com>
|
||||||
|
Daniel Garcia <daniel@danielgarcia.info>
|
||||||
|
Daniel Gasienica <daniel@gasienica.ch> <dgasienica@zynga.com>
|
||||||
|
Daniel Goosen <daniel.goosen@surveysampling.com> <djgoosen@users.noreply.github.com>
|
||||||
|
Daniel Grunwell <mwgrunny@gmail.com>
|
||||||
|
Daniel J Walsh <dwalsh@redhat.com>
|
||||||
|
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
|
||||||
|
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
|
||||||
|
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <root@vagrant-ubuntu-12.10.vagrantup.com>
|
||||||
|
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
|
||||||
|
Daniel Norberg <dano@spotify.com> <daniel.norberg@gmail.com>
|
||||||
|
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||||
|
Danny Yates <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
|
||||||
|
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
|
||||||
|
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||||
|
Dave Goodchild <buddhamagnet@gmail.com>
|
||||||
|
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
|
||||||
|
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
||||||
|
David M. Karr <davidmichaelkarr@gmail.com>
|
||||||
|
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
||||||
|
David Sissitka <me@dsissitka.com>
|
||||||
|
David Williamson <david.williamson@docker.com> <davidwilliamson@users.noreply.github.com>
|
||||||
|
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
|
||||||
|
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
|
||||||
|
Diego Siqueira <dieg0@live.com>
|
||||||
|
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
|
||||||
|
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
|
||||||
|
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
||||||
|
Doug Tangren <d.tangren@gmail.com>
|
||||||
|
Elan Ruusamäe <glen@pld-linux.org>
|
||||||
|
Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
|
||||||
|
Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
|
||||||
|
Eric Hanchrow <ehanchrow@ine.com> <eric.hanchrow@gmail.com>
|
||||||
|
Eric Rosenberg <ehaydenr@gmail.com> <ehaydenr@users.noreply.github.com>
|
||||||
|
Erica Windisch <erica@windisch.us> <eric@windisch.us>
|
||||||
|
Erica Windisch <erica@windisch.us> <ewindisch@docker.com>
|
||||||
|
Erik Hollensbe <github@hollensbe.org> <erik+github@hollensbe.org>
|
||||||
|
Erwin van der Koogh <info@erronis.nl>
|
||||||
|
Euan Kemp <euan.kemp@coreos.com> <euank@amazon.com>
|
||||||
|
Eugen Krizo <eugen.krizo@gmail.com>
|
||||||
|
Evan Hazlett <ejhazlett@gmail.com> <ehazlett@users.noreply.github.com>
|
||||||
|
Evelyn Xu <evelynhsu21@gmail.com>
|
||||||
|
Evgeny Shmarnev <shmarnev@gmail.com>
|
||||||
|
Faiz Khan <faizkhan00@gmail.com>
|
||||||
|
Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
|
||||||
|
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
||||||
|
Feng Yan <fy2462@gmail.com>
|
||||||
|
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
|
||||||
|
Francisco Carriedo <fcarriedo@gmail.com>
|
||||||
|
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
||||||
|
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
|
||||||
|
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||||
|
Gaetan de Villele <gdevillele@gmail.com>
|
||||||
|
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
|
||||||
|
George Kontridze <george@bugsnag.com>
|
||||||
|
Gerwim Feiken <g.feiken@tfe.nl> <gerwim@gmail.com>
|
||||||
|
Giampaolo Mancini <giampaolo@trampolineup.com>
|
||||||
|
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
||||||
|
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
|
||||||
|
Greg Stephens <greg@udon.org>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@charmes.net>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@docker.com>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@dotcloud.com>
|
||||||
|
Gurjeet Singh <gurjeet@singh.im> <singh.gurjeet@gmail.com>
|
||||||
|
Gustav Sinder <gustav.sinder@gmail.com>
|
||||||
|
Günther Jungbluth <gunther@gameslabs.net>
|
||||||
|
Hakan Özler <hakan.ozler@kodcu.com>
|
||||||
|
Hao Shu Wei <haosw@cn.ibm.com>
|
||||||
|
Hao Shu Wei <haosw@cn.ibm.com> <haoshuwei1989@163.com>
|
||||||
|
Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
|
||||||
|
Harold Cooper <hrldcpr@gmail.com>
|
||||||
|
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||||
|
Harry Zhang <harryz@hyper.sh> <resouer@163.com>
|
||||||
|
Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
|
||||||
|
Harry Zhang <resouer@163.com>
|
||||||
|
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
|
||||||
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
|
Hollie Teal <hollie@docker.com>
|
||||||
|
Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
|
||||||
|
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
|
||||||
|
Hu Keping <hukeping@huawei.com>
|
||||||
|
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||||
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||||
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
|
||||||
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
|
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||||
|
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
|
||||||
|
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||||
|
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
||||||
|
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
|
||||||
|
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
||||||
|
Jean-Tiare Le Bigot <jt@yadutaf.fr> <admin@jtlebi.fr>
|
||||||
|
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
|
||||||
|
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
||||||
|
Jeroen Franse <jeroenfranse@gmail.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <acidburn@docker.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <acidburn@google.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <jess@docker.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <jess@mesosphere.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <jfrazelle@users.noreply.github.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <me@jessfraz.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com> <princess@docker.com>
|
||||||
|
Jim Galasyn <jim.galasyn@docker.com>
|
||||||
|
Jiuyue Ma <majiuyue@huawei.com>
|
||||||
|
Joey Geiger <jgeiger@gmail.com>
|
||||||
|
Joffrey F <joffrey@docker.com>
|
||||||
|
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
|
||||||
|
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
||||||
|
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
|
||||||
|
John Harris <john@johnharris.io>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com> <jhoward@ntdev.microsoft.com>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
|
||||||
|
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||||
|
Jordan Arentsen <blissdev@gmail.com>
|
||||||
|
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
|
||||||
|
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
|
||||||
|
Jose Diaz-Gonzalez <jose@seatgeek.com> <josegonzalez@users.noreply.github.com>
|
||||||
|
Josh Eveleth <joshe@opendns.com> <jeveleth@users.noreply.github.com>
|
||||||
|
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
|
||||||
|
Josh Horwitz <horwitz@addthis.com> <horwitzja@gmail.com>
|
||||||
|
Josh Soref <jsoref@gmail.com> <jsoref@users.noreply.github.com>
|
||||||
|
Josh Wilson <josh.wilson@fivestars.com> <jcwilson@users.noreply.github.com>
|
||||||
|
Joyce Jang <mail@joycejang.com>
|
||||||
|
Julien Bordellier <julienbordellier@gmail.com> <git@julienbordellier.com>
|
||||||
|
Julien Bordellier <julienbordellier@gmail.com> <me@julienbordellier.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
|
||||||
|
Justin Simonelis <justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
|
||||||
|
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
|
||||||
|
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@gmail.com>
|
||||||
|
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jp@enix.org>
|
||||||
|
K. Heller <pestophagous@gmail.com> <pestophagous@users.noreply.github.com>
|
||||||
|
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
|
||||||
|
Kai Qiang Wu (Kennan) <wkq5325@gmail.com> <wkqwu@cn.ibm.com>
|
||||||
|
Kamil Domański <kamil@domanski.co>
|
||||||
|
Kamjar Gerami <kami.gerami@gmail.com>
|
||||||
|
Kat Samperi <kat.samperi@gmail.com> <kizzie@users.noreply.github.com>
|
||||||
|
Ken Cochrane <kencochrane@gmail.com> <KenCochrane@gmail.com>
|
||||||
|
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
|
||||||
|
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||||
|
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
|
||||||
|
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||||
|
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||||
|
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||||
|
Kir Kolyshkin <kolyshkin@gmail.com> <kir@openvz.org>
|
||||||
|
Kir Kolyshkin <kolyshkin@gmail.com> <kolyshkin@users.noreply.github.com>
|
||||||
|
Konrad Kleine <konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
|
||||||
|
Konstantin Gribov <grossws@gmail.com>
|
||||||
|
Konstantin Pelykh <kpelykh@zettaset.com>
|
||||||
|
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
||||||
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
|
||||||
|
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
|
||||||
|
Lei Jitang <leijitang@huawei.com>
|
||||||
|
Lei Jitang <leijitang@huawei.com> <leijitang@gmail.com>
|
||||||
|
Liang Mingqiang <mqliang.zju@gmail.com>
|
||||||
|
Liang-Chi Hsieh <viirya@gmail.com>
|
||||||
|
Liao Qingwei <liaoqingwei@huawei.com>
|
||||||
|
Linus Heckemann <lheckemann@twig-world.com>
|
||||||
|
Linus Heckemann <lheckemann@twig-world.com> <anonymouse2048@gmail.com>
|
||||||
|
Lokesh Mandvekar <lsm5@fedoraproject.org> <lsm5@redhat.com>
|
||||||
|
Lorenzo Fontana <lo@linux.com> <fontanalorenzo@me.com>
|
||||||
|
Louis Opter <kalessin@kalessin.fr>
|
||||||
|
Louis Opter <kalessin@kalessin.fr> <louis@dotcloud.com>
|
||||||
|
Luca Favatella <luca.favatella@erlang-solutions.com> <lucafavatella@users.noreply.github.com>
|
||||||
|
Luke Marsden <me@lukemarsden.net> <luke@digital-crocus.com>
|
||||||
|
Lyn <energylyn@zju.edu.cn>
|
||||||
|
Lynda O'Leary <lyndaoleary29@gmail.com>
|
||||||
|
Lynda O'Leary <lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
|
||||||
|
Ma Müller <mueller-ma@users.noreply.github.com>
|
||||||
|
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@microsoft.com>
|
||||||
|
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
|
||||||
|
Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
|
||||||
|
Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
|
||||||
|
Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
|
||||||
|
Marc Abramowitz <marc@marc-abramowitz.com> <msabramo@gmail.com>
|
||||||
|
Marcelo Horacio Fortino <info@fortinux.com> <fortinux@users.noreply.github.com>
|
||||||
|
Marcus Linke <marcus.linke@gmx.de>
|
||||||
|
Marianna Tessel <mtesselh@gmail.com>
|
||||||
|
Mark Oates <fl0yd@me.com>
|
||||||
|
Markan Patel <mpatel678@gmail.com>
|
||||||
|
Markus Kortlang <hyp3rdino@googlemail.com> <markus.kortlang@lhsystems.com>
|
||||||
|
Martin Redmond <redmond.martin@gmail.com> <martin@tinychat.com>
|
||||||
|
Martin Redmond <redmond.martin@gmail.com> <xgithub@redmond5.com>
|
||||||
|
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
|
||||||
|
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
|
||||||
|
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
|
||||||
|
Mateusz Major <apkd@users.noreply.github.com>
|
||||||
|
Matt Bentley <matt.bentley@docker.com> <mbentley@mbentley.net>
|
||||||
|
Matt Schurenko <matt.schurenko@gmail.com>
|
||||||
|
Matt Williams <mattyw@me.com>
|
||||||
|
Matt Williams <mattyw@me.com> <gh@mattyw.net>
|
||||||
|
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
|
||||||
|
Matthew Mosesohn <raytrac3r@gmail.com>
|
||||||
|
Matthew Mueller <mattmuelle@gmail.com>
|
||||||
|
Matthias Kühnle <git.nivoc@neverbox.com> <kuehnle@online.de>
|
||||||
|
Mauricio Garavaglia <mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
|
||||||
|
Michael Crosby <michael@docker.com> <crosby.michael@gmail.com>
|
||||||
|
Michael Crosby <michael@docker.com> <crosbymichael@gmail.com>
|
||||||
|
Michael Crosby <michael@docker.com> <michael@crosbymichael.com>
|
||||||
|
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
|
||||||
|
Michael Huettermann <michael@huettermann.net>
|
||||||
|
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
|
||||||
|
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||||
|
Michal Minář <miminar@redhat.com>
|
||||||
|
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
|
||||||
|
Miguel Angel Fernández <elmendalerenda@gmail.com>
|
||||||
|
Mihai Borobocea <MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
|
||||||
|
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
||||||
|
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
|
||||||
|
Milind Chawre <milindchawre@gmail.com>
|
||||||
|
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
|
||||||
|
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||||
|
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
||||||
|
Moysés Borges <moysesb@gmail.com>
|
||||||
|
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
||||||
|
Nace Oroz <orkica@gmail.com>
|
||||||
|
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
||||||
|
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
|
||||||
|
Neil Horman <nhorman@tuxdriver.com> <nhorman@hmswarspite.think-freely.org>
|
||||||
|
Nick Russo <nicholasjamesrusso@gmail.com> <nicholasrusso@icloud.com>
|
||||||
|
Nicolas Borboën <ponsfrilus@gmail.com> <ponsfrilus@users.noreply.github.com>
|
||||||
|
Nigel Poulton <nigelpoulton@hotmail.com>
|
||||||
|
Nik Nyby <nikolas@gnu.org> <nnyby@columbia.edu>
|
||||||
|
Nolan Darilek <nolan@thewordnerd.info>
|
||||||
|
O.S. Tezer <ostezer@gmail.com>
|
||||||
|
O.S. Tezer <ostezer@gmail.com> <ostezer@users.noreply.github.com>
|
||||||
|
Oh Jinkyun <tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
|
||||||
|
Ouyang Liduo <oyld0210@163.com>
|
||||||
|
Patrick Stapleton <github@gdi2290.com>
|
||||||
|
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
|
||||||
|
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
|
||||||
|
Pawel Konczalski <mail@konczalski.de>
|
||||||
|
Peter Choi <phkchoi89@gmail.com> <reikani@Peters-MacBook-Pro.local>
|
||||||
|
Peter Dave Hello <hsu@peterdavehello.org> <PeterDaveHello@users.noreply.github.com>
|
||||||
|
Peter Jaffe <pjaffe@nevo.com>
|
||||||
|
Peter Nagy <xificurC@gmail.com> <pnagy@gratex.com>
|
||||||
|
Peter Waller <p@pwaller.net> <peter@scraperwiki.com>
|
||||||
|
Phil Estes <estesp@linux.vnet.ibm.com> <estesp@gmail.com>
|
||||||
|
Philip Alexander Etling <paetling@gmail.com>
|
||||||
|
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
|
||||||
|
Qiang Huang <h.huangqiang@huawei.com>
|
||||||
|
Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
||||||
|
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||||
|
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||||
|
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||||
|
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||||
|
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||||
|
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
|
||||||
|
Ross Boucher <rboucher@gmail.com>
|
||||||
|
Runshen Zhu <runshen.zhu@gmail.com>
|
||||||
|
Ryan Stelly <ryan.stelly@live.com>
|
||||||
|
Sakeven Jiang <jc5930@sina.cn>
|
||||||
|
Sandeep Bansal <sabansal@microsoft.com>
|
||||||
|
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
|
||||||
|
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
|
||||||
|
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
|
Shaun Kaasten <shaunk@gmail.com>
|
||||||
|
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||||
|
Shengbo Song <thomassong@tencent.com>
|
||||||
|
Shengbo Song <thomassong@tencent.com> <mymneo@163.com>
|
||||||
|
Shih-Yuan Lee <fourdollars@gmail.com>
|
||||||
|
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
||||||
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
|
Shuwei Hao <haosw@cn.ibm.com>
|
||||||
|
Shuwei Hao <haosw@cn.ibm.com> <haoshuwei24@gmail.com>
|
||||||
|
Sidhartha Mani <sidharthamn@gmail.com>
|
||||||
|
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
||||||
|
Solomon Hykes <solomon@docker.com> <s@docker.com>
|
||||||
|
Solomon Hykes <solomon@docker.com> <solomon.hykes@dotcloud.com>
|
||||||
|
Solomon Hykes <solomon@docker.com> <solomon@dotcloud.com>
|
||||||
|
Soshi Katsuta <soshi.katsuta@gmail.com>
|
||||||
|
Soshi Katsuta <soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
|
||||||
|
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||||
|
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
|
||||||
|
Srini Brahmaroutu <srbrahma@us.ibm.com> <sbrahma@us.ibm.com>
|
||||||
|
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com> <srinsriv@users.noreply.github.com>
|
||||||
|
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||||
|
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
|
||||||
|
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
|
||||||
|
Stefan S. <tronicum@user.github.com>
|
||||||
|
Stephen Day <stephen.day@docker.com>
|
||||||
|
Stephen Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
||||||
|
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
|
||||||
|
Sun Gengze <690388648@qq.com>
|
||||||
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
Sun Jianbo <wonderflow.sun@gmail.com> <wonderflow@zju.edu.cn>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
|
||||||
|
Sylvain Bellemare <sylvain@ascribe.io>
|
||||||
|
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
||||||
|
Tangi Colin <tangicolin@gmail.com>
|
||||||
|
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
||||||
|
Thatcher Peskens <thatcher@docker.com>
|
||||||
|
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
|
||||||
|
Thatcher Peskens <thatcher@docker.com> <thatcher@gmx.net>
|
||||||
|
Thomas Gazagnaire <thomas@gazagnaire.org> <thomas@gazagnaire.com>
|
||||||
|
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||||
|
Thomas Léveil <thomasleveil@gmail.com>
|
||||||
|
Thomas Léveil <thomasleveil@gmail.com> <thomasleveil@users.noreply.github.com>
|
||||||
|
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
|
||||||
|
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
|
||||||
|
Tim Bart <tim@fewagainstmany.com>
|
||||||
|
Tim Bosse <taim@bosboot.org> <maztaim@users.noreply.github.com>
|
||||||
|
Tim Ruffles <oi@truffles.me.uk> <timruffles@googlemail.com>
|
||||||
|
Tim Terhorst <mynamewastaken+git@gmail.com>
|
||||||
|
Tim Zju <21651152@zju.edu.cn>
|
||||||
|
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||||
|
Toli Kuznets <toli@docker.com>
|
||||||
|
Tom Barlow <tomwbarlow@gmail.com>
|
||||||
|
Tom Sweeney <tsweeney@redhat.com>
|
||||||
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Trishna Guha <trishnaguha17@gmail.com>
|
||||||
|
Tristan Carel <tristan@cogniteev.com>
|
||||||
|
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
|
||||||
|
Umesh Yadav <umesh4257@gmail.com>
|
||||||
|
Umesh Yadav <umesh4257@gmail.com> <dungeonmaster18@users.noreply.github.com>
|
||||||
|
Victor Lyuboslavsky <victor@victoreda.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <dev@vvieux.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <victor.vieux@dotcloud.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <victor@docker.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <victor@dotcloud.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <victorvieux@gmail.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com> <vieux@docker.com>
|
||||||
|
Viktor Vojnovski <viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
|
||||||
|
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
|
||||||
|
Vincent Bernat <Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
|
||||||
|
Vincent Bernat <Vincent.Bernat@exoscale.ch> <vincent@bernat.im>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
|
||||||
|
Vishnu Kannan <vishnuk@google.com>
|
||||||
|
Vladimir Rutsky <altsysrq@gmail.com> <iamironbob@gmail.com>
|
||||||
|
Walter Stanish <walter@pratyeka.org>
|
||||||
|
Wang Guoliang <liangcszzu@163.com>
|
||||||
|
Wang Jie <wangjie5@chinaskycloud.com>
|
||||||
|
Wang Ping <present.wp@icloud.com>
|
||||||
|
Wang Xing <hzwangxing@corp.netease.com> <root@localhost>
|
||||||
|
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
|
Wayne Chang <wayne@neverfear.org>
|
||||||
|
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
||||||
|
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
||||||
|
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
||||||
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
|
Will Weaver <monkey@buildingbananas.com>
|
||||||
|
Xianglin Gao <xlgao@zju.edu.cn>
|
||||||
|
Xianlu Bird <xianlubird@gmail.com>
|
||||||
|
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
|
||||||
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
|
Yamasaki Masahide <masahide.y@gmail.com>
|
||||||
|
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||||
|
Yassine Tijani <yasstij11@gmail.com>
|
||||||
|
Yazhong Liu <yorkiefixer@gmail.com>
|
||||||
|
Yestin Sun <sunyi0804@gmail.com> <yestin.sun@polyera.com>
|
||||||
|
Yi EungJun <eungjun.yi@navercorp.com> <semtlenori@gmail.com>
|
||||||
|
Ying Li <ying.li@docker.com>
|
||||||
|
Ying Li <ying.li@docker.com> <cyli@twistedmatrix.com>
|
||||||
|
Yong Tang <yong.tang.github@outlook.com> <yongtang@users.noreply.github.com>
|
||||||
|
Yosef Fertel <yfertel@gmail.com> <frosforever@users.noreply.github.com>
|
||||||
|
Yu Changchun <yuchangchun1@huawei.com>
|
||||||
|
Yu Chengxia <yuchengxia@huawei.com>
|
||||||
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
|
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
||||||
|
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
||||||
|
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
||||||
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
|
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||||
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
|
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||||
|
Zou Yu <zouyu7@huawei.com>
638
vendor/github.com/docker/cli/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,638 @@
|
||||||
|
# This file lists all individuals having contributed content to the repository.
|
||||||
|
# For how it is generated, see `scripts/docs/generate-authors.sh`.
|
||||||
|
|
||||||
|
Aanand Prasad <aanand.prasad@gmail.com>
|
||||||
|
Aaron L. Xu <liker.xu@foxmail.com>
|
||||||
|
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||||
|
Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||||
|
Abdur Rehman <abdur_rehman@mentor.com>
|
||||||
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
|
Abin Shahab <ashahab@altiscale.com>
|
||||||
|
Addam Hardy <addam.hardy@gmail.com>
|
||||||
|
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||||
|
Adrien Duermael <adrien@duermael.com>
|
||||||
|
Adrien Folie <folie.adrien@gmail.com>
|
||||||
|
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||||
|
Aidan Feldman <aidan.feldman@gmail.com>
|
||||||
|
Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||||
|
AJ Bowen <aj@gandi.net>
|
||||||
|
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
||||||
|
Akim Demaille <akim.demaille@docker.com>
|
||||||
|
Alan Thompson <cloojure@gmail.com>
|
||||||
|
Albert Callarisa <shark234@gmail.com>
|
||||||
|
Aleksa Sarai <asarai@suse.de>
|
||||||
|
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||||
|
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||||
|
Alexander Boyd <alex@opengroove.org>
|
||||||
|
Alexander Larsson <alexl@redhat.com>
|
||||||
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
|
Alexandre González <agonzalezro@gmail.com>
|
||||||
|
Alfred Landrum <alfred.landrum@docker.com>
|
||||||
|
Alicia Lauerman <alicia@eta.im>
|
||||||
|
Allen Sun <allensun.shl@alibaba-inc.com>
|
||||||
|
Alvin Deng <alvin.q.deng@utexas.edu>
|
||||||
|
Amen Belayneh <amenbelayneh@gmail.com>
|
||||||
|
Amir Goldstein <amir73il@aquasec.com>
|
||||||
|
Amit Krishnan <amit.krishnan@oracle.com>
|
||||||
|
Amit Shukla <amit.shukla@docker.com>
|
||||||
|
Amy Lindburg <amy.lindburg@docker.com>
|
||||||
|
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||||
|
Andreas Köhler <andi5.py@gmx.net>
|
||||||
|
Andrew France <andrew@avito.co.uk>
|
||||||
|
Andrew Hsu <andrewhsu@docker.com>
|
||||||
|
Andrew Macpherson <hopscotch23@gmail.com>
|
||||||
|
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
||||||
|
Andrew Po <absourd.noise@gmail.com>
|
||||||
|
Andrey Petrov <andrey.petrov@shazow.net>
|
||||||
|
André Martins <aanm90@gmail.com>
|
||||||
|
Andy Goldstein <agoldste@redhat.com>
|
||||||
|
Andy Rothfusz <github@developersupport.net>
|
||||||
|
Anil Madhavapeddy <anil@recoil.org>
|
||||||
|
Ankush Agarwal <ankushagarwal11@gmail.com>
|
||||||
|
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||||
|
Antonis Kalipetis <akalipetis@gmail.com>
|
||||||
|
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||||
|
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||||
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
|
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||||
|
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||||
|
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||||
|
Barnaby Gray <barnaby@pickle.me.uk>
|
||||||
|
Bastiaan Bakker <bbakker@xebia.com>
|
||||||
|
BastianHofmann <bastianhofmann@me.com>
|
||||||
|
Ben Bonnefoy <frenchben@docker.com>
|
||||||
|
Ben Firshman <ben@firshman.co.uk>
|
||||||
|
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||||
|
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||||
|
Bill Wang <ozbillwang@gmail.com>
|
||||||
|
Bin Liu <liubin0329@gmail.com>
|
||||||
|
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||||
|
Boaz Shuster <ripcurld.github@gmail.com>
|
||||||
|
Boris Pruessmann <boris@pruessmann.org>
|
||||||
|
Bradley Cicenas <bradley.cicenas@gmail.com>
|
||||||
|
Brandon Philips <brandon.philips@coreos.com>
|
||||||
|
Brent Salisbury <brent.salisbury@docker.com>
|
||||||
|
Bret Fisher <bret@bretfisher.com>
|
||||||
|
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
||||||
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Bryan Bess <squarejaw@bsbess.com>
|
||||||
|
Bryan Boreham <bjboreham@gmail.com>
|
||||||
|
Bryan Murphy <bmurphy1976@gmail.com>
|
||||||
|
bryfry <bryon.fryer@gmail.com>
|
||||||
|
Cameron Spear <cameronspear@gmail.com>
|
||||||
|
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||||
|
Carlo Mion <mion00@gmail.com>
|
||||||
|
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||||
|
Ce Gao <ce.gao@outlook.com>
|
||||||
|
Cedric Davies <cedricda@microsoft.com>
|
||||||
|
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||||
|
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
||||||
|
Charles Chan <charleswhchan@users.noreply.github.com>
|
||||||
|
Charles Law <claw@conduce.com>
|
||||||
|
Charles Smith <charles.smith@docker.com>
|
||||||
|
Charlie Drage <charlie@charliedrage.com>
|
||||||
|
ChaYoung You <yousbe@gmail.com>
|
||||||
|
Chen Chuanliang <chen.chuanliang@zte.com.cn>
|
||||||
|
Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
|
||||||
|
Chen Mingjie <chenmingjie0828@163.com>
|
||||||
|
Chen Qiu <cheney-90@hotmail.com>
|
||||||
|
Chris Gavin <chris@chrisgavin.me>
|
||||||
|
Chris Gibson <chris@chrisg.io>
|
||||||
|
Chris McKinnel <chrismckinnel@gmail.com>
|
||||||
|
Chris Snow <chsnow123@gmail.com>
|
||||||
|
Chris Weyl <cweyl@alumni.drew.edu>
|
||||||
|
Christian Persson <saser@live.se>
|
||||||
|
Christian Stefanescu <st.chris@gmail.com>
|
||||||
|
Christophe Robin <crobin@nekoo.com>
|
||||||
|
Christophe Vidal <kriss@krizalys.com>
|
||||||
|
Christopher Biscardi <biscarch@sketcht.com>
|
||||||
|
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||||
|
Christy Perez <christy@linux.vnet.ibm.com>
|
||||||
|
Chun Chen <ramichen@tencent.com>
|
||||||
|
Clinton Kitson <clintonskitson@gmail.com>
|
||||||
|
Coenraad Loubser <coenraad@wish.org.za>
|
||||||
|
Colin Hebert <hebert.colin@gmail.com>
|
||||||
|
Collin Guarino <collin.guarino@gmail.com>
|
||||||
|
Colm Hally <colmhally@gmail.com>
|
||||||
|
Corey Farrell <git@cfware.com>
|
||||||
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
|
Dafydd Crosby <dtcrsby@gmail.com>
|
||||||
|
dalanlan <dalanlan925@gmail.com>
|
||||||
|
Damien Nadé <github@livna.org>
|
||||||
|
Dan Cotora <dan@bluevision.ro>
|
||||||
|
Daniel Dao <dqminh@cloudflare.com>
|
||||||
|
Daniel Farrell <dfarrell@redhat.com>
|
||||||
|
Daniel Gasienica <daniel@gasienica.ch>
|
||||||
|
Daniel Goosen <daniel.goosen@surveysampling.com>
|
||||||
|
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||||
|
Daniel J Walsh <dwalsh@redhat.com>
|
||||||
|
Daniel Nephin <dnephin@docker.com>
|
||||||
|
Daniel Norberg <dano@spotify.com>
|
||||||
|
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||||
|
Daniel Zhang <jmzwcn@gmail.com>
|
||||||
|
Danny Berger <dpb587@gmail.com>
|
||||||
|
Darren Shepherd <darren.s.shepherd@gmail.com>
|
||||||
|
Darren Stahl <darst@microsoft.com>
|
||||||
|
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||||
|
Dave Goodchild <buddhamagnet@gmail.com>
|
||||||
|
Dave Henderson <dhenderson@gmail.com>
|
||||||
|
Dave Tucker <dt@docker.com>
|
||||||
|
David Calavera <david.calavera@gmail.com>
|
||||||
|
David Cramer <davcrame@cisco.com>
|
||||||
|
David Dooling <dooling@gmail.com>
|
||||||
|
David Gageot <david@gageot.net>
|
||||||
|
David Lechner <david@lechnology.com>
|
||||||
|
David Sheets <dsheets@docker.com>
|
||||||
|
David Williamson <david.williamson@docker.com>
|
||||||
|
David Xia <dxia@spotify.com>
|
||||||
|
David Young <yangboh@cn.ibm.com>
|
||||||
|
Deng Guangxing <dengguangxing@huawei.com>
|
||||||
|
Denis Defreyne <denis@soundcloud.com>
|
||||||
|
Denis Gladkikh <denis@gladkikh.email>
|
||||||
|
Denis Ollier <larchunix@users.noreply.github.com>
|
||||||
|
Dennis Docter <dennis@d23.nl>
|
||||||
|
Derek McGowan <derek@mcgstyle.net>
|
||||||
|
Deshi Xiao <dxiao@redhat.com>
|
||||||
|
Dharmit Shah <shahdharmit@gmail.com>
|
||||||
|
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||||
|
Dieter Reuter <dieter.reuter@me.com>
|
||||||
|
Dima Stopel <dima@twistlock.com>
|
||||||
|
Dimitry Andric <d.andric@activevideo.com>
|
||||||
|
Ding Fei <dingfei@stars.org.cn>
|
||||||
|
Diogo Monica <diogo@docker.com>
|
||||||
|
Dmitry Gusev <dmitry.gusev@gmail.com>
|
||||||
|
Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||||
|
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||||
|
Don Kjer <don.kjer@gmail.com>
|
||||||
|
Dong Chen <dongluo.chen@docker.com>
|
||||||
|
Doug Davis <dug@us.ibm.com>
|
||||||
|
Drew Erny <drew.erny@docker.com>
|
||||||
|
Ed Costello <epc@epcostello.com>
|
||||||
|
Eli Uriegas <eli.uriegas@docker.com>
|
||||||
|
Eli Uriegas <seemethere101@gmail.com>
|
||||||
|
Elias Faxö <elias.faxo@tre.se>
|
||||||
|
Eric G. Noriega <enoriega@vizuri.com>
|
||||||
|
Eric Rosenberg <ehaydenr@gmail.com>
|
||||||
|
Eric Sage <eric.david.sage@gmail.com>
|
||||||
|
Eric-Olivier Lamey <eo@lamey.me>
|
||||||
|
Erica Windisch <erica@windisch.us>
|
||||||
|
Erik Hollensbe <github@hollensbe.org>
|
||||||
|
Erik St. Martin <alakriti@gmail.com>
|
||||||
|
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
|
||||||
|
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||||
|
Evan Allrich <evan@unguku.com>
|
||||||
|
Evan Hazlett <ejhazlett@gmail.com>
|
||||||
|
Evan Krall <krall@yelp.com>
|
||||||
|
Evelyn Xu <evelynhsu21@gmail.com>
|
||||||
|
Everett Toews <everett.toews@rackspace.com>
|
||||||
|
Fabio Falci <fabiofalci@gmail.com>
|
||||||
|
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
|
||||||
|
Felix Hupfeld <felix@quobyte.com>
|
||||||
|
Felix Rabe <felix@rabe.io>
|
||||||
|
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||||
|
Florian Klein <florian.klein@free.fr>
|
||||||
|
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||||
|
Fred Lifton <fred.lifton@docker.com>
|
||||||
|
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||||
|
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
|
||||||
|
Frieder Bluemle <frieder.bluemle@gmail.com>
|
||||||
|
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||||
|
Gaetan de Villele <gdevillele@gmail.com>
|
||||||
|
Gang Qiao <qiaohai8866@gmail.com>
|
||||||
|
Gary Schaetz <gary@schaetzkc.com>
|
||||||
|
Genki Takiuchi <genki@s21g.com>
|
||||||
|
George MacRorie <gmacr31@gmail.com>
|
||||||
|
George Xie <georgexsh@gmail.com>
|
||||||
|
Gianluca Borello <g.borello@gmail.com>
|
||||||
|
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||||
|
Gou Rao <gou@portworx.com>
|
||||||
|
Grant Reaber <grant.reaber@gmail.com>
|
||||||
|
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||||
|
Guilhem Lettron <guilhem+github@lettron.fr>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com>
|
||||||
|
gwx296173 <gaojing3@huawei.com>
|
||||||
|
Günther Jungbluth <gunther@gameslabs.net>
|
||||||
|
Hakan Özler <hakan.ozler@kodcu.com>
|
||||||
|
Hao Zhang <21521210@zju.edu.cn>
|
||||||
|
Harald Albers <github@albersweb.de>
|
||||||
|
Harold Cooper <hrldcpr@gmail.com>
|
||||||
|
Harry Zhang <harryz@hyper.sh>
|
||||||
|
He Simei <hesimei@zju.edu.cn>
|
||||||
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
|
Henning Sprang <henning.sprang@gmail.com>
|
||||||
|
Hernan Garcia <hernandanielg@gmail.com>
|
||||||
|
Hongbin Lu <hongbin034@gmail.com>
|
||||||
|
Hu Keping <hukeping@huawei.com>
|
||||||
|
Huayi Zhang <irachex@gmail.com>
|
||||||
|
huqun <huqun@zju.edu.cn>
|
||||||
|
Huu Nguyen <huu@prismskylabs.com>
|
||||||
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||||
|
Ian Campbell <ian.campbell@docker.com>
|
||||||
|
Ian Philpot <ian.philpot@microsoft.com>
|
||||||
|
Ignacio Capurro <icapurrofagian@gmail.com>
|
||||||
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
|
Ilya Sotkov <ilya@sotkov.com>
|
||||||
|
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||||
|
Ivan Grcic <igrcic@gmail.com>
|
||||||
|
Ivan Markin <sw@nogoegst.net>
|
||||||
|
Jacob Atzen <jacob@jacobatzen.dk>
|
||||||
|
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||||
|
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||||
|
Jake Sanders <jsand@google.com>
|
||||||
|
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||||
|
James Turnbull <james@lovedthanlost.net>
|
||||||
|
Jamie Hannaford <jamie@limetree.org>
|
||||||
|
Jan Koprowski <jan.koprowski@gmail.com>
|
||||||
|
Jan Pazdziora <jpazdziora@redhat.com>
|
||||||
|
Jan-Jaap Driessen <janjaapdriessen@gmail.com>
|
||||||
|
Jana Radhakrishnan <mrjana@docker.com>
|
||||||
|
Jared Hocutt <jaredh@netapp.com>
|
||||||
|
Jasmine Hegman <jasmine@jhegman.com>
|
||||||
|
Jason Heiss <jheiss@aput.net>
|
||||||
|
Jason Plum <jplum@devonit.com>
|
||||||
|
Jay Kamat <github@jgkamat.33mail.com>
|
||||||
|
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
||||||
|
Jean-Pierre Huynh <jp@moogsoft.com>
|
||||||
|
Jeff Lindsay <progrium@gmail.com>
|
||||||
|
Jeff Nickoloff <jeff.nickoloff@gmail.com>
|
||||||
|
Jeff Silberman <jsilberm@gmail.com>
|
||||||
|
Jeremy Chambers <jeremy@thehipbot.com>
|
||||||
|
Jeremy Unruh <jeremybunruh@gmail.com>
|
||||||
|
Jeremy Yallop <yallop@docker.com>
|
||||||
|
Jeroen Franse <jeroenfranse@gmail.com>
|
||||||
|
Jesse Adametz <jesseadametz@gmail.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com>
|
||||||
|
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||||
|
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||||
|
Jie Luo <luo612@zju.edu.cn>
|
||||||
|
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||||
|
Jim Galasyn <jim.galasyn@docker.com>
|
||||||
|
Jimmy Leger <jimmy.leger@gmail.com>
|
||||||
|
Jimmy Song <rootsongjc@gmail.com>
|
||||||
|
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||||
|
Joao Fernandes <joao.fernandes@docker.com>
|
||||||
|
Joe Doliner <jdoliner@pachyderm.io>
|
||||||
|
Joe Gordon <joe.gordon0@gmail.com>
|
||||||
|
Joel Handwell <joelhandwell@gmail.com>
|
||||||
|
Joey Geiger <jgeiger@gmail.com>
|
||||||
|
Joffrey F <joffrey@docker.com>
|
||||||
|
Johan Euphrosine <proppy@google.com>
|
||||||
|
Johannes 'fish' Ziemke <github@freigeist.org>
|
||||||
|
John Feminella <jxf@jxf.me>
|
||||||
|
John Harris <john@johnharris.io>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com>
|
||||||
|
John Laswell <john.n.laswell@gmail.com>
|
||||||
|
John Maguire <jmaguire@duosecurity.com>
|
||||||
|
John Mulhausen <john@docker.com>
|
||||||
|
John Starks <jostarks@microsoft.com>
|
||||||
|
John Stephens <johnstep@docker.com>
|
||||||
|
John Tims <john.k.tims@gmail.com>
|
||||||
|
John V. Martinez <jvmatl@gmail.com>
|
||||||
|
John Willis <john.willis@docker.com>
|
||||||
|
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||||
|
Jonathan Lee <jonjohn1232009@gmail.com>
|
||||||
|
Jonathan Lomas <jonathan@floatinglomas.ca>
|
||||||
|
Jonathan McCrohan <jmccrohan@gmail.com>
|
||||||
|
Jonh Wendell <jonh.wendell@redhat.com>
|
||||||
|
Jordan Jennings <jjn2009@gmail.com>
|
||||||
|
Joseph Kern <jkern@semafour.net>
|
||||||
|
Josh Bodah <jb3689@yahoo.com>
|
||||||
|
Josh Chorlton <jchorlton@gmail.com>
|
||||||
|
Josh Hawn <josh.hawn@docker.com>
|
||||||
|
Josh Horwitz <horwitz@addthis.com>
|
||||||
|
Josh Soref <jsoref@gmail.com>
|
||||||
|
Julien Barbier <write0@gmail.com>
|
||||||
|
Julien Kassar <github@kassisol.com>
|
||||||
|
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||||
|
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
|
Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||||
|
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||||
|
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||||
|
Jörg Thalheim <joerg@higgsboson.tk>
|
||||||
|
Kai Blin <kai@samba.org>
|
||||||
|
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
|
||||||
|
Kara Alexandra <kalexandra@us.ibm.com>
|
||||||
|
Kareem Khazem <karkhaz@karkhaz.com>
|
||||||
|
Karthik Nayak <Karthik.188@gmail.com>
|
||||||
|
Kat Samperi <kat.samperi@gmail.com>
|
||||||
|
Katie McLaughlin <katie@glasnt.com>
|
||||||
|
Ke Xu <leonhartx.k@gmail.com>
|
||||||
|
Kei Ohmura <ohmura.kei@gmail.com>
|
||||||
|
Keith Hudgins <greenman@greenman.org>
|
||||||
|
Ken Cochrane <kencochrane@gmail.com>
|
||||||
|
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
|
||||||
|
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||||
|
Kevin Burke <kev@inburke.com>
|
||||||
|
Kevin Feyrer <kevin.feyrer@btinternet.com>
|
||||||
|
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||||
|
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||||
|
Kevin Richardson <kevin@kevinrichardson.co>
|
||||||
|
khaled souf <khaled.souf@gmail.com>
|
||||||
|
Kim Eik <kim@heldig.org>
|
||||||
|
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||||
|
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
||||||
|
Krasi Georgiev <krasi@vip-consult.solutions>
|
||||||
|
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||||
|
Kun Zhang <zkazure@gmail.com>
|
||||||
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||||
|
Kyle Spiers <kyle@spiers.me>
|
||||||
|
Lachlan Cooper <lachlancooper@gmail.com>
|
||||||
|
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||||
|
Lars Kellogg-Stedman <lars@redhat.com>
|
||||||
|
Laura Frank <ljfrank@gmail.com>
|
||||||
|
Laurent Erignoux <lerignoux@gmail.com>
|
||||||
|
Lei Jitang <leijitang@huawei.com>
|
||||||
|
Lennie <github@consolejunkie.net>
|
||||||
|
Leo Gallucci <elgalu3@gmail.com>
|
||||||
|
Lewis Daly <lewisdaly@me.com>
|
||||||
|
Li Yi <denverdino@gmail.com>
|
||||||
|
Li Yi <weiyuan.yl@alibaba-inc.com>
|
||||||
|
Liang-Chi Hsieh <viirya@gmail.com>
|
||||||
|
Lily Guo <lily.guo@docker.com>
|
||||||
|
Lin Lu <doraalin@163.com>
|
||||||
|
Linus Heckemann <lheckemann@twig-world.com>
|
||||||
|
Liping Xue <lipingxue@gmail.com>
|
||||||
|
Liron Levin <liron@twistlock.com>
|
||||||
|
liwenqi <vikilwq@zju.edu.cn>
|
||||||
|
lixiaobing10051267 <li.xiaobing1@zte.com.cn>
|
||||||
|
Lloyd Dewolf <foolswisdom@gmail.com>
|
||||||
|
Lorenzo Fontana <lo@linux.com>
|
||||||
|
Louis Opter <kalessin@kalessin.fr>
|
||||||
|
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||||
|
Luca Marturana <lucamarturana@gmail.com>
|
||||||
|
Lucas Chan <lucas-github@lucaschan.com>
|
||||||
|
Luka Hartwig <mail@lukahartwig.de>
|
||||||
|
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||||
|
Lénaïc Huard <lhuard@amadeus.com>
|
||||||
|
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
|
||||||
|
Mabin <bin.ma@huawei.com>
|
||||||
|
Madhav Puri <madhav.puri@gmail.com>
|
||||||
|
Madhu Venugopal <madhu@socketplane.io>
|
||||||
|
Malte Janduda <mail@janduda.net>
|
||||||
|
Manjunath A Kumatagi <mkumatag@in.ibm.com>
|
||||||
|
Mansi Nahar <mmn4185@rit.edu>
|
||||||
|
mapk0y <mapk0y@gmail.com>
|
||||||
|
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
|
||||||
|
Marco Mariani <marco.mariani@alterway.fr>
|
||||||
|
Marcus Martins <marcus@docker.com>
|
||||||
|
Marianna Tessel <mtesselh@gmail.com>
|
||||||
|
Marius Sturm <marius@graylog.com>
|
||||||
|
Mark Oates <fl0yd@me.com>
|
||||||
|
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
|
||||||
|
Mary Anthony <mary.anthony@docker.com>
|
||||||
|
Mason Malone <mason.malone@gmail.com>
|
||||||
|
Mateusz Major <apkd@users.noreply.github.com>
|
||||||
|
Matt Gucci <matt9ucci@gmail.com>
|
||||||
|
Matt Robenolt <matt@ydekproductions.com>
|
||||||
|
Matthew Heon <mheon@redhat.com>
|
||||||
|
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||||
|
Max Shytikov <mshytikov@gmail.com>
|
||||||
|
Maxime Petazzoni <max@signalfuse.com>
|
||||||
|
Mei ChunTao <mei.chuntao@zte.com.cn>
|
||||||
|
Micah Zoltu <micah@newrelic.com>
|
||||||
|
Michael A. Smith <michael@smith-li.com>
|
||||||
|
Michael Bridgen <mikeb@squaremobius.net>
|
||||||
|
Michael Crosby <michael@docker.com>
|
||||||
|
Michael Friis <friism@gmail.com>
|
||||||
|
Michael Irwin <mikesir87@gmail.com>
|
||||||
|
Michael Käufl <docker@c.michael-kaeufl.de>
|
||||||
|
Michael Prokop <github@michael-prokop.at>
|
||||||
|
Michael Scharf <github@scharf.gr>
|
||||||
|
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||||
|
Michael Steinert <mike.steinert@gmail.com>
|
||||||
|
Michael West <mwest@mdsol.com>
|
||||||
|
Michal Minář <miminar@redhat.com>
|
||||||
|
Michał Czeraszkiewicz <czerasz@gmail.com>
|
||||||
|
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com>
|
||||||
|
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||||
|
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||||
|
Mike Brown <brownwm@us.ibm.com>
|
||||||
|
Mike Casas <mkcsas0@gmail.com>
|
||||||
|
Mike Danese <mikedanese@google.com>
|
||||||
|
Mike Dillon <mike@embody.org>
|
||||||
|
Mike Goelzer <mike.goelzer@docker.com>
|
||||||
|
Mike MacCana <mike.maccana@gmail.com>
|
||||||
|
mikelinjie <294893458@qq.com>
|
||||||
|
Mikhail Vasin <vasin@cloud-tv.ru>
|
||||||
|
Milind Chawre <milindchawre@gmail.com>
|
||||||
|
Misty Stanley-Jones <misty@docker.com>
|
||||||
|
Mohammad Banikazemi <mb@us.ibm.com>
|
||||||
|
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||||
|
Moorthy RS <rsmoorthy@gmail.com>
|
||||||
|
Morgan Bauer <mbauer@us.ibm.com>
|
||||||
|
Moysés Borges <moysesb@gmail.com>
|
||||||
|
Mrunal Patel <mrunalp@gmail.com>
|
||||||
|
muicoder <muicoder@gmail.com>
|
||||||
|
Muthukumar R <muthur@gmail.com>
|
||||||
|
Máximo Cuadros <mcuadros@gmail.com>
|
||||||
|
Nace Oroz <orkica@gmail.com>
|
||||||
|
Nahum Shalman <nshalman@omniti.com>
|
||||||
|
Nalin Dahyabhai <nalin@redhat.com>
|
||||||
|
Natalie Parker <nparker@omnifone.com>
|
||||||
|
Nate Brennand <nate.brennand@clever.com>
|
||||||
|
Nathan Hsieh <hsieh.nathan@gmail.com>
|
||||||
|
Nathan LeClaire <nathan.leclaire@docker.com>
|
||||||
|
Nathan McCauley <nathan.mccauley@docker.com>
|
||||||
|
Neil Peterson <neilpeterson@outlook.com>
|
||||||
|
Nicola Kabar <nicolaka@gmail.com>
|
||||||
|
Nicolas Borboën <ponsfrilus@gmail.com>
|
||||||
|
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||||
|
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||||
|
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||||
|
Nikolay Milovanov <nmil@itransformers.net>
|
||||||
|
Nishant Totla <nishanttotla@gmail.com>
|
||||||
|
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||||
|
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||||
|
O.S. Tezer <ostezer@gmail.com>
|
||||||
|
ohmystack <jun.jiang02@ele.me>
|
||||||
|
Olle Jonsson <olle.jonsson@gmail.com>
|
||||||
|
Otto Kekäläinen <otto@seravo.fi>
|
||||||
|
Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||||
|
Pascal Borreli <pascal@borreli.com>
|
||||||
|
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||||
|
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||||
|
Patrick Lang <plang@microsoft.com>
|
||||||
|
Paul <paul9869@gmail.com>
|
||||||
|
Paul Kehrer <paul.l.kehrer@gmail.com>
|
||||||
|
Paul Lietar <paul@lietar.net>
|
||||||
|
Paul Weaver <pauweave@cisco.com>
|
||||||
|
Pavel Pospisil <pospispa@gmail.com>
|
||||||
|
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
|
||||||
|
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
|
||||||
|
Peter Edge <peter.edge@gmail.com>
|
||||||
|
Peter Jaffe <pjaffe@nevo.com>
|
||||||
|
Peter Nagy <xificurC@gmail.com>
|
||||||
|
Peter Salvatore <peter@psftw.com>
|
||||||
|
Peter Waller <p@pwaller.net>
|
||||||
|
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||||
|
Philip Alexander Etling <paetling@gmail.com>
|
||||||
|
Philipp Gillé <philipp.gille@gmail.com>
|
||||||
|
pidster <pid@pidster.com>
|
||||||
|
pixelistik <pixelistik@users.noreply.github.com>
|
||||||
|
Pratik Karki <prertik@outlook.com>
|
||||||
|
Prayag Verma <prayag.verma@gmail.com>
|
||||||
|
Pure White <daniel48@126.com>
|
||||||
|
Qiang Huang <h.huangqiang@huawei.com>
|
||||||
|
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
||||||
|
qudongfang <qudongfang@gmail.com>
|
||||||
|
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||||
|
Ray Tsang <rayt@google.com>
|
||||||
|
Reficul <xuzhenglun@gmail.com>
|
||||||
|
Remy Suen <remy.suen@gmail.com>
|
||||||
|
Renaud Gaubert <rgaubert@nvidia.com>
|
||||||
|
Ricardo N Feliciano <FelicianoTech@gmail.com>
|
||||||
|
Rich Moyse <rich@moyse.us>
|
||||||
|
Richard Mathie <richard.mathie@amey.co.uk>
|
||||||
|
Richard Scothern <richard.scothern@gmail.com>
|
||||||
|
Rick Wieman <git@rickw.nl>
|
||||||
|
Ritesh H Shukla <sritesh@vmware.com>
|
||||||
|
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||||
|
Robert Wallis <smilingrob@gmail.com>
|
||||||
|
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||||
|
Robin Speekenbrink <robin@kingsquare.nl>
|
||||||
|
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
||||||
|
Rogelio Canedo <rcanedo@mappy.priv>
|
||||||
|
Roland Kammerer <roland.kammerer@linbit.com>
|
||||||
|
Roman Dudin <katrmr@gmail.com>
|
||||||
|
Rory Hunter <roryhunter2@gmail.com>
|
||||||
|
Ross Boucher <rboucher@gmail.com>
|
||||||
|
Rubens Figueiredo <r.figueiredo.52@gmail.com>
|
||||||
|
Ryan Belgrave <rmb1993@gmail.com>
|
||||||
|
Ryan Detzel <ryan.detzel@gmail.com>
|
||||||
|
Ryan Stelly <ryan.stelly@live.com>
|
||||||
|
Sainath Grandhi <sainath.grandhi@intel.com>
|
||||||
|
Sakeven Jiang <jc5930@sina.cn>
|
||||||
|
Sally O'Malley <somalley@redhat.com>
|
||||||
|
Sam Neirinck <sam@samneirinck.com>
|
||||||
|
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||||
|
Samuel Karp <skarp@amazon.com>
|
||||||
|
Santhosh Manohar <santhosh@docker.com>
|
||||||
|
Scott Collier <emailscottcollier@gmail.com>
|
||||||
|
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||||
|
Sean Rodman <srodman7689@gmail.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Sergey Tryuber <Sergeant007@users.noreply.github.com>
|
||||||
|
Serhat Gülçiçek <serhat25@gmail.com>
|
||||||
|
Sevki Hasirci <s@sevki.org>
|
||||||
|
Shaun Kaasten <shaunk@gmail.com>
|
||||||
|
Sheng Yang <sheng@yasker.org>
|
||||||
|
Shijiang Wei <mountkin@gmail.com>
|
||||||
|
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||||
|
Shoubhik Bose <sbose78@gmail.com>
|
||||||
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
|
Sian Lerk Lau <kiawin@gmail.com>
|
||||||
|
Sidhartha Mani <sidharthamn@gmail.com>
|
||||||
|
sidharthamani <sid@rancher.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Simei He <hesimei@zju.edu.cn>
|
||||||
|
Simon Ferquel <simon.ferquel@docker.com>
|
||||||
|
Sindhu S <sindhus@live.in>
|
||||||
|
Slava Semushin <semushin@redhat.com>
|
||||||
|
Solomon Hykes <solomon@docker.com>
|
||||||
|
Song Gao <song@gao.io>
|
||||||
|
Spencer Brown <spencer@spencerbrown.org>
|
||||||
|
squeegels <1674195+squeegels@users.noreply.github.com>
|
||||||
|
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||||
|
Stefan S. <tronicum@user.github.com>
|
||||||
|
Stefan Scherer <scherer_stefan@icloud.com>
|
||||||
|
Stefan Weil <sw@weilnetz.de>
|
||||||
|
Stephen Day <stephen.day@docker.com>
|
||||||
|
Stephen Rust <srust@blockbridge.com>
|
||||||
|
Steve Durrheimer <s.durrheimer@gmail.com>
|
||||||
|
Steven Burgess <steven.a.burgess@hotmail.com>
|
||||||
|
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||||
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
Sungwon Han <sungwon.han@navercorp.com>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au>
|
||||||
|
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||||
|
Sébastien HOUZÉ <cto@verylastroom.com>
|
||||||
|
T K Sourabh <sourabhtk37@gmail.com>
|
||||||
|
TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||||
|
Taylor Jones <monitorjbl@gmail.com>
|
||||||
|
Thatcher Peskens <thatcher@docker.com>
|
||||||
|
Thomas Gazagnaire <thomas@gazagnaire.org>
|
||||||
|
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||||
|
Thomas Leonard <thomas.leonard@docker.com>
|
||||||
|
Thomas Léveil <thomasleveil@gmail.com>
|
||||||
|
Thomas Riccardi <riccardi@systran.fr>
|
||||||
|
Thomas Swift <tgs242@gmail.com>
|
||||||
|
Tianon Gravi <admwiggin@gmail.com>
|
||||||
|
Tianyi Wang <capkurmagati@gmail.com>
|
||||||
|
Tibor Vass <teabee89@gmail.com>
|
||||||
|
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||||
|
Tim Hockin <thockin@google.com>
|
||||||
|
Tim Smith <timbot@google.com>
|
||||||
|
Tim Waugh <twaugh@redhat.com>
|
||||||
|
Tim Wraight <tim.wraight@tangentlabs.co.uk>
|
||||||
|
timfeirg <kkcocogogo@gmail.com>
|
||||||
|
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||||
|
Tobias Bradtke <webwurst@gmail.com>
|
||||||
|
Tobias Gesellchen <tobias@gesellix.de>
|
||||||
|
Todd Whiteman <todd.whiteman@joyent.com>
|
||||||
|
Tom Denham <tom@tomdee.co.uk>
|
||||||
|
Tom Fotherby <tom+github@peopleperhour.com>
|
||||||
|
Tom X. Tobin <tomxtobin@tomxtobin.com>
|
||||||
|
Tomas Tomecek <ttomecek@redhat.com>
|
||||||
|
Tomasz Kopczynski <tomek@kopczynski.net.pl>
|
||||||
|
Tomáš Hrčka <thrcka@redhat.com>
|
||||||
|
Tony Abboud <tdabboud@hotmail.com>
|
||||||
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Trapier Marshall <trapier.marshall@docker.com>
|
||||||
|
Travis Cline <travis.cline@gmail.com>
|
||||||
|
Tristan Carel <tristan@cogniteev.com>
|
||||||
|
Tycho Andersen <tycho@docker.com>
|
||||||
|
Tycho Andersen <tycho@tycho.ws>
|
||||||
|
uhayate <uhayate.gong@daocloud.io>
|
||||||
|
Umesh Yadav <umesh4257@gmail.com>
|
||||||
|
Valentin Lorentz <progval+git@progval.net>
|
||||||
|
Veres Lajos <vlajos@gmail.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com>
|
||||||
|
Victoria Bialas <victoria.bialas@docker.com>
|
||||||
|
Viktor Stanchev <me@viktorstanchev.com>
|
||||||
|
Vincent Batts <vbatts@redhat.com>
|
||||||
|
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com>
|
||||||
|
Vincent Woo <me@vincentwoo.com>
|
||||||
|
Vishnu Kannan <vishnuk@google.com>
|
||||||
|
Vivek Goyal <vgoyal@redhat.com>
|
||||||
|
Wang Jie <wangjie5@chinaskycloud.com>
|
||||||
|
Wang Long <long.wanglong@huawei.com>
|
||||||
|
Wang Ping <present.wp@icloud.com>
|
||||||
|
Wang Xing <hzwangxing@corp.netease.com>
|
||||||
|
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
|
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||||
|
Wayne Song <wsong@docker.com>
|
||||||
|
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||||
|
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||||
|
Wes Morgan <cap10morgan@gmail.com>
|
||||||
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
|
William Henry <whenry@redhat.com>
|
||||||
|
Xianglin Gao <xlgao@zju.edu.cn>
|
||||||
|
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||||
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
|
Yan Feng <yanfeng2@huawei.com>
|
||||||
|
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||||
|
Yassine Tijani <yasstij11@gmail.com>
|
||||||
|
Yi EungJun <eungjun.yi@navercorp.com>
|
||||||
|
Ying Li <ying.li@docker.com>
|
||||||
|
Yong Tang <yong.tang.github@outlook.com>
|
||||||
|
Yosef Fertel <yfertel@gmail.com>
|
||||||
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
|
Yuan Sun <sunyuan3@huawei.com>
|
||||||
|
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||||
|
zebrilee <zebrilee@gmail.com>
|
||||||
|
Zhang Kun <zkazure@gmail.com>
|
||||||
|
Zhang Wei <zhangwei555@huawei.com>
|
||||||
|
Zhang Wentao <zhangwentao234@huawei.com>
|
||||||
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
|
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||||
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
|
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||||
|
Álex González <agonzalezro@gmail.com>
|
||||||
|
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||||
|
Átila Camurça Alves <camurca.home@gmail.com>
|
||||||
|
徐俊杰 <paco.xu@daocloud.io>
365
vendor/github.com/docker/cli/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,365 @@
# Contributing to Docker

Want to hack on Docker? Awesome! We have a contributor's guide that explains
[setting up a Docker development environment and the contribution
process](https://docs.docker.com/opensource/project/who-written-for/).

This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors. Finally, make sure
you read our [community guidelines](#docker-community-guidelines) before you
start participating.

## Topics

* [Reporting Security Issues](#reporting-security-issues)
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-other-issues)
* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
* [Community Guidelines](#docker-community-guidelines)

## Reporting security issues

The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue; instead, send your report privately to
[security@docker.com](mailto:security@docker.com).

Security reports are greatly appreciated and we will publicly thank you for them.
We also like to send gifts—if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.


## Reporting other issues

A great way to contribute to the project is to send a detailed report when you
encounter an issue. We always appreciate a well-written, thorough bug report,
and will thank you for it!

Check that [our issue database](https://github.com/docker/cli/issues)
doesn't already include that problem or suggestion before submitting an issue.
If you find a match, you can use the "subscribe" button to get notified of
updates. Do *not* leave random "+1" or "I have this too" comments, as they
only clutter the discussion and don't help resolve it. However, if you
have ways to reproduce the issue or have additional information that may help
resolve the issue, please leave a comment.

When reporting issues, always include:

* The output of `docker version`.
* The output of `docker info`.

Also include the steps required to reproduce the problem if possible and
applicable. This information will help us review and fix your issue faster.
When sending lengthy log files, consider posting them as a gist (https://gist.github.com).
Don't forget to remove sensitive data from your log files before posting (you can
replace those parts with "REDACTED").
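
For example, the details listed above can be gathered in one step before opening an
issue; this is only a sketch (it assumes a working local CLI, and `report.txt` is an
illustrative file name):

```
{
  echo "=== docker version ==="
  docker version
  echo "=== docker info ==="
  docker info
} > report.txt 2>&1   # review and redact report.txt before attaching it to the issue
```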

## Quick contribution tips and guidelines

This section gives the experienced contributor some tips and guidelines.

### Pull requests are always welcome

Not sure if that typo is worth a pull request? Found a bug and know how to fix
it? Do it! We will appreciate it. Any significant improvement should be
documented as [a GitHub issue](https://github.com/docker/cli/issues) before
anybody starts working on it.

We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).

### Talking to other Docker users and contributors

<table class="tg">
  <col width="45%">
  <col width="65%">
  <tr>
    <td>Forums</td>
    <td>
      A public forum for users to discuss questions and explore current design patterns and
      best practices about Docker and related projects in the Docker Ecosystem. To participate,
      just log in with your Docker Hub account on <a href="https://forums.docker.com" target="_blank">https://forums.docker.com</a>.
    </td>
  </tr>
  <tr>
    <td>Community Slack</td>
    <td>
      The Docker Community has a dedicated Slack chat to discuss features and issues. You can sign up <a href="https://community.docker.com/registrations/groups/4316" target="_blank">with this link</a>.
    </td>
  </tr>
  <tr>
    <td>Twitter</td>
    <td>
      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
      to get updates on our products. You can also tweet us questions or just
      share blogs or stories.
    </td>
  </tr>
  <tr>
    <td>Stack Overflow</td>
    <td>
      Stack Overflow has over 17000 Docker questions listed. We regularly
      monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
      and so do many other knowledgeable Docker users.
    </td>
  </tr>
</table>


### Conventions

Fork the repository and make changes on your fork in a feature branch:

- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
  the issue.
- If it's a feature branch, create an enhancement issue to announce
  your intentions, and name it XXXX-something where XXXX is the number of the
  issue.

Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. [Run the full test
suite](README.md) on your branch before
submitting a pull request.

Update the documentation when creating or modifying features. Test your
documentation changes for clarity, concision, and correctness, as well as a
clean documentation build. See our contributors guide for [our style
guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).

Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.

Pull request descriptions should be as clear as possible and include a reference
to all the issues that they address.

Commit messages must start with a capitalized and short summary (max. 50 chars)
written in the imperative, followed by an optional, more detailed explanatory
text which is separated from the summary by an empty line.

Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Post
a comment after pushing. New commits show up in the pull request automatically,
but the reviewers are notified only when you comment.

Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.

**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
feature branch to update your pull request rather than `merge master`.

Before you make a pull request, squash your commits into logical units of work
using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
set of patches that should be reviewed together: for example, upgrading the
version of a vendored dependency and taking advantage of its now available new
feature constitute two separate units of work. Implementing a new function and
calling it in another file constitute a single logical unit of work. The vast
majority of submissions should have a single commit, so if in doubt: squash
down to one.
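
As a rough sketch of that rebase-and-squash flow (the branch name `1234-fix-typo` is just
an example), the commands involved might look like:

```
git fetch origin
git rebase origin/master           # update the feature branch instead of merging master in
git rebase -i origin/master        # squash the work down to logical commits
git push -f origin 1234-fix-typo   # force-push the rewritten branch to update the PR
```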

After every commit, make sure the test suite passes. Include documentation
changes in the same pull request so that a revert would remove all traces of
the feature or fix.

Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in pull request
descriptions that close an issue. Including references automatically closes the issue
on a merge.

Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
from the Git history.

Please see the [Coding Style](#coding-style) for further guidelines.

### Merge approval

Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance.

A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.

For more details, see the [MAINTAINERS](MAINTAINERS) page.

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions).

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
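
For instance (the name and email below are placeholders), a one-time setup plus a signed
commit looks like:

```
git config user.name "Joe Smith"
git config user.email joe.smith@email.com
git commit -s -m "Fix typo in CLI help output"
# git appends the trailer automatically:
#   Signed-off-by: Joe Smith <joe.smith@email.com>
```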

### How can I become a maintainer?

The procedures for adding new maintainers are explained in the
global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
repository.

Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!

## Docker community guidelines

We want to keep the Docker community awesome, growing and collaborative. We need
your help to keep it that way. To help with this we've come up with some general
guidelines for the community as a whole:

* Be nice: Be courteous, respectful and polite to fellow community members:
  no regional, racial, gender, or other abuse will be tolerated. We like
  nice people way better than mean ones!

* Encourage diversity and participation: Make everyone in our community feel
  welcome, regardless of their background and the extent of their
  contributions, and do everything possible to encourage participation in
  our community.

* Keep it legal: Basically, don't get us in trouble. Share only content that
  you own, do not share private or sensitive information, and don't break
  the law.

* Stay on topic: Make sure that you are posting to the correct channel and
  avoid off-topic discussions. Remember when you update an issue or respond
  to an email you are potentially sending to a large number of people. Please
  consider this before you update. Also remember that nobody likes spam.

* Don't send email to the maintainers: There's no need to send email to the
  maintainers to ask them to investigate an issue or to take a look at a
  pull request. Instead of sending an email, GitHub mentions should be
  used to ping maintainers to review a pull request, a proposal or an
  issue.

### Guideline violations — 3 strikes method

The point of this section is not to find opportunities to punish people, but we
do need a fair way to deal with people who are making our community suck.

1. First occurrence: We'll give you a friendly, but public reminder that the
   behavior is inappropriate according to our guidelines.

2. Second occurrence: We will send you a private message with a warning that
   any additional violations will result in removal from the community.

3. Third occurrence: Depending on the violation, we may need to delete or ban
   your account.

**Notes:**

* Obvious spammers are banned on first occurrence. If we don't do this, we'll
  have spam all over the place.

* Violations are forgiven after 6 months of good behavior, and we won't hold a
  grudge.

* People who commit minor infractions will get some education, rather than
  hammering them in the 3 strikes process.

* The rules apply equally to everyone in the community, no matter how much
  you've contributed.

* Extreme violations of a threatening, abusive, destructive or illegal nature
  will be addressed immediately and are not subject to 3 strikes or forgiveness.

* Contact abuse@docker.com to report abuse or appeal violations. In the case of
  appeals, we know that mistakes happen, and we'll work with you to come up with a
  fair solution if there has been a misunderstanding.

## Coding Style

Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.

It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
   [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
   expectations, caveats and anything else that may be important. If a type
   gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
   In practice, short methods will have short variable names and globals will
   have longer names.
7. No underscores in package names. If you need a compound name, step back,
   and re-examine why you need a compound name. If you still think you need a
   compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
   warrant its own package, it has not been written generally enough to be a
   part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
   required. No, we don't need another unit testing framework. Assertion
   packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
    guidelines. Since you've read all the rules, you now know that.
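
A minimal local check for rules 1, 2, and 9 (assuming `gofmt`, `golint`, and the Go
toolchain are installed) might be:

```
gofmt -s -l .     # list any files that are not gofmt -s clean
golint ./...      # run the default golint checks
go test ./...     # plain go test, no extra tooling required
```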

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
12
vendor/github.com/docker/cli/Jenkinsfile
generated
vendored
Normal file
@ -0,0 +1,12 @@
wrappedNode(label: 'linux && x86_64', cleanWorkspace: true) {
    timeout(time: 60, unit: 'MINUTES') {
        stage "Git Checkout"
        checkout scm

        stage "Run end-to-end test suite"
        sh "docker version"
        sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
            IMAGE_TAG=clie2e${BUILD_NUMBER} \
            make -f docker.Makefile test-e2e"
    }
}
191
vendor/github.com/docker/cli/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2013-2017 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
135 vendor/github.com/docker/cli/MAINTAINERS (generated, vendored, new file)
@@ -0,0 +1,135 @@
# Docker maintainers file
#
# This file describes who runs the docker/cli project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]

  [Org."Core maintainers"]

  # The Core maintainers are the ghostbusters of the project: when there's a problem others
  # can't solve, they show up and fix it with bizarre devices and weaponry.
  # They have final say on technical implementation and coding style.
  # They are ultimately responsible for quality in all its forms: usability polish,
  # bugfixes, performance, stability, etc. When ownership can cleanly be passed to
  # a subsystem, they are responsible for doing so and holding the
  # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.

  people = [
    "aaronlehmann",
    "albers",
    "cpuguy83",
    "dnephin",
    "justincormack",
    "stevvooe",
    "tibor",
    "tonistiigi",
    "vdemeester",
    "vieux",
  ]

  [Org."Docs maintainers"]

  # TODO Describe the docs maintainers role.

  people = [
    "misty",
    "thajeztah"
  ]

  [Org.Curators]

  # The curators help ensure that incoming issues and pull requests are properly triaged and
  # that our various contribution and reviewing processes are respected. With their knowledge of
  # the repository activity, they can also guide contributors to relevant material or
  # discussions.
  #
  # They are neither code nor docs reviewers, so they are never expected to merge. They can
  # however:
  # - close an issue or pull request when it's an exact duplicate
  # - close an issue or pull request when it's inappropriate or off-topic

  people = [
    "programmerq",
    "thajeztah"
  ]

[people]

  # A reference list of all people associated with the project.
  # All other sections should refer to people by their canonical key
  # in the people section.

  # ADD YOURSELF HERE IN ALPHABETICAL ORDER

  [people.aaronlehmann]
  Name = "Aaron Lehmann"
  Email = "aaron.lehmann@docker.com"
  GitHub = "aaronlehmann"

  [people.albers]
  Name = "Harald Albers"
  Email = "github@albersweb.de"
  GitHub = "albers"

  [people.cpuguy83]
  Name = "Brian Goff"
  Email = "cpuguy83@gmail.com"
  GitHub = "cpuguy83"

  [people.dnephin]
  Name = "Daniel Nephin"
  Email = "dnephin@gmail.com"
  GitHub = "dnephin"

  [people.justincormack]
  Name = "Justin Cormack"
  Email = "justin.cormack@docker.com"
  GitHub = "justincormack"

  [people.misty]
  Name = "Misty Stanley-Jones"
  Email = "misty@docker.com"
  GitHub = "mistyhacks"

  [people.programmerq]
  Name = "Jeff Anderson"
  Email = "jeff@docker.com"
  GitHub = "programmerq"

  [people.stevvooe]
  Name = "Stephen Day"
  Email = "stephen.day@docker.com"
  GitHub = "stevvooe"

  [people.thajeztah]
  Name = "Sebastiaan van Stijn"
  Email = "github@gone.nl"
  GitHub = "thaJeztah"

  [people.tibor]
  Name = "Tibor Vass"
  Email = "tibor@docker.com"
  GitHub = "tiborvass"

  [people.tonistiigi]
  Name = "Tõnis Tiigi"
  Email = "tonis@docker.com"
  GitHub = "tonistiigi"

  [people.vdemeester]
  Name = "Vincent Demeester"
  Email = "vincent@sbr.pm"
  GitHub = "vdemeester"

  [people.vieux]
  Name = "Victor Vieux"
  Email = "vieux@docker.com"
  GitHub = "vieux"
90 vendor/github.com/docker/cli/Makefile (generated, vendored, new file)
@@ -0,0 +1,90 @@
#
# github.com/docker/cli
#
all: binary

_:=$(shell ./scripts/warn-outside-container $(MAKECMDGOALS))

.PHONY: clean
clean: ## remove build artifacts
	rm -rf ./build/* cli/winresources/rsrc_* ./man/man[1-9] docs/yaml/gen

.PHONY: test-unit
test-unit: ## run unit test
	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')

.PHONY: test
test: test-unit ## run tests

.PHONY: test-coverage
test-coverage: ## run test coverage
	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')

.PHONY: lint
lint: ## run all the lint tools
	gometalinter --config gometalinter.json ./...

.PHONY: binary
binary: ## build executable for Linux
	@echo "WARNING: binary creates a Linux executable. Use cross for macOS or Windows."
	./scripts/build/binary

.PHONY: cross
cross: ## build executable for macOS and Windows
	./scripts/build/cross

.PHONY: binary-windows
binary-windows: ## build executable for Windows
	./scripts/build/windows

.PHONY: binary-osx
binary-osx: ## build executable for macOS
	./scripts/build/osx

.PHONY: dynbinary
dynbinary: ## build dynamically linked binary
	./scripts/build/dynbinary

.PHONY: watch
watch: ## monitor file changes and run go test
	./scripts/test/watch

vendor: vendor.conf ## check that vendor matches vendor.conf
	rm -rf vendor
	bash -c 'vndr |& grep -v -i clone'
	scripts/validate/check-git-diff vendor

.PHONY: authors
authors: ## generate AUTHORS file from git history
	scripts/docs/generate-authors.sh

.PHONY: manpages
manpages: ## generate man pages from go source and markdown
	scripts/docs/generate-man.sh

.PHONY: yamldocs
yamldocs: ## generate documentation YAML files consumed by docs repo
	scripts/docs/generate-yaml.sh

.PHONY: shellcheck
shellcheck: ## run shellcheck validation
	scripts/validate/shellcheck

.PHONY: help
help: ## print this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

cli/compose/schema/bindata.go: cli/compose/schema/data/*.json
	go generate github.com/docker/cli/cli/compose/schema

compose-jsonschema: cli/compose/schema/bindata.go
	scripts/validate/check-git-diff cli/compose/schema/bindata.go

.PHONY: ci-validate
ci-validate:
	time make -B vendor
	time make -B compose-jsonschema
	time make manpages
	time make yamldocs
19 vendor/github.com/docker/cli/NOTICE (generated, vendored, new file)
@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
69 vendor/github.com/docker/cli/README.md (generated, vendored, new file)
@@ -0,0 +1,69 @@
[![build status](https://circleci.com/gh/docker/cli.svg?style=shield)](https://circleci.com/gh/docker/cli/tree/master) [![Build Status](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/badge/icon)](https://jenkins.dockerproject.org/job/docker/job/cli/job/master/)

docker/cli
==========

This repository is the home of the cli used in the Docker CE and
Docker EE products.

Development
===========

`docker/cli` is developed using Docker.

Build a linux binary:

```
$ make -f docker.Makefile binary
```

Build binaries for all supported platforms:

```
$ make -f docker.Makefile cross
```

Run all linting:

```
$ make -f docker.Makefile lint
```

List all the available targets:

```
$ make help
```

### In-container development environment

Start an interactive development environment:

```
$ make -f docker.Makefile shell
```

In the development environment you can run many tasks, including build binaries:

```
$ make binary
```

Legal
=====
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/cli/blob/master/NOTICE) document in this repo.*

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.

It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

Licensing
=========
docker/cli is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
87 vendor/github.com/docker/cli/TESTING.md (generated, vendored, new file)
@@ -0,0 +1,87 @@
# Testing

The following guidelines summarize the testing policy for docker/cli.

## Unit Test Suite

All code changes should have unit test coverage.

Error cases should be tested with unit tests.

Bug fixes should be covered by new unit tests or additional assertions in
existing unit tests.

### Details

The unit test suite follows the standard Go testing convention. Tests are
located in the package directory in `_test.go` files.

Unit tests should be named using the convention:

```
Test<Function Name><Test Case Name>
```

[Table tests](https://github.com/golang/go/wiki/TableDrivenTests) should be used
where appropriate, but may not be appropriate in all cases.

Assertions should be made using
[testify/assert](https://godoc.org/github.com/stretchr/testify/assert) and test
requirements should be verified using
[testify/require](https://godoc.org/github.com/stretchr/testify/require).

Fakes, and testing utilities can be found in
[internal/test](https://godoc.org/github.com/docker/cli/internal/test) and
[gotestyourself](https://godoc.org/github.com/gotestyourself/gotestyourself).

## End-to-End Test Suite

The end-to-end test suite tests a cli binary against a real API backend.

### Guidelines

Each feature (subcommand) should have a single end-to-end test for
the success case. The test should include all (or most) flags/options supported
by that feature.

In some rare cases a couple additional end-to-end tests may be written for a
sufficiently complex and critical feature (ex: `container run`, `service
create`, `service update`, and `docker build` may have ~3-5 cases each).

In some rare cases a sufficiently critical error paths may have a single
end-to-end test case.

In all other cases the behaviour should be covered by unit tests.

If a code change adds a new flag, that flag should be added to the existing
"success case" end-to-end test.

If a code change fixes a bug, that bug fix should be covered either by adding
assertions to the existing end-to-end test, or with one or more unit test.

### Details

The end-to-end test suite is located in
[./e2e](https://github.com/docker/cli/tree/master/e2e). Each directory in `e2e`
corresponds to a directory in `cli/command` and contains the tests for that
subcommand. Files in each directory should be named `<command>_test.go` where
command is the basename of the command (ex: the test for `docker stack deploy`
is found in `e2e/stack/deploy_test.go`).

Tests should be named using the convention:

```
Test<Command Basename>[<Test Case Name>]
```

where the test case name is only required when there are multiple test cases for
a single command.

End-to-end test should run the `docker` binary using
[gotestyourself/icmd](https://godoc.org/github.com/gotestyourself/gotestyourself/icmd)
and make assertions about the exit code, stdout, stderr, and local file system.

Any Docker image or registry operations should use `registry:5000/<image name>`
to communicate with the local instance of the Docker registry. To load
additional fixture images to the registry see
[scripts/test/e2e/run](https://github.com/docker/cli/blob/master/scripts/test/e2e/run).
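As a rough illustration of the naming and table-test conventions described in that policy, here is a small self-contained sketch (the package, helper, and cases are invented for this example; real suites live next to the code they cover and may use testify or gotestyourself assertions as described above):

```go
package version

import (
	"testing"

	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
)

// trimV is an invented helper used only to demonstrate the convention.
func trimV(s string) string {
	if len(s) > 0 && s[0] == 'v' {
		return s[1:]
	}
	return s
}

// TestTrimV follows the Test<Function Name><Test Case Name> convention
// and uses a table so adding a new case is a one-line change.
func TestTrimV(t *testing.T) {
	testCases := []struct{ in, want string }{
		{"v18.04.0-dev", "18.04.0-dev"},
		{"18.04.0-dev", "18.04.0-dev"},
		{"", ""},
	}
	for _, tc := range testCases {
		assert.Check(t, is.Equal(tc.want, trimV(tc.in)))
	}
}
```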
1 vendor/github.com/docker/cli/VERSION (generated, vendored, new file)
@@ -0,0 +1 @@
18.04.0-dev
23 vendor/github.com/docker/cli/appveyor.yml (generated, vendored, new file)
@@ -0,0 +1,23 @@
version: "{build}"

clone_folder: c:\gopath\src\github.com\docker\cli

environment:
  GOPATH: c:\gopath
  GOVERSION: 1.10
  DEPVERSION: v0.4.1

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
  - msiexec /i go%GOVERSION%.windows-amd64.msi /q
  - go version
  - go env

deploy: false

build_script:
  - ps: .\scripts\make.ps1 -Binary

test_script:
  - ps: .\scripts\make.ps1 -TestUnit
116 vendor/github.com/docker/cli/circle.yml (generated, vendored, new file)
@@ -0,0 +1,116 @@
version: 2

jobs:

  lint:
    working_directory: /work
    docker: [{image: 'docker:17.06-git'}]
    steps:
      - checkout
      - setup_remote_docker:
          reusable: true
          exclusive: false
      - run:
          command: docker version
      - run:
          name: "Lint"
          command: |
            dockerfile=dockerfiles/Dockerfile.lint
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-linter:$CIRCLE_BUILD_NUM .
            docker run --rm cli-linter:$CIRCLE_BUILD_NUM

  cross:
    working_directory: /work
    docker: [{image: 'docker:17.06-git'}]
    parallelism: 3
    steps:
      - checkout
      - setup_remote_docker:
          reusable: true
          exclusive: false
      - run:
          name: "Cross"
          command: |
            dockerfile=dockerfiles/Dockerfile.cross
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM .
            name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
            docker run \
              -e CROSS_GROUP=$CIRCLE_NODE_INDEX \
              --name $name cli-builder:$CIRCLE_BUILD_NUM \
              make cross
            docker cp \
              $name:/go/src/github.com/docker/cli/build \
              /work/build
      - store_artifacts:
          path: /work/build

  test:
    working_directory: /work
    docker: [{image: 'docker:17.06-git'}]
    steps:
      - checkout
      - setup_remote_docker:
          reusable: true
          exclusive: false
      - run:
          name: "Unit Test with Coverage"
          command: |
            dockerfile=dockerfiles/Dockerfile.dev
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker run --name \
              test-$CIRCLE_BUILD_NUM cli-builder:$CIRCLE_BUILD_NUM \
              make test-coverage

      - run:
          name: "Upload to Codecov"
          command: |
            docker cp \
              test-$CIRCLE_BUILD_NUM:/go/src/github.com/docker/cli/coverage.txt \
              coverage.txt
            apk add -U bash curl
            curl -s https://codecov.io/bash | bash || \
              echo 'Codecov failed to upload'

  validate:
    working_directory: /work
    docker: [{image: 'docker:17.06-git'}]
    steps:
      - checkout
      - setup_remote_docker:
          reusable: true
          exclusive: false
      - run:
          name: "Validate Vendor, Docs, and Code Generation"
          command: |
            dockerfile=dockerfiles/Dockerfile.dev
            echo "COPY . ." >> $dockerfile
            rm -f .dockerignore # include .git
            docker build -f $dockerfile --tag cli-builder-with-git:$CIRCLE_BUILD_NUM .
            docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \
              make ci-validate
  shellcheck:
    working_directory: /work
    docker: [{image: 'docker:17.06-git'}]
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: "Run shellcheck"
          command: |
            dockerfile=dockerfiles/Dockerfile.shellcheck
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-validator:$CIRCLE_BUILD_NUM .
            docker run --rm cli-validator:$CIRCLE_BUILD_NUM \
              make shellcheck
workflows:
  version: 2
  ci:
    jobs:
      - lint
      - cross
      - test
      - validate
      - shellcheck
162 vendor/github.com/docker/cli/cli/cobra.go (generated, vendored, new file)
@@ -0,0 +1,162 @@
package cli

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/term"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) {
	cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
	cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
	cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
	cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
	cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
	cobra.AddTemplateFunc("useLine", UseLine)

	rootCmd.SetUsageTemplate(usageTemplate)
	rootCmd.SetHelpTemplate(helpTemplate)
	rootCmd.SetFlagErrorFunc(FlagErrorFunc)
	rootCmd.SetHelpCommand(helpCommand)

	rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
	rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
	rootCmd.PersistentFlags().Lookup("help").Hidden = true
}

// FlagErrorFunc prints an error message which matches the format of the
// docker/cli/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
	if err == nil {
		return nil
	}

	usage := ""
	if cmd.HasSubCommands() {
		usage = "\n\n" + cmd.UsageString()
	}
	return StatusError{
		Status:     fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
		StatusCode: 125,
	}
}

var helpCommand = &cobra.Command{
	Use:               "help [command]",
	Short:             "Help about the command",
	PersistentPreRun:  func(cmd *cobra.Command, args []string) {},
	PersistentPostRun: func(cmd *cobra.Command, args []string) {},
	RunE: func(c *cobra.Command, args []string) error {
		cmd, args, e := c.Root().Find(args)
		if cmd == nil || e != nil || len(args) > 0 {
			return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
		}

		helpFunc := cmd.HelpFunc()
		helpFunc(cmd, args)
		return nil
	},
}

func hasSubCommands(cmd *cobra.Command) bool {
	return len(operationSubCommands(cmd)) > 0
}

func hasManagementSubCommands(cmd *cobra.Command) bool {
	return len(managementSubCommands(cmd)) > 0
}

func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
	cmds := []*cobra.Command{}
	for _, sub := range cmd.Commands() {
		if sub.IsAvailableCommand() && !sub.HasSubCommands() {
			cmds = append(cmds, sub)
		}
	}
	return cmds
}

func wrappedFlagUsages(cmd *cobra.Command) string {
	width := 80
	if ws, err := term.GetWinsize(0); err == nil {
		width = int(ws.Width)
	}
	return cmd.Flags().FlagUsagesWrapped(width - 1)
}

func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
	cmds := []*cobra.Command{}
	for _, sub := range cmd.Commands() {
		if sub.IsAvailableCommand() && sub.HasSubCommands() {
			cmds = append(cmds, sub)
		}
	}
	return cmds
}

// UseLine returns the usage line for a command. This implementation is different
// from the default Command.UseLine in that it does not add a `[flags]` to the
// end of the line.
func UseLine(cmd *cobra.Command) string {
	if cmd.HasParent() {
		return cmd.Parent().CommandPath() + " " + cmd.Use
	}
	return cmd.Use
}

var usageTemplate = `Usage:

{{- if not .HasSubCommands}} {{ useLine . }}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}

{{ .Short | trim }}

{{- if gt .Aliases 0}}

Aliases:
  {{.NameAndAliases}}

{{- end}}
{{- if .HasExample}}

Examples:
{{ .Example }}

{{- end}}
{{- if .HasFlags}}

Options:
{{ wrappedFlagUsages . | trimRightSpace}}

{{- end}}
{{- if hasManagementSubCommands . }}

Management Commands:

{{- range managementSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}

{{- end}}
{{- if hasSubCommands .}}

Commands:

{{- range operationSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}

{{- if .HasSubCommands }}

Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
`

var helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
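For context, a sketch of how a caller could wire these helpers into a root command. This is a hypothetical `main` package written for illustration; the real wiring lives in the docker binary's own command setup, which is not part of this file:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use:   "example [OPTIONS] COMMAND [ARG...]",
		Short: "Example root command",
	}
	// Installs the custom usage/help templates, the hidden -h shorthand,
	// and FlagErrorFunc so flag errors surface as a StatusError (code 125).
	cli.SetupRootCommand(rootCmd)

	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```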
70 vendor/github.com/docker/cli/cli/command/bundlefile/bundlefile.go (generated, vendored, new file)
@@ -0,0 +1,70 @@
package bundlefile

import (
	"encoding/json"
	"io"

	"github.com/pkg/errors"
)

// Bundlefile stores the contents of a bundlefile
type Bundlefile struct {
	Version  string
	Services map[string]Service
}

// Service is a service from a bundlefile
type Service struct {
	Image      string
	Command    []string          `json:",omitempty"`
	Args       []string          `json:",omitempty"`
	Env        []string          `json:",omitempty"`
	Labels     map[string]string `json:",omitempty"`
	Ports      []Port            `json:",omitempty"`
	WorkingDir *string           `json:",omitempty"`
	User       *string           `json:",omitempty"`
	Networks   []string          `json:",omitempty"`
}

// Port is a port as defined in a bundlefile
type Port struct {
	Protocol string
	Port     uint32
}

// LoadFile loads a bundlefile from a path to the file
func LoadFile(reader io.Reader) (*Bundlefile, error) {
	bundlefile := &Bundlefile{}

	decoder := json.NewDecoder(reader)
	if err := decoder.Decode(bundlefile); err != nil {
		switch jsonErr := err.(type) {
		case *json.SyntaxError:
			return nil, errors.Errorf(
				"JSON syntax error at byte %v: %s",
				jsonErr.Offset,
				jsonErr.Error())
		case *json.UnmarshalTypeError:
			return nil, errors.Errorf(
				"Unexpected type at byte %v. Expected %s but received %s.",
				jsonErr.Offset,
				jsonErr.Type,
				jsonErr.Value)
		}
		return nil, err
	}

	return bundlefile, nil
}

// Print writes the contents of the bundlefile to the output writer
// as human readable json
func Print(out io.Writer, bundle *Bundlefile) error {
	bytes, err := json.MarshalIndent(*bundle, "", "    ")
	if err != nil {
		return err
	}

	_, err = out.Write(bytes)
	return err
}
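A minimal usage sketch for the two exported helpers above, assuming an inline JSON bundle (real bundles are read from `*.dab` files); the program name and sample content are invented for illustration:

```go
package main

import (
	"os"
	"strings"

	"github.com/docker/cli/cli/command/bundlefile"
)

func main() {
	// A tiny inline bundle used only for this example.
	src := `{"Version": "0.1", "Services": {"web": {"Image": "nginx:alpine"}}}`

	// Parse the bundle from any io.Reader.
	bundle, err := bundlefile.LoadFile(strings.NewReader(src))
	if err != nil {
		panic(err)
	}

	// Re-emit the parsed bundle as indented JSON on stdout.
	if err := bundlefile.Print(os.Stdout, bundle); err != nil {
		panic(err)
	}
}
```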
78 vendor/github.com/docker/cli/cli/command/bundlefile/bundlefile_test.go (generated, vendored, new file)
@@ -0,0 +1,78 @@
package bundlefile

import (
	"bytes"
	"strings"
	"testing"

	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
)

func TestLoadFileV01Success(t *testing.T) {
	reader := strings.NewReader(`{
		"Version": "0.1",
		"Services": {
			"redis": {
				"Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce",
				"Networks": ["default"]
			},
			"web": {
				"Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d",
				"Networks": ["default"],
				"User": "web"
			}
		}
	}`)

	bundle, err := LoadFile(reader)
	assert.NilError(t, err)
	assert.Check(t, is.Equal("0.1", bundle.Version))
	assert.Check(t, is.Len(bundle.Services, 2))
}

func TestLoadFileSyntaxError(t *testing.T) {
	reader := strings.NewReader(`{
		"Version": "0.1",
		"Services": unquoted string
	}`)

	_, err := LoadFile(reader)
	assert.Error(t, err, "JSON syntax error at byte 37: invalid character 'u' looking for beginning of value")
}

func TestLoadFileTypeError(t *testing.T) {
	reader := strings.NewReader(`{
		"Version": "0.1",
		"Services": {
			"web": {
				"Image": "redis",
				"Networks": "none"
			}
		}
	}`)

	_, err := LoadFile(reader)
	assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string.")
}

func TestPrint(t *testing.T) {
	var buffer bytes.Buffer
	bundle := &Bundlefile{
		Version: "0.1",
		Services: map[string]Service{
			"web": {
				Image:   "image",
				Command: []string{"echo", "something"},
			},
		},
	}
	assert.Check(t, Print(&buffer, bundle))
	output := buffer.String()
	assert.Check(t, is.Contains(output, "\"Image\": \"image\""))
	assert.Check(t, is.Contains(output,
		`"Command": [
            "echo",
            "something"
        ]`))
}
35 vendor/github.com/docker/cli/cli/command/checkpoint/client_test.go (generated, vendored, new file)
@@ -0,0 +1,35 @@
package checkpoint

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

type fakeClient struct {
	client.Client
	checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error
	checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error
	checkpointListFunc   func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
}

func (cli *fakeClient) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
	if cli.checkpointCreateFunc != nil {
		return cli.checkpointCreateFunc(container, options)
	}
	return nil
}

func (cli *fakeClient) CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error {
	if cli.checkpointDeleteFunc != nil {
		return cli.checkpointDeleteFunc(container, options)
	}
	return nil
}

func (cli *fakeClient) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
	if cli.checkpointListFunc != nil {
		return cli.checkpointListFunc(container, options)
	}
	return []types.Checkpoint{}, nil
}
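The fake above relies on Go's embedded-interface trick: embedding `client.Client` satisfies the whole API surface, while the test only overrides the methods it actually exercises via optional function fields. A self-contained sketch of the same pattern, with an invented two-method interface standing in for the large Docker client:

```go
package main

import "fmt"

// Greeter stands in for a large API surface such as the Docker client.
type Greeter interface {
	Hello(name string) string
	Bye(name string) string
}

// fakeGreeter embeds the interface: methods that are never overridden
// simply panic if called, so a test only implements what it needs.
type fakeGreeter struct {
	Greeter
	helloFunc func(name string) string
}

func (f *fakeGreeter) Hello(name string) string {
	if f.helloFunc != nil {
		return f.helloFunc(name)
	}
	return "hello " + name
}

func main() {
	fake := &fakeGreeter{helloFunc: func(name string) string { return "hi " + name }}
	fmt.Println(fake.Hello("docker")) // prints "hi docker"
}
```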
24 vendor/github.com/docker/cli/cli/command/checkpoint/cmd.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
package checkpoint

import (
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental)
func NewCheckpointCommand(dockerCli command.Cli) *cobra.Command {
	cmd := &cobra.Command{
		Use:         "checkpoint",
		Short:       "Manage checkpoints",
		Args:        cli.NoArgs,
		RunE:        command.ShowHelp(dockerCli.Err()),
		Annotations: map[string]string{"experimental": "", "version": "1.25"},
	}
	cmd.AddCommand(
		newCreateCommand(dockerCli),
		newListCommand(dockerCli),
		newRemoveCommand(dockerCli),
	)
	return cmd
}
58 vendor/github.com/docker/cli/cli/command/checkpoint/create.go (generated, vendored, new file)
@@ -0,0 +1,58 @@
package checkpoint

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	"github.com/spf13/cobra"
)

type createOptions struct {
	container     string
	checkpoint    string
	checkpointDir string
	leaveRunning  bool
}

func newCreateCommand(dockerCli command.Cli) *cobra.Command {
	var opts createOptions

	cmd := &cobra.Command{
		Use:   "create [OPTIONS] CONTAINER CHECKPOINT",
		Short: "Create a checkpoint from a running container",
		Args:  cli.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.container = args[0]
			opts.checkpoint = args[1]
			return runCreate(dockerCli, opts)
		},
	}

	flags := cmd.Flags()
	flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint")
	flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")

	return cmd
}

func runCreate(dockerCli command.Cli, opts createOptions) error {
	client := dockerCli.Client()

	checkpointOpts := types.CheckpointCreateOptions{
		CheckpointID:  opts.checkpoint,
		CheckpointDir: opts.checkpointDir,
		Exit:          !opts.leaveRunning,
	}

	err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts)
	if err != nil {
		return err
	}

	fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint)
	return nil
}
72 vendor/github.com/docker/cli/cli/command/checkpoint/create_test.go (generated, vendored, new file)
@@ -0,0 +1,72 @@
package checkpoint

import (
	"io/ioutil"
	"strings"
	"testing"

	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
	"github.com/pkg/errors"
)

func TestCheckpointCreateErrors(t *testing.T) {
	testCases := []struct {
		args                 []string
		checkpointCreateFunc func(container string, options types.CheckpointCreateOptions) error
		expectedError        string
	}{
		{
			args:          []string{"too-few-arguments"},
			expectedError: "requires exactly 2 arguments",
		},
		{
			args:          []string{"too", "many", "arguments"},
			expectedError: "requires exactly 2 arguments",
		},
		{
			args: []string{"foo", "bar"},
			checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error {
				return errors.Errorf("error creating checkpoint for container foo")
			},
			expectedError: "error creating checkpoint for container foo",
		},
	}

	for _, tc := range testCases {
		cli := test.NewFakeCli(&fakeClient{
			checkpointCreateFunc: tc.checkpointCreateFunc,
		})
		cmd := newCreateCommand(cli)
		cmd.SetArgs(tc.args)
		cmd.SetOutput(ioutil.Discard)
		assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
	}
}

func TestCheckpointCreateWithOptions(t *testing.T) {
	var containerID, checkpointID, checkpointDir string
	var exit bool
	cli := test.NewFakeCli(&fakeClient{
		checkpointCreateFunc: func(container string, options types.CheckpointCreateOptions) error {
			containerID = container
			checkpointID = options.CheckpointID
			checkpointDir = options.CheckpointDir
			exit = options.Exit
			return nil
		},
	})
	cmd := newCreateCommand(cli)
	checkpoint := "checkpoint-bar"
	cmd.SetArgs([]string{"container-foo", checkpoint})
	cmd.Flags().Set("leave-running", "true")
	cmd.Flags().Set("checkpoint-dir", "/dir/foo")
	assert.NilError(t, cmd.Execute())
	assert.Check(t, is.Equal("container-foo", containerID))
	assert.Check(t, is.Equal(checkpoint, checkpointID))
	assert.Check(t, is.Equal("/dir/foo", checkpointDir))
	assert.Check(t, is.Equal(false, exit))
	assert.Check(t, is.Equal(checkpoint, strings.TrimSpace(cli.OutBuffer().String())))
}
54 vendor/github.com/docker/cli/cli/command/checkpoint/list.go (generated, vendored, new file)
@@ -0,0 +1,54 @@
package checkpoint

import (
	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
	"github.com/spf13/cobra"
)

type listOptions struct {
	checkpointDir string
}

func newListCommand(dockerCli command.Cli) *cobra.Command {
	var opts listOptions

	cmd := &cobra.Command{
		Use:     "ls [OPTIONS] CONTAINER",
		Aliases: []string{"list"},
		Short:   "List checkpoints for a container",
		Args:    cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runList(dockerCli, args[0], opts)
		},
	}

	flags := cmd.Flags()
	flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")

	return cmd

}

func runList(dockerCli command.Cli, container string, opts listOptions) error {
	client := dockerCli.Client()

	listOpts := types.CheckpointListOptions{
		CheckpointDir: opts.checkpointDir,
	}

	checkpoints, err := client.CheckpointList(context.Background(), container, listOpts)
	if err != nil {
		return err
	}

	cpCtx := formatter.Context{
		Output: dockerCli.Out(),
		Format: formatter.NewCheckpointFormat(formatter.TableFormatKey),
	}
	return formatter.CheckpointWrite(cpCtx, checkpoints)
}
67 vendor/github.com/docker/cli/cli/command/checkpoint/list_test.go (generated, vendored, new file)
@@ -0,0 +1,67 @@
package checkpoint

import (
	"io/ioutil"
	"testing"

	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
	"github.com/gotestyourself/gotestyourself/golden"
	"github.com/pkg/errors"
)

func TestCheckpointListErrors(t *testing.T) {
	testCases := []struct {
		args               []string
		checkpointListFunc func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
		expectedError      string
	}{
		{
			args:          []string{},
			expectedError: "requires exactly 1 argument",
		},
		{
			args:          []string{"too", "many", "arguments"},
			expectedError: "requires exactly 1 argument",
		},
		{
			args: []string{"foo"},
			checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
				return []types.Checkpoint{}, errors.Errorf("error getting checkpoints for container foo")
			},
			expectedError: "error getting checkpoints for container foo",
		},
	}

	for _, tc := range testCases {
		cli := test.NewFakeCli(&fakeClient{
			checkpointListFunc: tc.checkpointListFunc,
		})
		cmd := newListCommand(cli)
		cmd.SetArgs(tc.args)
		cmd.SetOutput(ioutil.Discard)
		assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
	}
}

func TestCheckpointListWithOptions(t *testing.T) {
	var containerID, checkpointDir string
	cli := test.NewFakeCli(&fakeClient{
		checkpointListFunc: func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
			containerID = container
			checkpointDir = options.CheckpointDir
			return []types.Checkpoint{
				{Name: "checkpoint-foo"},
			}, nil
		},
	})
	cmd := newListCommand(cli)
	cmd.SetArgs([]string{"container-foo"})
	cmd.Flags().Set("checkpoint-dir", "/dir/foo")
	assert.NilError(t, cmd.Execute())
	assert.Check(t, is.Equal("container-foo", containerID))
	assert.Check(t, is.Equal("/dir/foo", checkpointDir))
	golden.Assert(t, cli.OutBuffer().String(), "checkpoint-list-with-options.golden")
}
44 vendor/github.com/docker/cli/cli/command/checkpoint/remove.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
package checkpoint

import (
	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	"github.com/spf13/cobra"
)

type removeOptions struct {
	checkpointDir string
}

func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
	var opts removeOptions

	cmd := &cobra.Command{
		Use:     "rm [OPTIONS] CONTAINER CHECKPOINT",
		Aliases: []string{"remove"},
		Short:   "Remove a checkpoint",
		Args:    cli.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runRemove(dockerCli, args[0], args[1], opts)
		},
	}

	flags := cmd.Flags()
	flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory")

	return cmd
}

func runRemove(dockerCli command.Cli, container string, checkpoint string, opts removeOptions) error {
	client := dockerCli.Client()

	removeOpts := types.CheckpointDeleteOptions{
		CheckpointID:  checkpoint,
		CheckpointDir: opts.checkpointDir,
	}

	return client.CheckpointDelete(context.Background(), container, removeOpts)
}
65
vendor/github.com/docker/cli/cli/command/checkpoint/remove_test.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package checkpoint

import (
    "io/ioutil"
    "testing"

    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/api/types"
    "github.com/gotestyourself/gotestyourself/assert"
    is "github.com/gotestyourself/gotestyourself/assert/cmp"
    "github.com/pkg/errors"
)

func TestCheckpointRemoveErrors(t *testing.T) {
    testCases := []struct {
        args                 []string
        checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error
        expectedError        string
    }{
        {
            args:          []string{"too-few-arguments"},
            expectedError: "requires exactly 2 arguments",
        },
        {
            args:          []string{"too", "many", "arguments"},
            expectedError: "requires exactly 2 arguments",
        },
        {
            args: []string{"foo", "bar"},
            checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error {
                return errors.Errorf("error deleting checkpoint")
            },
            expectedError: "error deleting checkpoint",
        },
    }

    for _, tc := range testCases {
        cli := test.NewFakeCli(&fakeClient{
            checkpointDeleteFunc: tc.checkpointDeleteFunc,
        })
        cmd := newRemoveCommand(cli)
        cmd.SetArgs(tc.args)
        cmd.SetOutput(ioutil.Discard)
        assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
    }
}

func TestCheckpointRemoveWithOptions(t *testing.T) {
    var containerID, checkpointID, checkpointDir string
    cli := test.NewFakeCli(&fakeClient{
        checkpointDeleteFunc: func(container string, options types.CheckpointDeleteOptions) error {
            containerID = container
            checkpointID = options.CheckpointID
            checkpointDir = options.CheckpointDir
            return nil
        },
    })
    cmd := newRemoveCommand(cli)
    cmd.SetArgs([]string{"container-foo", "checkpoint-bar"})
    cmd.Flags().Set("checkpoint-dir", "/dir/foo")
    assert.NilError(t, cmd.Execute())
    assert.Check(t, is.Equal("container-foo", containerID))
    assert.Check(t, is.Equal("checkpoint-bar", checkpointID))
    assert.Check(t, is.Equal("/dir/foo", checkpointDir))
}

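Both checkpoint test files above construct test.NewFakeCli(&fakeClient{...}) with checkpointListFunc and checkpointDeleteFunc hooks, but the fakeClient type itself lives in a sibling file of the same package that is not shown in this hunk. Purely as a hedged sketch of what such a test double looks like (field and method shapes are inferred from the calls in the tests above, not copied from the vendored file), it would embed client.Client and forward each call to the optional hook:

package checkpoint

import (
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "golang.org/x/net/context"
)

// fakeClient (sketch): embeds client.Client so it satisfies the API client
// interface, and overrides only the checkpoint calls the tests exercise.
type fakeClient struct {
    client.Client
    checkpointDeleteFunc func(container string, options types.CheckpointDeleteOptions) error
    checkpointListFunc   func(container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
}

func (cli *fakeClient) CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error {
    if cli.checkpointDeleteFunc != nil {
        return cli.checkpointDeleteFunc(container, options)
    }
    // No hook set: behave as a successful no-op.
    return nil
}

func (cli *fakeClient) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
    if cli.checkpointListFunc != nil {
        return cli.checkpointListFunc(container, options)
    }
    // No hook set: report an empty checkpoint list.
    return []types.Checkpoint{}, nil
}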
2
vendor/github.com/docker/cli/cli/command/checkpoint/testdata/checkpoint-list-with-options.golden
generated
vendored
Normal file
@@ -0,0 +1,2 @@
CHECKPOINT NAME
checkpoint-foo

323
vendor/github.com/docker/cli/cli/command/cli.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
package command

import (
    "io"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/config"
    cliconfig "github.com/docker/cli/cli/config"
    "github.com/docker/cli/cli/config/configfile"
    cliflags "github.com/docker/cli/cli/flags"
    manifeststore "github.com/docker/cli/cli/manifest/store"
    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/cli/cli/trust"
    dopts "github.com/docker/cli/opts"
    "github.com/docker/docker/api"
    "github.com/docker/docker/api/types"
    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/tlsconfig"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "github.com/theupdateframework/notary"
    notaryclient "github.com/theupdateframework/notary/client"
    "github.com/theupdateframework/notary/passphrase"
    "golang.org/x/net/context"
)

// Streams is an interface which exposes the standard input and output streams
type Streams interface {
    In() *InStream
    Out() *OutStream
    Err() io.Writer
}

// Cli represents the docker command line client.
type Cli interface {
    Client() client.APIClient
    Out() *OutStream
    Err() io.Writer
    In() *InStream
    SetIn(in *InStream)
    ConfigFile() *configfile.ConfigFile
    ServerInfo() ServerInfo
    ClientInfo() ClientInfo
    NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
    DefaultVersion() string
    ManifestStore() manifeststore.Store
    RegistryClient(bool) registryclient.RegistryClient
    ContentTrustEnabled() bool
}

// DockerCli is an instance the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
    configFile   *configfile.ConfigFile
    in           *InStream
    out          *OutStream
    err          io.Writer
    client       client.APIClient
    serverInfo   ServerInfo
    clientInfo   ClientInfo
    contentTrust bool
}

// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
    return cli.clientInfo.DefaultVersion
}

// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
    return cli.client
}

// Out returns the writer used for stdout
func (cli *DockerCli) Out() *OutStream {
    return cli.out
}

// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
    return cli.err
}

// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *InStream) {
    cli.in = in
}

// In returns the reader used for stdin
func (cli *DockerCli) In() *InStream {
    return cli.in
}

// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
    return func(cmd *cobra.Command, args []string) error {
        cmd.SetOutput(err)
        cmd.HelpFunc()(cmd, args)
        return nil
    }
}

// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
    return cli.configFile
}

// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
    return cli.serverInfo
}

// ClientInfo returns the client details for the cli
func (cli *DockerCli) ClientInfo() ClientInfo {
    return cli.clientInfo
}

// ContentTrustEnabled returns whether content trust has been enabled by an
// environment variable.
func (cli *DockerCli) ContentTrustEnabled() bool {
    return cli.contentTrust
}

// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
    // TODO: support override default location from config file
    return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
}

// RegistryClient returns a client for communicating with a Docker distribution
// registry
func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
    resolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
        return ResolveAuthConfig(ctx, cli, index)
    }
    return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
}

// Initialize the dockerCli runs initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
    cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)

    var err error
    cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
    if tlsconfig.IsErrEncryptedKey(err) {
        passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
        newClient := func(password string) (client.APIClient, error) {
            opts.Common.TLSOptions.Passphrase = password
            return NewAPIClientFromFlags(opts.Common, cli.configFile)
        }
        cli.client, err = getClientWithPassword(passRetriever, newClient)
    }
    if err != nil {
        return err
    }
    hasExperimental, err := isEnabled(cli.configFile.Experimental)
    if err != nil {
        return errors.Wrap(err, "Experimental field")
    }
    orchestrator := GetOrchestrator(hasExperimental, opts.Common.Orchestrator, cli.configFile.Orchestrator)
    cli.clientInfo = ClientInfo{
        DefaultVersion:  cli.client.ClientVersion(),
        HasExperimental: hasExperimental,
        Orchestrator:    orchestrator,
    }
    cli.initializeFromClient()
    return nil
}

func isEnabled(value string) (bool, error) {
    switch value {
    case "enabled":
        return true, nil
    case "", "disabled":
        return false, nil
    default:
        return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value)
    }
}

func (cli *DockerCli) initializeFromClient() {
    ping, err := cli.client.Ping(context.Background())
    if err != nil {
        // Default to true if we fail to connect to daemon
        cli.serverInfo = ServerInfo{HasExperimental: true}

        if ping.APIVersion != "" {
            cli.client.NegotiateAPIVersionPing(ping)
        }
        return
    }

    cli.serverInfo = ServerInfo{
        HasExperimental: ping.Experimental,
        OSType:          ping.OSType,
    }
    cli.client.NegotiateAPIVersionPing(ping)
}

func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
    for attempts := 0; ; attempts++ {
        passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
        if giveup || err != nil {
            return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
        }

        apiclient, err := newClient(passwd)
        if !tlsconfig.IsErrEncryptedKey(err) {
            return apiclient, err
        }
    }
}

// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
    return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}

// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
    HasExperimental bool
    OSType          string
}

// ClientInfo stores details about the supported features of the client
type ClientInfo struct {
    HasExperimental bool
    DefaultVersion  string
    Orchestrator    Orchestrator
}

// HasKubernetes checks if kubernetes orchestrator is enabled
func (c ClientInfo) HasKubernetes() bool {
    return c.HasExperimental && c.Orchestrator == OrchestratorKubernetes
}

// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli {
    return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted}
}

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
    host, err := getServerHost(opts.Hosts, opts.TLSOptions)
    if err != nil {
        return &client.Client{}, err
    }

    customHeaders := configFile.HTTPHeaders
    if customHeaders == nil {
        customHeaders = map[string]string{}
    }
    customHeaders["User-Agent"] = UserAgent()

    verStr := api.DefaultVersion
    if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
        verStr = tmpStr
    }

    return client.NewClientWithOpts(
        withHTTPClient(opts.TLSOptions),
        client.WithHTTPHeaders(customHeaders),
        client.WithVersion(verStr),
        client.WithHost(host),
    )
}

func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
    var host string
    switch len(hosts) {
    case 0:
        host = os.Getenv("DOCKER_HOST")
    case 1:
        host = hosts[0]
    default:
        return "", errors.New("Please specify only one -H")
    }

    return dopts.ParseHost(tlsOptions != nil, host)
}

func withHTTPClient(tlsOpts *tlsconfig.Options) func(*client.Client) error {
    return func(c *client.Client) error {
        if tlsOpts == nil {
            // Use the default HTTPClient
            return nil
        }

        opts := *tlsOpts
        opts.ExclusiveRootPools = true
        tlsConfig, err := tlsconfig.Client(opts)
        if err != nil {
            return err
        }

        httpClient := &http.Client{
            Transport: &http.Transport{
                TLSClientConfig: tlsConfig,
                DialContext: (&net.Dialer{
                    KeepAlive: 30 * time.Second,
                    Timeout:   30 * time.Second,
                }).DialContext,
            },
            CheckRedirect: client.CheckRedirect,
        }
        return client.WithHTTPClient(httpClient)(c)
    }
}

// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
    return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")"
}

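Taken together, cli.go describes the construction path a command binary follows: NewDockerCli wires the streams, Initialize loads the config file, builds the API client via NewAPIClientFromFlags, and negotiates the API version with a ping, and Client() then hands the negotiated client to individual commands. A minimal sketch of that flow, assuming the vendored import paths above; the main package, the Info call, and the error handling are illustrative and not part of this diff:

package main

import (
    "fmt"
    "os"

    "github.com/docker/cli/cli/command"
    cliflags "github.com/docker/cli/cli/flags"
    "golang.org/x/net/context"
)

func main() {
    // Bind the CLI to the process streams; content trust disabled.
    dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, false)

    // Initialize loads ~/.docker/config.json, honours DOCKER_HOST and
    // DOCKER_API_VERSION, and pings the daemon to negotiate a version.
    if err := dockerCli.Initialize(cliflags.NewClientOptions()); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    // The negotiated API client can then be used directly.
    info, err := dockerCli.Client().Info(context.Background())
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Fprintln(dockerCli.Out(), "server version:", info.ServerVersion)
}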
344
vendor/github.com/docker/cli/cli/command/cli_test.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
package command

import (
    "crypto/x509"
    "os"
    "runtime"
    "testing"

    cliconfig "github.com/docker/cli/cli/config"
    "github.com/docker/cli/cli/config/configfile"
    "github.com/docker/cli/cli/flags"
    "github.com/docker/docker/api"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/gotestyourself/gotestyourself/assert"
    is "github.com/gotestyourself/gotestyourself/assert/cmp"
    "github.com/gotestyourself/gotestyourself/fs"
    "github.com/pkg/errors"
    "golang.org/x/net/context"
)

func TestNewAPIClientFromFlags(t *testing.T) {
    host := "unix://path"
    if runtime.GOOS == "windows" {
        host = "npipe://./"
    }
    opts := &flags.CommonOptions{Hosts: []string{host}}
    configFile := &configfile.ConfigFile{
        HTTPHeaders: map[string]string{
            "My-Header": "Custom-Value",
        },
    }
    apiclient, err := NewAPIClientFromFlags(opts, configFile)
    assert.NilError(t, err)
    assert.Check(t, is.Equal(host, apiclient.DaemonHost()))

    expectedHeaders := map[string]string{
        "My-Header":  "Custom-Value",
        "User-Agent": UserAgent(),
    }
    assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
    assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}

func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
    customVersion := "v3.3.3"
    defer patchEnvVariable(t, "DOCKER_API_VERSION", customVersion)()

    opts := &flags.CommonOptions{}
    configFile := &configfile.ConfigFile{}
    apiclient, err := NewAPIClientFromFlags(opts, configFile)
    assert.NilError(t, err)
    assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion()))
}

// TODO: use gotestyourself/env.Patch
func patchEnvVariable(t *testing.T, key, value string) func() {
    oldValue, ok := os.LookupEnv(key)
    assert.NilError(t, os.Setenv(key, value))
    return func() {
        if !ok {
            assert.NilError(t, os.Unsetenv(key))
            return
        }
        assert.NilError(t, os.Setenv(key, oldValue))
    }
}

type fakeClient struct {
    client.Client
    pingFunc   func() (types.Ping, error)
    version    string
    negotiated bool
}

func (c *fakeClient) Ping(_ context.Context) (types.Ping, error) {
    return c.pingFunc()
}

func (c *fakeClient) ClientVersion() string {
    return c.version
}

func (c *fakeClient) NegotiateAPIVersionPing(types.Ping) {
    c.negotiated = true
}

func TestInitializeFromClient(t *testing.T) {
    defaultVersion := "v1.55"

    var testcases = []struct {
        doc            string
        pingFunc       func() (types.Ping, error)
        expectedServer ServerInfo
        negotiated     bool
    }{
        {
            doc: "successful ping",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{Experimental: true, OSType: "linux", APIVersion: "v1.30"}, nil
            },
            expectedServer: ServerInfo{HasExperimental: true, OSType: "linux"},
            negotiated:     true,
        },
        {
            doc: "failed ping, no API version",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{}, errors.New("failed")
            },
            expectedServer: ServerInfo{HasExperimental: true},
        },
        {
            doc: "failed ping, with API version",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{APIVersion: "v1.33"}, errors.New("failed")
            },
            expectedServer: ServerInfo{HasExperimental: true},
            negotiated:     true,
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            apiclient := &fakeClient{
                pingFunc: testcase.pingFunc,
                version:  defaultVersion,
            }

            cli := &DockerCli{client: apiclient}
            cli.initializeFromClient()
            assert.Check(t, is.DeepEqual(testcase.expectedServer, cli.serverInfo))
            assert.Check(t, is.Equal(testcase.negotiated, apiclient.negotiated))
        })
    }
}

func TestExperimentalCLI(t *testing.T) {
    defaultVersion := "v1.55"

    var testcases = []struct {
        doc                     string
        configfile              string
        expectedExperimentalCLI bool
    }{
        {
            doc:                     "default",
            configfile:              `{}`,
            expectedExperimentalCLI: false,
        },
        {
            doc: "experimental",
            configfile: `{
    "experimental": "enabled"
}`,
            expectedExperimentalCLI: true,
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            dir := fs.NewDir(t, testcase.doc, fs.WithFile("config.json", testcase.configfile))
            defer dir.Remove()
            apiclient := &fakeClient{
                version: defaultVersion,
            }

            cli := &DockerCli{client: apiclient, err: os.Stderr}
            cliconfig.SetDir(dir.Path())
            err := cli.Initialize(flags.NewClientOptions())
            assert.NilError(t, err)
            assert.Check(t, is.Equal(testcase.expectedExperimentalCLI, cli.ClientInfo().HasExperimental))
        })
    }
}

func TestOrchestratorSwitch(t *testing.T) {
    defaultVersion := "v0.00"

    var testcases = []struct {
        doc                  string
        configfile           string
        envOrchestrator      string
        flagOrchestrator     string
        expectedOrchestrator string
        expectedKubernetes   bool
    }{
        {
            doc: "default",
            configfile: `{
    "experimental": "enabled"
}`,
            expectedOrchestrator: "swarm",
            expectedKubernetes:   false,
        },
        {
            doc: "kubernetesIsExperimental",
            configfile: `{
    "experimental": "disabled",
    "orchestrator": "kubernetes"
}`,
            envOrchestrator:      "kubernetes",
            flagOrchestrator:     "kubernetes",
            expectedOrchestrator: "swarm",
            expectedKubernetes:   false,
        },
        {
            doc: "kubernetesConfigFile",
            configfile: `{
    "experimental": "enabled",
    "orchestrator": "kubernetes"
}`,
            expectedOrchestrator: "kubernetes",
            expectedKubernetes:   true,
        },
        {
            doc: "kubernetesEnv",
            configfile: `{
    "experimental": "enabled"
}`,
            envOrchestrator:      "kubernetes",
            expectedOrchestrator: "kubernetes",
            expectedKubernetes:   true,
        },
        {
            doc: "kubernetesFlag",
            configfile: `{
    "experimental": "enabled"
}`,
            flagOrchestrator:     "kubernetes",
            expectedOrchestrator: "kubernetes",
            expectedKubernetes:   true,
        },
        {
            doc: "envOverridesConfigFile",
            configfile: `{
    "experimental": "enabled",
    "orchestrator": "kubernetes"
}`,
            envOrchestrator:      "swarm",
            expectedOrchestrator: "swarm",
            expectedKubernetes:   false,
        },
        {
            doc: "flagOverridesEnv",
            configfile: `{
    "experimental": "enabled"
}`,
            envOrchestrator:      "kubernetes",
            flagOrchestrator:     "swarm",
            expectedOrchestrator: "swarm",
            expectedKubernetes:   false,
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            dir := fs.NewDir(t, testcase.doc, fs.WithFile("config.json", testcase.configfile))
            defer dir.Remove()
            apiclient := &fakeClient{
                version: defaultVersion,
            }
            if testcase.envOrchestrator != "" {
                defer patchEnvVariable(t, "DOCKER_ORCHESTRATOR", testcase.envOrchestrator)()
            }

            cli := &DockerCli{client: apiclient, err: os.Stderr}
            cliconfig.SetDir(dir.Path())
            options := flags.NewClientOptions()
            if testcase.flagOrchestrator != "" {
                options.Common.Orchestrator = testcase.flagOrchestrator
            }
            err := cli.Initialize(options)
            assert.NilError(t, err)
            assert.Check(t, is.Equal(testcase.expectedKubernetes, cli.ClientInfo().HasKubernetes()))
            assert.Check(t, is.Equal(testcase.expectedOrchestrator, string(cli.ClientInfo().Orchestrator)))
        })
    }
}

func TestGetClientWithPassword(t *testing.T) {
    expected := "password"

    var testcases = []struct {
        doc             string
        password        string
        retrieverErr    error
        retrieverGiveup bool
        newClientErr    error
        expectedErr     string
    }{
        {
            doc:      "successful connect",
            password: expected,
        },
        {
            doc:             "password retriever exhausted",
            retrieverGiveup: true,
            retrieverErr:    errors.New("failed"),
            expectedErr:     "private key is encrypted, but could not get passphrase",
        },
        {
            doc:          "password retriever error",
            retrieverErr: errors.New("failed"),
            expectedErr:  "failed",
        },
        {
            doc:          "newClient error",
            newClientErr: errors.New("failed to connect"),
            expectedErr:  "failed to connect",
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            passRetriever := func(_, _ string, _ bool, attempts int) (passphrase string, giveup bool, err error) {
                // Always return an invalid pass first to test iteration
                switch attempts {
                case 0:
                    return "something else", false, nil
                default:
                    return testcase.password, testcase.retrieverGiveup, testcase.retrieverErr
                }
            }

            newClient := func(currentPassword string) (client.APIClient, error) {
                if testcase.newClientErr != nil {
                    return nil, testcase.newClientErr
                }
                if currentPassword == expected {
                    return &client.Client{}, nil
                }
                return &client.Client{}, x509.IncorrectPasswordError
            }

            _, err := getClientWithPassword(passRetriever, newClient)
            if testcase.expectedErr != "" {
                assert.ErrorContains(t, err, testcase.expectedErr)
                return
            }

            assert.NilError(t, err)
        })
    }
}

133
vendor/github.com/docker/cli/cli/command/commands/commands.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
package commands

import (
    "os"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/checkpoint"
    "github.com/docker/cli/cli/command/config"
    "github.com/docker/cli/cli/command/container"
    "github.com/docker/cli/cli/command/image"
    "github.com/docker/cli/cli/command/manifest"
    "github.com/docker/cli/cli/command/network"
    "github.com/docker/cli/cli/command/node"
    "github.com/docker/cli/cli/command/plugin"
    "github.com/docker/cli/cli/command/registry"
    "github.com/docker/cli/cli/command/secret"
    "github.com/docker/cli/cli/command/service"
    "github.com/docker/cli/cli/command/stack"
    "github.com/docker/cli/cli/command/swarm"
    "github.com/docker/cli/cli/command/system"
    "github.com/docker/cli/cli/command/trust"
    "github.com/docker/cli/cli/command/volume"
    "github.com/spf13/cobra"
)

// AddCommands adds all the commands from cli/command to the root command
func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) {
    cmd.AddCommand(
        // checkpoint
        checkpoint.NewCheckpointCommand(dockerCli),

        // config
        config.NewConfigCommand(dockerCli),

        // container
        container.NewContainerCommand(dockerCli),
        container.NewRunCommand(dockerCli),

        // image
        image.NewImageCommand(dockerCli),
        image.NewBuildCommand(dockerCli),

        // manifest
        manifest.NewManifestCommand(dockerCli),

        // network
        network.NewNetworkCommand(dockerCli),

        // node
        node.NewNodeCommand(dockerCli),

        // plugin
        plugin.NewPluginCommand(dockerCli),

        // registry
        registry.NewLoginCommand(dockerCli),
        registry.NewLogoutCommand(dockerCli),
        registry.NewSearchCommand(dockerCli),

        // secret
        secret.NewSecretCommand(dockerCli),

        // service
        service.NewServiceCommand(dockerCli),

        // system
        system.NewSystemCommand(dockerCli),
        system.NewVersionCommand(dockerCli),

        // stack
        stack.NewStackCommand(dockerCli),
        stack.NewTopLevelDeployCommand(dockerCli),

        // swarm
        swarm.NewSwarmCommand(dockerCli),

        // trust
        trust.NewTrustCommand(dockerCli),

        // volume
        volume.NewVolumeCommand(dockerCli),

        // legacy commands may be hidden
        hide(system.NewEventsCommand(dockerCli)),
        hide(system.NewInfoCommand(dockerCli)),
        hide(system.NewInspectCommand(dockerCli)),
        hide(container.NewAttachCommand(dockerCli)),
        hide(container.NewCommitCommand(dockerCli)),
        hide(container.NewCopyCommand(dockerCli)),
        hide(container.NewCreateCommand(dockerCli)),
        hide(container.NewDiffCommand(dockerCli)),
        hide(container.NewExecCommand(dockerCli)),
        hide(container.NewExportCommand(dockerCli)),
        hide(container.NewKillCommand(dockerCli)),
        hide(container.NewLogsCommand(dockerCli)),
        hide(container.NewPauseCommand(dockerCli)),
        hide(container.NewPortCommand(dockerCli)),
        hide(container.NewPsCommand(dockerCli)),
        hide(container.NewRenameCommand(dockerCli)),
        hide(container.NewRestartCommand(dockerCli)),
        hide(container.NewRmCommand(dockerCli)),
        hide(container.NewStartCommand(dockerCli)),
        hide(container.NewStatsCommand(dockerCli)),
        hide(container.NewStopCommand(dockerCli)),
        hide(container.NewTopCommand(dockerCli)),
        hide(container.NewUnpauseCommand(dockerCli)),
        hide(container.NewUpdateCommand(dockerCli)),
        hide(container.NewWaitCommand(dockerCli)),
        hide(image.NewHistoryCommand(dockerCli)),
        hide(image.NewImagesCommand(dockerCli)),
        hide(image.NewImportCommand(dockerCli)),
        hide(image.NewLoadCommand(dockerCli)),
        hide(image.NewPullCommand(dockerCli)),
        hide(image.NewPushCommand(dockerCli)),
        hide(image.NewRemoveCommand(dockerCli)),
        hide(image.NewSaveCommand(dockerCli)),
        hide(image.NewTagCommand(dockerCli)),
    )
}

func hide(cmd *cobra.Command) *cobra.Command {
    // If the environment variable with name "DOCKER_HIDE_LEGACY_COMMANDS" is not empty,
    // these legacy commands (such as `docker ps`, `docker exec`, etc)
    // will not be shown in output console.
    if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" {
        return cmd
    }
    cmdCopy := *cmd
    cmdCopy.Hidden = true
    cmdCopy.Aliases = []string{}
    return &cmdCopy
}

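commands.go is the single registration point: a binary creates one root cobra command, builds a DockerCli, and calls AddCommands to attach every management command plus the optionally hidden legacy ones. A rough sketch of that wiring under the same import paths; the root command's Use/Short strings, the PersistentPreRunE hook, and the error handling are invented for illustration and are not taken from this diff:

package main

import (
    "os"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/commands"
    cliflags "github.com/docker/cli/cli/flags"
    "github.com/spf13/cobra"
)

func main() {
    dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, false)

    rootCmd := &cobra.Command{
        Use:   "docker [OPTIONS] COMMAND [ARG...]",
        Short: "A self-sufficient runtime for containers",
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            // Flags have been parsed by now, so the API client can be built.
            return dockerCli.Initialize(cliflags.NewClientOptions())
        },
    }

    // Attach checkpoint, config, container, image, ... as subcommands;
    // legacy top-level commands stay visible unless
    // DOCKER_HIDE_LEGACY_COMMANDS is set (see hide above).
    commands.AddCommands(rootCmd, dockerCli)

    if err := rootCmd.Execute(); err != nil {
        os.Exit(1)
    }
}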
44
vendor/github.com/docker/cli/cli/command/config/client_test.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
package config

import (
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "golang.org/x/net/context"
)

type fakeClient struct {
    client.Client
    configCreateFunc  func(swarm.ConfigSpec) (types.ConfigCreateResponse, error)
    configInspectFunc func(string) (swarm.Config, []byte, error)
    configListFunc    func(types.ConfigListOptions) ([]swarm.Config, error)
    configRemoveFunc  func(string) error
}

func (c *fakeClient) ConfigCreate(ctx context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
    if c.configCreateFunc != nil {
        return c.configCreateFunc(spec)
    }
    return types.ConfigCreateResponse{}, nil
}

func (c *fakeClient) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
    if c.configInspectFunc != nil {
        return c.configInspectFunc(id)
    }
    return swarm.Config{}, nil, nil
}

func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
    if c.configListFunc != nil {
        return c.configListFunc(options)
    }
    return []swarm.Config{}, nil
}

func (c *fakeClient) ConfigRemove(ctx context.Context, name string) error {
    if c.configRemoveFunc != nil {
        return c.configRemoveFunc(name)
    }
    return nil
}

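This fakeClient follows the same pattern as the checkpoint tests: embed client.Client, expose one hook per API call, and fall back to a zero-value response when a test leaves the hook nil. A small usage sketch of that behavior; the test name and expectations below are illustrative only and not part of the vendored package:

package config

import (
    "testing"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/swarm"
    "github.com/gotestyourself/gotestyourself/assert"
    is "github.com/gotestyourself/gotestyourself/assert/cmp"
    "golang.org/x/net/context"
)

func TestFakeClientConfigListSketch(t *testing.T) {
    // With no hook set, ConfigList falls back to an empty slice.
    empty := &fakeClient{}
    configs, err := empty.ConfigList(context.Background(), types.ConfigListOptions{})
    assert.NilError(t, err)
    assert.Check(t, is.Len(configs, 0))

    // With a hook set, the call is forwarded to it unchanged.
    stubbed := &fakeClient{
        configListFunc: func(options types.ConfigListOptions) ([]swarm.Config, error) {
            return []swarm.Config{{ID: "config-foo"}}, nil
        },
    }
    configs, err = stubbed.ConfigList(context.Background(), types.ConfigListOptions{})
    assert.NilError(t, err)
    assert.Check(t, is.Equal("config-foo", configs[0].ID))
}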
29
vendor/github.com/docker/cli/cli/command/config/cmd.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
package config

import (
    "github.com/spf13/cobra"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
)

// NewConfigCommand returns a cobra command for `config` subcommands
func NewConfigCommand(dockerCli command.Cli) *cobra.Command {
    cmd := &cobra.Command{
        Use:   "config",
        Short: "Manage Docker configs",
        Args:  cli.NoArgs,
        RunE:  command.ShowHelp(dockerCli.Err()),
        Annotations: map[string]string{
            "version": "1.30",
            "swarm":   "",
        },
    }
    cmd.AddCommand(
        newConfigListCommand(dockerCli),
        newConfigCreateCommand(dockerCli),
        newConfigInspectCommand(dockerCli),
        newConfigRemoveCommand(dockerCli),
    )
    return cmd
}

Some files were not shown because too many files have changed in this diff.