*: switch from godep to glide
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
This commit is contained in:
parent
0d7b500cee
commit
4bc8701fc0
673 changed files with 57012 additions and 46916 deletions
437
Godeps/Godeps.json
generated
437
Godeps/Godeps.json
generated
|
@ -1,437 +0,0 @@
|
||||||
{
|
|
||||||
"ImportPath": "github.com/mrunalp/ocid",
|
|
||||||
"GoVersion": "go1.6",
|
|
||||||
"GodepVersion": "v74",
|
|
||||||
"Packages": [
|
|
||||||
"./..."
|
|
||||||
],
|
|
||||||
"Deps": [
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/Microsoft/go-winio",
|
|
||||||
"Comment": "v0.3.5-2-gce2922f",
|
|
||||||
"Rev": "ce2922f643c8fd76b46cadc7f404a06282678b34"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/Sirupsen/logrus",
|
|
||||||
"Comment": "v0.10.0-28-ga283a10",
|
|
||||||
"Rev": "a283a10442df8dc09befd873fab202bf8a253d6a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/cloudfoundry-incubator/candiedyaml",
|
|
||||||
"Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containernetworking/cni/libcni",
|
|
||||||
"Comment": "v0.3.0-67-gc5e39a8",
|
|
||||||
"Rev": "c5e39a87f702f757a1e98f701d873992bd4d37b1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containernetworking/cni/pkg/invoke",
|
|
||||||
"Comment": "v0.3.0-67-gc5e39a8",
|
|
||||||
"Rev": "c5e39a87f702f757a1e98f701d873992bd4d37b1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containernetworking/cni/pkg/types",
|
|
||||||
"Comment": "v0.3.0-67-gc5e39a8",
|
|
||||||
"Rev": "c5e39a87f702f757a1e98f701d873992bd4d37b1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/directory",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/directory/explicitfilepath",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/docker",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/docker/policyconfiguration",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/image",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/manifest",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/oci",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/openshift",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/transports",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/types",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/containers/image/version",
|
|
||||||
"Rev": "0c795ea71fce8641e408312e1628d20b38903fe6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/distribution",
|
|
||||||
"Comment": "v2.5.0-rc.1-47-g2ea0a41",
|
|
||||||
"Rev": "2ea0a4187293bbd307fb680d7a39b0d5afdc61ac"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/distribution/context",
|
|
||||||
"Comment": "v2.5.0-rc.1-47-g2ea0a41",
|
|
||||||
"Rev": "2ea0a4187293bbd307fb680d7a39b0d5afdc61ac"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/distribution/digest",
|
|
||||||
"Comment": "v2.5.0-rc.1-47-g2ea0a41",
|
|
||||||
"Rev": "2ea0a4187293bbd307fb680d7a39b0d5afdc61ac"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/distribution/reference",
|
|
||||||
"Comment": "v2.5.0-rc.1-47-g2ea0a41",
|
|
||||||
"Rev": "2ea0a4187293bbd307fb680d7a39b0d5afdc61ac"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/distribution/uuid",
|
|
||||||
"Comment": "v2.5.0-rc.1-47-g2ea0a41",
|
|
||||||
"Rev": "2ea0a4187293bbd307fb680d7a39b0d5afdc61ac"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/daemon/graphdriver",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/image",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/image/v1",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/layer",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/archive",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/chrootarchive",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/fileutils",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/homedir",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/idtools",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/ioutils",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/longpath",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/mount",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/plugins",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/plugins/transport",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/pools",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/promise",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/random",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/reexec",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/stringid",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/pkg/system",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/docker/reference",
|
|
||||||
"Comment": "v1.4.1-13700-g0ec1adf",
|
|
||||||
"Rev": "0ec1adf9d1e3f0ea4fdcb98c83753d5575d3c52c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/engine-api/types/blkiodev",
|
|
||||||
"Comment": "v0.3.1-236-gd505506",
|
|
||||||
"Rev": "d505506c67611e8364526339452c20d6d0babcf6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/engine-api/types/container",
|
|
||||||
"Comment": "v0.3.1-236-gd505506",
|
|
||||||
"Rev": "d505506c67611e8364526339452c20d6d0babcf6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/engine-api/types/strslice",
|
|
||||||
"Comment": "v0.3.1-236-gd505506",
|
|
||||||
"Rev": "d505506c67611e8364526339452c20d6d0babcf6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/engine-api/types/versions",
|
|
||||||
"Comment": "v0.3.1-236-gd505506",
|
|
||||||
"Rev": "d505506c67611e8364526339452c20d6d0babcf6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/go-connections/nat",
|
|
||||||
"Comment": "v0.2.1",
|
|
||||||
"Rev": "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/go-connections/sockets",
|
|
||||||
"Comment": "v0.2.1",
|
|
||||||
"Rev": "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/go-connections/tlsconfig",
|
|
||||||
"Comment": "v0.2.1",
|
|
||||||
"Rev": "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/go-units",
|
|
||||||
"Comment": "v0.3.1",
|
|
||||||
"Rev": "f2d77a61e3c169b43402a0a1e84f06daf29b8190"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/docker/libtrust",
|
|
||||||
"Rev": "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/ghodss/yaml",
|
|
||||||
"Rev": "aa0c862057666179de291b67d9f093d12b5a8473"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
|
||||||
"Comment": "v0.2-22-g2752d97",
|
|
||||||
"Rev": "2752d97bbd91927dd1c43296dbf8700e50e2708c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/glog",
|
|
||||||
"Rev": "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/protobuf/proto",
|
|
||||||
"Rev": "f7137ae6b19afbfd61a94b746fda3b3fe0491874"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gorilla/context",
|
|
||||||
"Comment": "v1.1-4-gaed02d1",
|
|
||||||
"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gorilla/mux",
|
|
||||||
"Comment": "v1.1-15-gd391bea",
|
|
||||||
"Rev": "d391bea3118c9fc17a88d62c9189bb791255e0ef"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/imdario/mergo",
|
|
||||||
"Comment": "0.2.2-6-g50d4dbd",
|
|
||||||
"Rev": "50d4dbd4eb0e84778abe37cefef140271d96fade"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime",
|
|
||||||
"Comment": "v1.4.0-alpha.3-265-g3fd14d9",
|
|
||||||
"Rev": "3fd14d97fb13ba2849e0c908aaff18efcece70c1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/opencontainers/ocitools/generate",
|
|
||||||
"Rev": "03e8b89f9eccc62d36f742a0af737adbfdcea635"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
|
|
||||||
"Comment": "v1.0.0-rc1-73-gbd1d3ac",
|
|
||||||
"Rev": "bd1d3ac0480c5d3babac10dc32cff2886563219c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
|
|
||||||
"Comment": "v1.0.0-rc1-29-gbbaf29e",
|
|
||||||
"Rev": "bbaf29e6173a1ef017d75db11e4d698ac4de0f1e"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/rajatchopra/ocicni",
|
|
||||||
"Rev": "df6e1fb00fd0763d67f8f6f6ebaf3496c3eea451"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/syndtr/gocapability/capability",
|
|
||||||
"Rev": "2c00daeb6c3b45114c80ac44119e7b8801fdd852"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/urfave/cli",
|
|
||||||
"Comment": "v1.18.0-6-g3a52162",
|
|
||||||
"Rev": "3a5216227e14699bf7810b2573db60bf4b3f71b5"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/vbatts/tar-split/archive/tar",
|
|
||||||
"Comment": "v0.9.13-6-g28bc4c3",
|
|
||||||
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/vbatts/tar-split/tar/asm",
|
|
||||||
"Comment": "v0.9.13-6-g28bc4c3",
|
|
||||||
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/vbatts/tar-split/tar/storage",
|
|
||||||
"Comment": "v0.9.13-6-g28bc4c3",
|
|
||||||
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/context",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/proxy",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
|
||||||
"Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
|
||||||
"Rev": "a646d33e2ee3172a661fc09bca23bb4889a41bc8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/sys/windows",
|
|
||||||
"Rev": "a646d33e2ee3172a661fc09bca23bb4889a41bc8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
|
||||||
"Comment": "v1.0.1-GA-29-g79b7c34",
|
|
||||||
"Rev": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/util/errors",
|
|
||||||
"Comment": "v1.4.0-alpha.1-489-g976ca09",
|
|
||||||
"Rev": "976ca09d714cf114fb7a9e681bc0b170760cbdab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/util/homedir",
|
|
||||||
"Comment": "v1.4.0-alpha.1-489-g976ca09",
|
|
||||||
"Rev": "976ca09d714cf114fb7a9e681bc0b170760cbdab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/util/net",
|
|
||||||
"Comment": "v1.4.0-alpha.1-489-g976ca09",
|
|
||||||
"Rev": "976ca09d714cf114fb7a9e681bc0b170760cbdab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "k8s.io/kubernetes/pkg/util/sets",
|
|
||||||
"Comment": "v1.4.0-alpha.1-489-g976ca09",
|
|
||||||
"Rev": "976ca09d714cf114fb7a9e681bc0b170760cbdab"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
5
Godeps/Readme
generated
5
Godeps/Readme
generated
|
@ -1,5 +0,0 @@
|
||||||
This directory tree is generated automatically by godep.
|
|
||||||
|
|
||||||
Please do not edit.
|
|
||||||
|
|
||||||
See https://github.com/tools/godep for more information.
|
|
9
Makefile
9
Makefile
|
@ -1,4 +1,4 @@
|
||||||
.PHONY: all clean conmon ocid ocic
|
.PHONY: all clean conmon ocid ocic update-deps
|
||||||
|
|
||||||
all: conmon ocid ocic
|
all: conmon ocid ocic
|
||||||
|
|
||||||
|
@ -13,3 +13,10 @@ ocic:
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f ocic ocid
|
rm -f ocic ocid
|
||||||
|
|
||||||
|
update-deps:
|
||||||
|
@which glide > /dev/null 2>/dev/null || (echo "ERROR: glide not found." && false)
|
||||||
|
glide update --strip-vcs --strip-vendor --update-vendored --delete
|
||||||
|
glide-vc --only-code --no-tests
|
||||||
|
# see http://sed.sourceforge.net/sed1line.txt
|
||||||
|
find vendor -type f -exec sed -i -e :a -e '/^\n*$$/{$$d;N;ba' -e '}' "{}" \;
|
||||||
|
|
213
glide.lock
generated
Normal file
213
glide.lock
generated
Normal file
|
@ -0,0 +1,213 @@
|
||||||
|
hash: e1c100879f58ef4902c4e13d6710505f0ec62fbb5eaa237abeff8faf0ddf30f1
|
||||||
|
updated: 2016-09-17T15:49:39.168380014+02:00
|
||||||
|
imports:
|
||||||
|
- name: github.com/containernetworking/cni
|
||||||
|
version: 9d5e6e60e79491207834ae8439e80c943db65a69
|
||||||
|
subpackages:
|
||||||
|
- libcni
|
||||||
|
- pkg/invoke
|
||||||
|
- pkg/types
|
||||||
|
- name: github.com/containers/image
|
||||||
|
version: f6f11ab5cf8b1e70ef4aa3f8b6fdb4b671d16abd
|
||||||
|
subpackages:
|
||||||
|
- directory
|
||||||
|
- image
|
||||||
|
- transports
|
||||||
|
- directory/explicitfilepath
|
||||||
|
- types
|
||||||
|
- manifest
|
||||||
|
- docker
|
||||||
|
- oci/layout
|
||||||
|
- openshift
|
||||||
|
- docker/policyconfiguration
|
||||||
|
- version
|
||||||
|
- name: github.com/docker/distribution
|
||||||
|
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
|
||||||
|
subpackages:
|
||||||
|
- digest
|
||||||
|
- reference
|
||||||
|
- name: github.com/docker/docker
|
||||||
|
version: b9f10c951893f9a00865890a5232e85d770c1087
|
||||||
|
subpackages:
|
||||||
|
- pkg/jsonlog
|
||||||
|
- pkg/jsonmessage
|
||||||
|
- pkg/longpath
|
||||||
|
- pkg/mount
|
||||||
|
- pkg/stdcopy
|
||||||
|
- pkg/symlink
|
||||||
|
- pkg/system
|
||||||
|
- pkg/term
|
||||||
|
- pkg/term/windows
|
||||||
|
- reference
|
||||||
|
- image
|
||||||
|
- image/v1
|
||||||
|
- pkg/homedir
|
||||||
|
- layer
|
||||||
|
- pkg/version
|
||||||
|
- daemon/graphdriver
|
||||||
|
- pkg/archive
|
||||||
|
- pkg/idtools
|
||||||
|
- pkg/ioutils
|
||||||
|
- pkg/stringid
|
||||||
|
- pkg/chrootarchive
|
||||||
|
- pkg/plugins
|
||||||
|
- pkg/fileutils
|
||||||
|
- pkg/pools
|
||||||
|
- pkg/promise
|
||||||
|
- pkg/random
|
||||||
|
- pkg/reexec
|
||||||
|
- pkg/plugins/transport
|
||||||
|
- name: github.com/docker/engine-api
|
||||||
|
version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
|
||||||
|
subpackages:
|
||||||
|
- client
|
||||||
|
- client/transport
|
||||||
|
- client/transport/cancellable
|
||||||
|
- types
|
||||||
|
- types/blkiodev
|
||||||
|
- types/container
|
||||||
|
- types/filters
|
||||||
|
- types/network
|
||||||
|
- types/reference
|
||||||
|
- types/registry
|
||||||
|
- types/strslice
|
||||||
|
- types/time
|
||||||
|
- types/versions
|
||||||
|
- name: github.com/docker/go-connections
|
||||||
|
version: f549a9393d05688dff0992ef3efd8bbe6c628aeb
|
||||||
|
subpackages:
|
||||||
|
- nat
|
||||||
|
- sockets
|
||||||
|
- tlsconfig
|
||||||
|
- name: github.com/docker/go-units
|
||||||
|
version: 0bbddae09c5a5419a8c6dcdd7ff90da3d450393b
|
||||||
|
- name: github.com/docker/libtrust
|
||||||
|
version: aabc10ec26b754e797f9028f4589c5b7bd90dc20
|
||||||
|
- name: github.com/ghodss/yaml
|
||||||
|
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||||
|
- name: github.com/gogo/protobuf
|
||||||
|
version: e18d7aa8f8c624c915db340349aad4c49b10d173
|
||||||
|
subpackages:
|
||||||
|
- gogoproto
|
||||||
|
- plugin/compare
|
||||||
|
- plugin/defaultcheck
|
||||||
|
- plugin/description
|
||||||
|
- plugin/embedcheck
|
||||||
|
- plugin/enumstringer
|
||||||
|
- plugin/equal
|
||||||
|
- plugin/face
|
||||||
|
- plugin/gostring
|
||||||
|
- plugin/marshalto
|
||||||
|
- plugin/oneofcheck
|
||||||
|
- plugin/populate
|
||||||
|
- plugin/size
|
||||||
|
- plugin/stringer
|
||||||
|
- plugin/testgen
|
||||||
|
- plugin/union
|
||||||
|
- plugin/unmarshal
|
||||||
|
- proto
|
||||||
|
- protoc-gen-gogo/descriptor
|
||||||
|
- protoc-gen-gogo/generator
|
||||||
|
- protoc-gen-gogo/grpc
|
||||||
|
- protoc-gen-gogo/plugin
|
||||||
|
- sortkeys
|
||||||
|
- vanity
|
||||||
|
- vanity/command
|
||||||
|
- name: github.com/golang/glog
|
||||||
|
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||||
|
- name: github.com/golang/protobuf
|
||||||
|
version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67
|
||||||
|
subpackages:
|
||||||
|
- jsonpb
|
||||||
|
- proto
|
||||||
|
- name: github.com/imdario/mergo
|
||||||
|
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||||
|
- name: github.com/kubernetes/kubernetes
|
||||||
|
version: ff3ca3d616518087dc20180f69bb4038379f1028
|
||||||
|
subpackages:
|
||||||
|
- pkg/kubelet/api/v1alpha1/runtime
|
||||||
|
- name: github.com/Microsoft/go-winio
|
||||||
|
version: 8f9387ea7efabb228a981b9c381142be7667967f
|
||||||
|
- name: github.com/opencontainers/image-spec
|
||||||
|
version: 287772a24ab02d5d2fe3fbbba4f13dff9b9ce003
|
||||||
|
subpackages:
|
||||||
|
- specs-go/v1
|
||||||
|
- specs-go
|
||||||
|
- name: github.com/opencontainers/ocitools
|
||||||
|
version: 5c1fa311dbe42206d9286be684d8f30a3fe7ba97
|
||||||
|
subpackages:
|
||||||
|
- generate
|
||||||
|
- name: github.com/opencontainers/runc
|
||||||
|
version: 142df3836b740af53dc6da59eed8dbc92f62917c
|
||||||
|
subpackages:
|
||||||
|
- libcontainer
|
||||||
|
- libcontainer/apparmor
|
||||||
|
- libcontainer/cgroups
|
||||||
|
- libcontainer/cgroups/fs
|
||||||
|
- libcontainer/cgroups/systemd
|
||||||
|
- libcontainer/configs
|
||||||
|
- libcontainer/configs/validate
|
||||||
|
- libcontainer/criurpc
|
||||||
|
- libcontainer/keys
|
||||||
|
- libcontainer/label
|
||||||
|
- libcontainer/seccomp
|
||||||
|
- libcontainer/selinux
|
||||||
|
- libcontainer/stacktrace
|
||||||
|
- libcontainer/system
|
||||||
|
- libcontainer/user
|
||||||
|
- libcontainer/utils
|
||||||
|
- name: github.com/opencontainers/runtime-spec
|
||||||
|
version: 7dab1a245d3e13d0ad03b77409d0bf7e00a6ada4
|
||||||
|
subpackages:
|
||||||
|
- specs-go
|
||||||
|
- name: github.com/rajatchopra/ocicni
|
||||||
|
version: daea0034a4bc2d193d9bf5031bace0403094011a
|
||||||
|
- name: github.com/Sirupsen/logrus
|
||||||
|
version: 4b6ea7319e214d98c938f12692336f7ca9348d6b
|
||||||
|
- name: github.com/syndtr/gocapability
|
||||||
|
version: 2c00daeb6c3b45114c80ac44119e7b8801fdd852
|
||||||
|
subpackages:
|
||||||
|
- capability
|
||||||
|
- name: github.com/urfave/cli
|
||||||
|
version: a14d7d367bc02b1f57d88de97926727f2d936387
|
||||||
|
- name: github.com/vbatts/tar-split
|
||||||
|
version: 6810cedb21b2c3d0b9bb8f9af12ff2dc7a2f14df
|
||||||
|
subpackages:
|
||||||
|
- tar/asm
|
||||||
|
- tar/storage
|
||||||
|
- archive/tar
|
||||||
|
- name: golang.org/x/net
|
||||||
|
version: e90d6d0afc4c315a0d87a568ae68577cc15149a0
|
||||||
|
subpackages:
|
||||||
|
- context
|
||||||
|
- http2
|
||||||
|
- trace
|
||||||
|
- http2/hpack
|
||||||
|
- lex/httplex
|
||||||
|
- internal/timeseries
|
||||||
|
- proxy
|
||||||
|
- name: golang.org/x/sys
|
||||||
|
version: 9c60d1c508f5134d1ca726b4641db998f2523357
|
||||||
|
subpackages:
|
||||||
|
- unix
|
||||||
|
- name: google.golang.org/grpc
|
||||||
|
version: 0032a855ba5c8a3c8e0d71c2deef354b70af1584
|
||||||
|
subpackages:
|
||||||
|
- codes
|
||||||
|
- credentials
|
||||||
|
- grpclog
|
||||||
|
- internal
|
||||||
|
- metadata
|
||||||
|
- naming
|
||||||
|
- transport
|
||||||
|
- peer
|
||||||
|
- name: gopkg.in/yaml.v2
|
||||||
|
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||||
|
- name: k8s.io/kubernetes
|
||||||
|
version: 8fd414537b5143ab039cb910590237cabf4af783
|
||||||
|
subpackages:
|
||||||
|
- pkg/util/errors
|
||||||
|
- pkg/util/homedir
|
||||||
|
- pkg/util/net
|
||||||
|
- pkg/util/sets
|
||||||
|
testImports: []
|
28
glide.yaml
Normal file
28
glide.yaml
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
package: github.com/kubernetes-incubator/ocid
|
||||||
|
import:
|
||||||
|
- package: github.com/Sirupsen/logrus
|
||||||
|
version: ~0.10.0
|
||||||
|
- package: github.com/containers/image
|
||||||
|
subpackages:
|
||||||
|
- directory
|
||||||
|
- image
|
||||||
|
- transports
|
||||||
|
- package: github.com/kubernetes/kubernetes
|
||||||
|
version: ~1.5.0-alpha.0
|
||||||
|
subpackages:
|
||||||
|
- pkg/kubelet/api/v1alpha1/runtime
|
||||||
|
- package: github.com/opencontainers/ocitools
|
||||||
|
subpackages:
|
||||||
|
- generate
|
||||||
|
- package: github.com/opencontainers/runtime-spec
|
||||||
|
version: ~1.0.0-rc2
|
||||||
|
subpackages:
|
||||||
|
- specs-go
|
||||||
|
- package: github.com/rajatchopra/ocicni
|
||||||
|
- package: github.com/urfave/cli
|
||||||
|
version: ~1.18.1
|
||||||
|
- package: golang.org/x/net
|
||||||
|
subpackages:
|
||||||
|
- context
|
||||||
|
- package: google.golang.org/grpc
|
||||||
|
version: ~1.0.1-GA
|
|
@ -44,11 +44,11 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
|
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
|
||||||
src, err := tr.NewImageSource("", true)
|
src, err := tr.NewImageSource(nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
i := image.FromSource(src, nil)
|
i := image.FromSource(src)
|
||||||
blobs, err := i.BlobDigests()
|
blobs, err := i.BlobDigests()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -62,7 +62,7 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
|
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
|
||||||
dest, err := dir.NewImageDestination("", true)
|
dest, err := dir.NewImageDestination(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -73,7 +73,7 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := dest.PutBlob(b, r); err != nil {
|
if _, _, err := dest.PutBlob(r, b, -1); err != nil {
|
||||||
r.Close()
|
r.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
|
@ -1 +0,0 @@
|
||||||
*.exe
|
|
1
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
1
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
|
@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
SOFTWARE.
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
|
@ -1,22 +0,0 @@
|
||||||
# go-winio
|
|
||||||
|
|
||||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
|
||||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
|
||||||
for using named pipes as a net transport.
|
|
||||||
|
|
||||||
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
|
|
||||||
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
|
|
||||||
newer operating systems. This is similar to the implementation of network sockets in Go's net
|
|
||||||
package.
|
|
||||||
|
|
||||||
Please see the LICENSE file for licensing information.
|
|
||||||
|
|
||||||
This project has adopted the [Microsoft Open Source Code of
|
|
||||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
|
|
||||||
see the [Code of Conduct
|
|
||||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
|
||||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
|
|
||||||
questions or comments.
|
|
||||||
|
|
||||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
|
||||||
for another named pipe implementation.
|
|
25
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
25
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
|
@ -26,18 +26,10 @@ const (
|
||||||
BackupReparseData
|
BackupReparseData
|
||||||
BackupSparseBlock
|
BackupSparseBlock
|
||||||
BackupTxfsData
|
BackupTxfsData
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
StreamSparseAttributes = uint32(8)
|
StreamSparseAttributes = uint32(8)
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
WRITE_DAC = 0x40000
|
|
||||||
WRITE_OWNER = 0x80000
|
|
||||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
|
||||||
)
|
|
||||||
|
|
||||||
// BackupHeader represents a backup stream of a file.
|
// BackupHeader represents a backup stream of a file.
|
||||||
type BackupHeader struct {
|
type BackupHeader struct {
|
||||||
Id uint32 // The backup stream ID
|
Id uint32 // The backup stream ID
|
||||||
|
@ -247,20 +239,3 @@ func (w *BackupFileWriter) Close() error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
|
||||||
// or restore privileges have been acquired.
|
|
||||||
//
|
|
||||||
// If the file opened was a directory, it cannot be used with Readdir().
|
|
||||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
|
||||||
winPath, err := syscall.UTF16FromString(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
|
||||||
if err != nil {
|
|
||||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return os.NewFile(uintptr(h), path), nil
|
|
||||||
}
|
|
||||||
|
|
32
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
32
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
|
@ -9,46 +9,22 @@ import (
|
||||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||||
|
|
||||||
const (
|
|
||||||
fileBasicInfo = 0
|
|
||||||
fileIDInfo = 0x12
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileBasicInfo contains file access time and file attributes information.
|
|
||||||
type FileBasicInfo struct {
|
type FileBasicInfo struct {
|
||||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
||||||
FileAttributes uintptr // includes padding
|
FileAttributes uintptr // includes padding
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
|
||||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||||
bi := &FileBasicInfo{}
|
bi := &FileBasicInfo{}
|
||||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
return nil, &os.PathError{"GetFileInformationByHandleEx", f.Name(), err}
|
||||||
}
|
}
|
||||||
return bi, nil
|
return bi, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetFileBasicInfo sets times and attributes for a file.
|
|
||||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||||
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
return &os.PathError{"SetFileInformationByHandle", f.Name(), err}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
|
||||||
// unique on a system.
|
|
||||||
type FileIDInfo struct {
|
|
||||||
VolumeSerialNumber uint64
|
|
||||||
FileID [16]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
|
||||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
|
||||||
fileID := &FileIDInfo{}
|
|
||||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
|
||||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
|
||||||
}
|
|
||||||
return fileID, nil
|
|
||||||
}
|
|
||||||
|
|
79
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
79
vendor/github.com/Microsoft/go-winio/privilege.go
generated
vendored
|
@ -5,17 +5,14 @@ import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
"unicode/utf16"
|
"unicode/utf16"
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
//sys adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||||
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||||
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||||
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) = advapi32.OpenThreadToken
|
||||||
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||||
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||||
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||||
|
@ -37,12 +34,6 @@ const (
|
||||||
securityDelegation
|
securityDelegation
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
privNames = make(map[string]uint64)
|
|
||||||
privNameMutex sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
// PrivilegeError represents an error enabling privileges.
|
|
||||||
type PrivilegeError struct {
|
type PrivilegeError struct {
|
||||||
privileges []uint64
|
privileges []uint64
|
||||||
}
|
}
|
||||||
|
@ -65,16 +56,19 @@ func (e *PrivilegeError) Error() string {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunWithPrivilege enables a single privilege for a function call.
|
|
||||||
func RunWithPrivilege(name string, fn func() error) error {
|
func RunWithPrivilege(name string, fn func() error) error {
|
||||||
return RunWithPrivileges([]string{name}, fn)
|
return RunWithPrivileges([]string{name}, fn)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunWithPrivileges enables privileges for a function call.
|
|
||||||
func RunWithPrivileges(names []string, fn func() error) error {
|
func RunWithPrivileges(names []string, fn func() error) error {
|
||||||
privileges, err := mapPrivileges(names)
|
var privileges []uint64
|
||||||
if err != nil {
|
for _, name := range names {
|
||||||
return err
|
p := uint64(0)
|
||||||
|
err := lookupPrivilegeValue("", name, &p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
privileges = append(privileges, p)
|
||||||
}
|
}
|
||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
defer runtime.UnlockOSThread()
|
defer runtime.UnlockOSThread()
|
||||||
|
@ -90,43 +84,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
|
||||||
return fn()
|
return fn()
|
||||||
}
|
}
|
||||||
|
|
||||||
func mapPrivileges(names []string) ([]uint64, error) {
|
func adjustPrivileges(token syscall.Handle, privileges []uint64) error {
|
||||||
var privileges []uint64
|
|
||||||
privNameMutex.Lock()
|
|
||||||
defer privNameMutex.Unlock()
|
|
||||||
for _, name := range names {
|
|
||||||
p, ok := privNames[name]
|
|
||||||
if !ok {
|
|
||||||
err := lookupPrivilegeValue("", name, &p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
privNames[name] = p
|
|
||||||
}
|
|
||||||
privileges = append(privileges, p)
|
|
||||||
}
|
|
||||||
return privileges, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnableProcessPrivileges enables privileges globally for the process.
|
|
||||||
func EnableProcessPrivileges(names []string) error {
|
|
||||||
privileges, err := mapPrivileges(names)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
p, _ := windows.GetCurrentProcess()
|
|
||||||
var token windows.Token
|
|
||||||
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer token.Close()
|
|
||||||
return adjustPrivileges(token, privileges)
|
|
||||||
}
|
|
||||||
|
|
||||||
func adjustPrivileges(token windows.Token, privileges []uint64) error {
|
|
||||||
var b bytes.Buffer
|
var b bytes.Buffer
|
||||||
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||||
for _, p := range privileges {
|
for _, p := range privileges {
|
||||||
|
@ -155,22 +113,23 @@ func getPrivilegeName(luid uint64) string {
|
||||||
|
|
||||||
var displayNameBuffer [256]uint16
|
var displayNameBuffer [256]uint16
|
||||||
displayBufSize := uint32(len(displayNameBuffer))
|
displayBufSize := uint32(len(displayNameBuffer))
|
||||||
var langID uint32
|
var langId uint32
|
||||||
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
|
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
|
return fmt.Sprintf("<unknown privilege %s>", utf16.Decode(nameBuffer[:bufSize]))
|
||||||
}
|
}
|
||||||
|
|
||||||
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
|
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
|
||||||
}
|
}
|
||||||
|
|
||||||
func newThreadToken() (windows.Token, error) {
|
func newThreadToken() (syscall.Handle, error) {
|
||||||
err := impersonateSelf(securityImpersonation)
|
err := impersonateSelf(securityImpersonation)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var token windows.Token
|
var token syscall.Handle
|
||||||
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
|
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
rerr := revertToSelf()
|
rerr := revertToSelf()
|
||||||
|
@ -182,10 +141,10 @@ func newThreadToken() (windows.Token, error) {
|
||||||
return token, nil
|
return token, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func releaseThreadToken(h windows.Token) {
|
func releaseThreadToken(h syscall.Handle) {
|
||||||
err := revertToSelf()
|
err := revertToSelf()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
h.Close()
|
syscall.Close(h)
|
||||||
}
|
}
|
||||||
|
|
12
vendor/github.com/Microsoft/go-winio/reparse.go
generated
vendored
12
vendor/github.com/Microsoft/go-winio/reparse.go
generated
vendored
|
@ -43,12 +43,8 @@ func (e *UnsupportedReparsePointError) Error() string {
|
||||||
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||||
// or a mount point.
|
// or a mount point.
|
||||||
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
|
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
|
||||||
tag := binary.LittleEndian.Uint32(b[0:4])
|
|
||||||
return DecodeReparsePointData(tag, b[8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
|
|
||||||
isMountPoint := false
|
isMountPoint := false
|
||||||
|
tag := binary.LittleEndian.Uint32(b[0:4])
|
||||||
switch tag {
|
switch tag {
|
||||||
case reparseTagMountPoint:
|
case reparseTagMountPoint:
|
||||||
isMountPoint = true
|
isMountPoint = true
|
||||||
|
@ -56,11 +52,11 @@ func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
|
||||||
default:
|
default:
|
||||||
return nil, &UnsupportedReparsePointError{tag}
|
return nil, &UnsupportedReparsePointError{tag}
|
||||||
}
|
}
|
||||||
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
|
nameOffset := 16 + binary.LittleEndian.Uint16(b[12:14])
|
||||||
if !isMountPoint {
|
if !isMountPoint {
|
||||||
nameOffset += 4
|
nameOffset += 4
|
||||||
}
|
}
|
||||||
nameLength := binary.LittleEndian.Uint16(b[6:8])
|
nameLength := binary.LittleEndian.Uint16(b[14:16])
|
||||||
name := make([]uint16, nameLength/2)
|
name := make([]uint16, nameLength/2)
|
||||||
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
|
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -80,7 +76,7 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
|
||||||
var ntTarget string
|
var ntTarget string
|
||||||
relative := false
|
relative := false
|
||||||
if strings.HasPrefix(rp.Target, `\\?\`) {
|
if strings.HasPrefix(rp.Target, `\\?\`) {
|
||||||
ntTarget = `\??\` + rp.Target[4:]
|
ntTarget = rp.Target
|
||||||
} else if strings.HasPrefix(rp.Target, `\\`) {
|
} else if strings.HasPrefix(rp.Target, `\\`) {
|
||||||
ntTarget = `\??\UNC\` + rp.Target[2:]
|
ntTarget = `\??\UNC\` + rp.Target[2:]
|
||||||
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
|
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
|
||||||
|
|
12
vendor/github.com/Microsoft/go-winio/zsyscall.go
generated
vendored
12
vendor/github.com/Microsoft/go-winio/zsyscall.go
generated
vendored
|
@ -2,12 +2,8 @@
|
||||||
|
|
||||||
package winio
|
package winio
|
||||||
|
|
||||||
import (
|
import "unsafe"
|
||||||
"syscall"
|
import "syscall"
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ unsafe.Pointer
|
var _ unsafe.Pointer
|
||||||
|
|
||||||
|
@ -304,7 +300,7 @@ func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, si
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
func adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||||
var _p0 uint32
|
var _p0 uint32
|
||||||
if releaseAll {
|
if releaseAll {
|
||||||
_p0 = 1
|
_p0 = 1
|
||||||
|
@ -347,7 +343,7 @@ func revertToSelf() (err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) {
|
||||||
var _p0 uint32
|
var _p0 uint32
|
||||||
if openAsSelf {
|
if openAsSelf {
|
||||||
_p0 = 1
|
_p0 = 1
|
||||||
|
|
1
vendor/github.com/Sirupsen/logrus/.gitignore
generated
vendored
1
vendor/github.com/Sirupsen/logrus/.gitignore
generated
vendored
|
@ -1 +0,0 @@
|
||||||
logrus
|
|
10
vendor/github.com/Sirupsen/logrus/.travis.yml
generated
vendored
10
vendor/github.com/Sirupsen/logrus/.travis.yml
generated
vendored
|
@ -1,10 +0,0 @@
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.3
|
|
||||||
- 1.4
|
|
||||||
- 1.5
|
|
||||||
- 1.6
|
|
||||||
- tip
|
|
||||||
install:
|
|
||||||
- go get -t ./...
|
|
||||||
script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
|
|
66
vendor/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
66
vendor/github.com/Sirupsen/logrus/CHANGELOG.md
generated
vendored
|
@ -1,66 +0,0 @@
|
||||||
# 0.10.0
|
|
||||||
|
|
||||||
* feature: Add a test hook (#180)
|
|
||||||
* feature: `ParseLevel` is now case-insensitive (#326)
|
|
||||||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
|
||||||
* performance: avoid re-allocations on `WithFields` (#335)
|
|
||||||
|
|
||||||
# 0.9.0
|
|
||||||
|
|
||||||
* logrus/text_formatter: don't emit empty msg
|
|
||||||
* logrus/hooks/airbrake: move out of main repository
|
|
||||||
* logrus/hooks/sentry: move out of main repository
|
|
||||||
* logrus/hooks/papertrail: move out of main repository
|
|
||||||
* logrus/hooks/bugsnag: move out of main repository
|
|
||||||
* logrus/core: run tests with `-race`
|
|
||||||
* logrus/core: detect TTY based on `stderr`
|
|
||||||
* logrus/core: support `WithError` on logger
|
|
||||||
* logrus/core: Solaris support
|
|
||||||
|
|
||||||
# 0.8.7
|
|
||||||
|
|
||||||
* logrus/core: fix possible race (#216)
|
|
||||||
* logrus/doc: small typo fixes and doc improvements
|
|
||||||
|
|
||||||
|
|
||||||
# 0.8.6
|
|
||||||
|
|
||||||
* hooks/raven: allow passing an initialized client
|
|
||||||
|
|
||||||
# 0.8.5
|
|
||||||
|
|
||||||
* logrus/core: revert #208
|
|
||||||
|
|
||||||
# 0.8.4
|
|
||||||
|
|
||||||
* formatter/text: fix data race (#218)
|
|
||||||
|
|
||||||
# 0.8.3
|
|
||||||
|
|
||||||
* logrus/core: fix entry log level (#208)
|
|
||||||
* logrus/core: improve performance of text formatter by 40%
|
|
||||||
* logrus/core: expose `LevelHooks` type
|
|
||||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
|
||||||
* formatter/text: print structs more verbosely
|
|
||||||
|
|
||||||
# 0.8.2
|
|
||||||
|
|
||||||
* logrus: fix more Fatal family functions
|
|
||||||
|
|
||||||
# 0.8.1
|
|
||||||
|
|
||||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
|
||||||
|
|
||||||
# 0.8.0
|
|
||||||
|
|
||||||
* logrus: defaults to stderr instead of stdout
|
|
||||||
* hooks/sentry: add special field for `*http.Request`
|
|
||||||
* formatter/text: ignore Windows for colors
|
|
||||||
|
|
||||||
# 0.7.3
|
|
||||||
|
|
||||||
* formatter/\*: allow configuration of timestamp layout
|
|
||||||
|
|
||||||
# 0.7.2
|
|
||||||
|
|
||||||
* formatter/text: Add configuration option for time format (#158)
|
|
402
vendor/github.com/Sirupsen/logrus/README.md
generated
vendored
402
vendor/github.com/Sirupsen/logrus/README.md
generated
vendored
|
@ -1,402 +0,0 @@
|
||||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
|
|
||||||
|
|
||||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
|
||||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
|
||||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
|
||||||
many large deployments. The core API is unlikely to change much but please
|
|
||||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
|
||||||
every build.**
|
|
||||||
|
|
||||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
|
||||||
plain text):
|
|
||||||
|
|
||||||
![Colored](http://i.imgur.com/PY7qMwd.png)
|
|
||||||
|
|
||||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
|
||||||
or Splunk:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
|
||||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
|
||||||
|
|
||||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
|
||||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
|
||||||
|
|
||||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
|
||||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
|
||||||
|
|
||||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
|
||||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
|
||||||
|
|
||||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
|
||||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
|
||||||
```
|
|
||||||
|
|
||||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
|
||||||
attached, the output is compatible with the
|
|
||||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
|
||||||
|
|
||||||
```text
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
|
||||||
exit status 1
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Example
|
|
||||||
|
|
||||||
The simplest way to use Logrus is simply the package-level exported logger:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
}).Info("A walrus appears")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
|
||||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
|
||||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
|
||||||
want:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Log as JSON instead of the default ASCII formatter.
|
|
||||||
log.SetFormatter(&log.JSONFormatter{})
|
|
||||||
|
|
||||||
// Output to stderr instead of stdout, could also be a file.
|
|
||||||
log.SetOutput(os.Stderr)
|
|
||||||
|
|
||||||
// Only log the warning severity or above.
|
|
||||||
log.SetLevel(log.WarnLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
"size": 10,
|
|
||||||
}).Info("A group of walrus emerges from the ocean")
|
|
||||||
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"omg": true,
|
|
||||||
"number": 122,
|
|
||||||
}).Warn("The group's number increased tremendously!")
|
|
||||||
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"omg": true,
|
|
||||||
"number": 100,
|
|
||||||
}).Fatal("The ice breaks!")
|
|
||||||
|
|
||||||
// A common pattern is to re-use fields between logging statements by re-using
|
|
||||||
// the logrus.Entry returned from WithFields()
|
|
||||||
contextLogger := log.WithFields(log.Fields{
|
|
||||||
"common": "this is a common field",
|
|
||||||
"other": "I also should be logged always",
|
|
||||||
})
|
|
||||||
|
|
||||||
contextLogger.Info("I'll be logged with common and other field")
|
|
||||||
contextLogger.Info("Me too")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
For more advanced usage such as logging to multiple locations from the same
|
|
||||||
application, you can also create an instance of the `logrus` Logger:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create a new instance of the logger. You can have any number of instances.
|
|
||||||
var log = logrus.New()
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// The API for setting attributes is a little different than the package level
|
|
||||||
// exported logger. See Godoc.
|
|
||||||
log.Out = os.Stderr
|
|
||||||
|
|
||||||
log.WithFields(logrus.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
"size": 10,
|
|
||||||
}).Info("A group of walrus emerges from the ocean")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Fields
|
|
||||||
|
|
||||||
Logrus encourages careful, structured logging though logging fields instead of
|
|
||||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
|
||||||
to send event %s to topic %s with key %d")`, you should log the much more
|
|
||||||
discoverable:
|
|
||||||
|
|
||||||
```go
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"event": event,
|
|
||||||
"topic": topic,
|
|
||||||
"key": key,
|
|
||||||
}).Fatal("Failed to send event")
|
|
||||||
```
|
|
||||||
|
|
||||||
We've found this API forces you to think about logging in a way that produces
|
|
||||||
much more useful logging messages. We've been in countless situations where just
|
|
||||||
a single added field to a log statement that was already there would've saved us
|
|
||||||
hours. The `WithFields` call is optional.
|
|
||||||
|
|
||||||
In general, with Logrus using any of the `printf`-family functions should be
|
|
||||||
seen as a hint you should add a field, however, you can still use the
|
|
||||||
`printf`-family functions with Logrus.
|
|
||||||
|
|
||||||
#### Hooks
|
|
||||||
|
|
||||||
You can add hooks for logging levels. For example to send errors to an exception
|
|
||||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
|
||||||
multiple places simultaneously, e.g. syslog.
|
|
||||||
|
|
||||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
|
||||||
`init`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
|
||||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
|
||||||
"log/syslog"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
|
|
||||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
|
||||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
|
||||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
|
||||||
|
|
||||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Unable to connect to local syslog daemon")
|
|
||||||
} else {
|
|
||||||
log.AddHook(hook)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
|
||||||
|
|
||||||
| Hook | Description |
|
|
||||||
| ----- | ----------- |
|
|
||||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
|
||||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
|
||||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
|
||||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
|
||||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
|
||||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
|
||||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
|
||||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
|
||||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
|
||||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
|
||||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
|
||||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
|
||||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
|
||||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
|
||||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
|
||||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
|
||||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
|
||||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
|
||||||
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
|
|
||||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
|
||||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
|
||||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
|
||||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
|
||||||
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
|
||||||
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
|
||||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
|
||||||
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
|
||||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
|
||||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
|
||||||
|
|
||||||
#### Level logging
|
|
||||||
|
|
||||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
|
||||||
|
|
||||||
```go
|
|
||||||
log.Debug("Useful debugging information.")
|
|
||||||
log.Info("Something noteworthy happened!")
|
|
||||||
log.Warn("You should probably take a look at this.")
|
|
||||||
log.Error("Something failed but I'm not quitting.")
|
|
||||||
// Calls os.Exit(1) after logging
|
|
||||||
log.Fatal("Bye.")
|
|
||||||
// Calls panic() after logging
|
|
||||||
log.Panic("I'm bailing.")
|
|
||||||
```
|
|
||||||
|
|
||||||
You can set the logging level on a `Logger`, then it will only log entries with
|
|
||||||
that severity or anything above it:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
|
||||||
log.SetLevel(log.InfoLevel)
|
|
||||||
```
|
|
||||||
|
|
||||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
|
||||||
environment if your application has that.
|
|
||||||
|
|
||||||
#### Entries
|
|
||||||
|
|
||||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
|
||||||
automatically added to all logging events:
|
|
||||||
|
|
||||||
1. `time`. The timestamp when the entry was created.
|
|
||||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
|
||||||
the `AddFields` call. E.g. `Failed to send event.`
|
|
||||||
3. `level`. The logging level. E.g. `info`.
|
|
||||||
|
|
||||||
#### Environments
|
|
||||||
|
|
||||||
Logrus has no notion of environment.
|
|
||||||
|
|
||||||
If you wish for hooks and formatters to only be used in specific environments,
|
|
||||||
you should handle that yourself. For example, if your application has a global
|
|
||||||
variable `Environment`, which is a string representation of the environment you
|
|
||||||
could do:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
init() {
|
|
||||||
// do something here to set environment depending on an environment variable
|
|
||||||
// or command-line flag
|
|
||||||
if Environment == "production" {
|
|
||||||
log.SetFormatter(&log.JSONFormatter{})
|
|
||||||
} else {
|
|
||||||
// The TextFormatter is default, you don't actually have to do this.
|
|
||||||
log.SetFormatter(&log.TextFormatter{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This configuration is how `logrus` was intended to be used, but JSON in
|
|
||||||
production is mostly only useful if you do log aggregation with tools like
|
|
||||||
Splunk or Logstash.
|
|
||||||
|
|
||||||
#### Formatters
|
|
||||||
|
|
||||||
The built-in logging formatters are:
|
|
||||||
|
|
||||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
|
||||||
without colors.
|
|
||||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
|
||||||
field to `true`. To force no colored output even if there is a TTY set the
|
|
||||||
`DisableColors` field to `true`
|
|
||||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
|
||||||
|
|
||||||
Third party logging formatters:
|
|
||||||
|
|
||||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
|
||||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
|
||||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
|
||||||
|
|
||||||
You can define your formatter by implementing the `Formatter` interface,
|
|
||||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
|
||||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
|
||||||
default ones (see Entries section above):
|
|
||||||
|
|
||||||
```go
|
|
||||||
type MyJSONFormatter struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
log.SetFormatter(new(MyJSONFormatter))
|
|
||||||
|
|
||||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|
||||||
// Note this doesn't include Time, Level and Message which are available on
|
|
||||||
// the Entry. Consult `godoc` on information about those fields or read the
|
|
||||||
// source of the official loggers.
|
|
||||||
serialized, err := json.Marshal(entry.Data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
|
||||||
}
|
|
||||||
return append(serialized, '\n'), nil
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Logger as an `io.Writer`
|
|
||||||
|
|
||||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
|
||||||
|
|
||||||
```go
|
|
||||||
w := logger.Writer()
|
|
||||||
defer w.Close()
|
|
||||||
|
|
||||||
srv := http.Server{
|
|
||||||
// create a stdlib log.Logger that writes to
|
|
||||||
// logrus.Logger.
|
|
||||||
ErrorLog: log.New(w, "", 0),
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Each line written to that writer will be printed the usual way, using formatters
|
|
||||||
and hooks. The level for those entries is `info`.
|
|
||||||
|
|
||||||
#### Rotation
|
|
||||||
|
|
||||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
|
||||||
external program (like `logrotate(8)`) that can compress and delete old log
|
|
||||||
entries. It should not be a feature of the application-level logger.
|
|
||||||
|
|
||||||
#### Tools
|
|
||||||
|
|
||||||
| Tool | Description |
|
|
||||||
| ---- | ----------- |
|
|
||||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
|
||||||
|
|
||||||
#### Testing
|
|
||||||
|
|
||||||
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
|
||||||
|
|
||||||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
|
||||||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
|
||||||
|
|
||||||
```go
|
|
||||||
logger, hook := NewNullLogger()
|
|
||||||
logger.Error("Hello error")
|
|
||||||
|
|
||||||
assert.Equal(1, len(hook.Entries))
|
|
||||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
|
||||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
|
||||||
|
|
||||||
hook.Reset()
|
|
||||||
assert.Nil(hook.LastEntry())
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Fatal handlers
|
|
||||||
|
|
||||||
Logrus can register one or more functions that will be called when any `fatal`
|
|
||||||
level message is logged. The registered handlers will be executed before
|
|
||||||
logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
|
|
||||||
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
|
|
||||||
|
|
||||||
```
|
|
||||||
...
|
|
||||||
handler := func() {
|
|
||||||
// gracefully shutdown something...
|
|
||||||
}
|
|
||||||
logrus.RegisterExitHandler(handler)
|
|
||||||
...
|
|
||||||
```
|
|
64
vendor/github.com/Sirupsen/logrus/alt_exit.go
generated
vendored
64
vendor/github.com/Sirupsen/logrus/alt_exit.go
generated
vendored
|
@ -1,64 +0,0 @@
|
||||||
package logrus
|
|
||||||
|
|
||||||
// The following code was sourced and modified from the
|
|
||||||
// https://bitbucket.org/tebeka/atexit package governed by the following license:
|
|
||||||
//
|
|
||||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
|
||||||
//
|
|
||||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
// this software and associated documentation files (the "Software"), to deal in
|
|
||||||
// the Software without restriction, including without limitation the rights to
|
|
||||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|
||||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
|
||||||
// subject to the following conditions:
|
|
||||||
//
|
|
||||||
// The above copyright notice and this permission notice shall be included in all
|
|
||||||
// copies or substantial portions of the Software.
|
|
||||||
//
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|
||||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|
||||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|
||||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
||||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
var handlers = []func(){}
|
|
||||||
|
|
||||||
func runHandler(handler func()) {
|
|
||||||
defer func() {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
handler()
|
|
||||||
}
|
|
||||||
|
|
||||||
func runHandlers() {
|
|
||||||
for _, handler := range handlers {
|
|
||||||
runHandler(handler)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
|
||||||
func Exit(code int) {
|
|
||||||
runHandlers()
|
|
||||||
os.Exit(code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
|
||||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
|
||||||
// made.
|
|
||||||
//
|
|
||||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
|
||||||
// message but also needs to gracefully shutdown. An example usecase could be
|
|
||||||
// closing database connections, or sending a alert that the application is
|
|
||||||
// closing.
|
|
||||||
func RegisterExitHandler(handler func()) {
|
|
||||||
handlers = append(handlers, handler)
|
|
||||||
}
|
|
6
vendor/github.com/Sirupsen/logrus/entry.go
generated
vendored
6
vendor/github.com/Sirupsen/logrus/entry.go
generated
vendored
|
@ -150,7 +150,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.Level >= FatalLevel {
|
||||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panic(args ...interface{}) {
|
func (entry *Entry) Panic(args ...interface{}) {
|
||||||
|
@ -198,7 +198,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.Level >= FatalLevel {
|
||||||
entry.Fatal(fmt.Sprintf(format, args...))
|
entry.Fatal(fmt.Sprintf(format, args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||||
|
@ -245,7 +245,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
|
||||||
if entry.Logger.Level >= FatalLevel {
|
if entry.Logger.Level >= FatalLevel {
|
||||||
entry.Fatal(entry.sprintlnn(args...))
|
entry.Fatal(entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicln(args ...interface{}) {
|
func (entry *Entry) Panicln(args ...interface{}) {
|
||||||
|
|
15
vendor/github.com/Sirupsen/logrus/formatter.go
generated
vendored
15
vendor/github.com/Sirupsen/logrus/formatter.go
generated
vendored
|
@ -31,15 +31,18 @@ type Formatter interface {
|
||||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||||
// avoid code duplication between the two default formatters.
|
// avoid code duplication between the two default formatters.
|
||||||
func prefixFieldClashes(data Fields) {
|
func prefixFieldClashes(data Fields) {
|
||||||
if t, ok := data["time"]; ok {
|
_, ok := data["time"]
|
||||||
data["fields.time"] = t
|
if ok {
|
||||||
|
data["fields.time"] = data["time"]
|
||||||
}
|
}
|
||||||
|
|
||||||
if m, ok := data["msg"]; ok {
|
_, ok = data["msg"]
|
||||||
data["fields.msg"] = m
|
if ok {
|
||||||
|
data["fields.msg"] = data["msg"]
|
||||||
}
|
}
|
||||||
|
|
||||||
if l, ok := data["level"]; ok {
|
_, ok = data["level"]
|
||||||
data["fields.level"] = l
|
if ok {
|
||||||
|
data["fields.level"] = data["level"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
8
vendor/github.com/Sirupsen/logrus/logger.go
generated
vendored
8
vendor/github.com/Sirupsen/logrus/logger.go
generated
vendored
|
@ -51,7 +51,7 @@ func New() *Logger {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds a field to the log entry, note that it doesn't log until you call
|
// Adds a field to the log entry, note that you it doesn't log until you call
|
||||||
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||||
// If you want multiple fields, use `WithFields`.
|
// If you want multiple fields, use `WithFields`.
|
||||||
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||||
|
@ -108,7 +108,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.Level >= FatalLevel {
|
||||||
NewEntry(logger).Fatalf(format, args...)
|
NewEntry(logger).Fatalf(format, args...)
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||||
|
@ -155,7 +155,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.Level >= FatalLevel {
|
||||||
NewEntry(logger).Fatal(args...)
|
NewEntry(logger).Fatal(args...)
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panic(args ...interface{}) {
|
func (logger *Logger) Panic(args ...interface{}) {
|
||||||
|
@ -202,7 +202,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
|
||||||
if logger.Level >= FatalLevel {
|
if logger.Level >= FatalLevel {
|
||||||
NewEntry(logger).Fatalln(args...)
|
NewEntry(logger).Fatalln(args...)
|
||||||
}
|
}
|
||||||
Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) Panicln(args ...interface{}) {
|
func (logger *Logger) Panicln(args ...interface{}) {
|
||||||
|
|
8
vendor/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
8
vendor/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
|
@ -128,10 +128,10 @@ func needsQuoting(text string) bool {
|
||||||
(ch >= 'A' && ch <= 'Z') ||
|
(ch >= 'A' && ch <= 'Z') ||
|
||||||
(ch >= '0' && ch <= '9') ||
|
(ch >= '0' && ch <= '9') ||
|
||||||
ch == '-' || ch == '.') {
|
ch == '-' || ch == '.') {
|
||||||
return true
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
||||||
|
@ -141,14 +141,14 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
|
||||||
|
|
||||||
switch value := value.(type) {
|
switch value := value.(type) {
|
||||||
case string:
|
case string:
|
||||||
if !needsQuoting(value) {
|
if needsQuoting(value) {
|
||||||
b.WriteString(value)
|
b.WriteString(value)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(b, "%q", value)
|
fmt.Fprintf(b, "%q", value)
|
||||||
}
|
}
|
||||||
case error:
|
case error:
|
||||||
errmsg := value.Error()
|
errmsg := value.Error()
|
||||||
if !needsQuoting(errmsg) {
|
if needsQuoting(errmsg) {
|
||||||
b.WriteString(errmsg)
|
b.WriteString(errmsg)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(b, "%q", value)
|
fmt.Fprintf(b, "%q", value)
|
||||||
|
|
28
vendor/github.com/Sirupsen/logrus/writer.go
generated
vendored
28
vendor/github.com/Sirupsen/logrus/writer.go
generated
vendored
|
@ -7,40 +7,18 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func (logger *Logger) Writer() *io.PipeWriter {
|
func (logger *Logger) Writer() *io.PipeWriter {
|
||||||
return logger.WriterLevel(InfoLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
|
||||||
reader, writer := io.Pipe()
|
reader, writer := io.Pipe()
|
||||||
|
|
||||||
var printFunc func(args ...interface{})
|
go logger.writerScanner(reader)
|
||||||
switch level {
|
|
||||||
case DebugLevel:
|
|
||||||
printFunc = logger.Debug
|
|
||||||
case InfoLevel:
|
|
||||||
printFunc = logger.Info
|
|
||||||
case WarnLevel:
|
|
||||||
printFunc = logger.Warn
|
|
||||||
case ErrorLevel:
|
|
||||||
printFunc = logger.Error
|
|
||||||
case FatalLevel:
|
|
||||||
printFunc = logger.Fatal
|
|
||||||
case PanicLevel:
|
|
||||||
printFunc = logger.Panic
|
|
||||||
default:
|
|
||||||
printFunc = logger.Print
|
|
||||||
}
|
|
||||||
|
|
||||||
go logger.writerScanner(reader, printFunc)
|
|
||||||
runtime.SetFinalizer(writer, writerFinalizer)
|
runtime.SetFinalizer(writer, writerFinalizer)
|
||||||
|
|
||||||
return writer
|
return writer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
func (logger *Logger) writerScanner(reader *io.PipeReader) {
|
||||||
scanner := bufio.NewScanner(reader)
|
scanner := bufio.NewScanner(reader)
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
printFunc(scanner.Text())
|
logger.Print(scanner.Text())
|
||||||
}
|
}
|
||||||
if err := scanner.Err(); err != nil {
|
if err := scanner.Err(); err != nil {
|
||||||
logger.Errorf("Error while reading from Writer: %s", err)
|
logger.Errorf("Error while reading from Writer: %s", err)
|
||||||
|
|
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
|
@ -1 +0,0 @@
|
||||||
*.coverprofile
|
|
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
|
@ -1,12 +0,0 @@
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4.1
|
|
||||||
|
|
||||||
install:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- go install github.com/onsi/ginkgo/ginkgo
|
|
||||||
|
|
||||||
script:
|
|
||||||
- export PATH=$HOME/gopath/bin:$PATH
|
|
||||||
- ginkgo -r -failOnPending -randomizeAllSpecs -race
|
|
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
|
@ -1,59 +0,0 @@
|
||||||
[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
|
|
||||||
[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
|
|
||||||
|
|
||||||
|
|
||||||
candiedyaml
|
|
||||||
===========
|
|
||||||
|
|
||||||
YAML for Go
|
|
||||||
|
|
||||||
A YAML 1.1 parser with support for YAML 1.2 features
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
```go
|
|
||||||
package myApp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
file, err := os.Open("path/to/some/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("File does not exist:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
document := new(interface{})
|
|
||||||
decoder := candiedyaml.NewDecoder(file)
|
|
||||||
err = decoder.Decode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to decode document:", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
println("parsed yml into interface:", fmt.Sprintf("%#v", document))
|
|
||||||
|
|
||||||
fileToWrite, err := os.Create("path/to/some/new/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to open file for writing:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer fileToWrite.Close()
|
|
||||||
|
|
||||||
encoder := candiedyaml.NewEncoder(fileToWrite)
|
|
||||||
err = encoder.Encode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to encode document:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
```
|
|
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
|
@ -1,834 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new parser object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
|
||||||
*parser = yaml_parser_t{
|
|
||||||
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
|
|
||||||
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a parser object.
|
|
||||||
*/
|
|
||||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
|
||||||
*parser = yaml_parser_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
if parser.input_pos == len(parser.input) {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
n := copy(buffer, parser.input[parser.input_pos:])
|
|
||||||
parser.input_pos += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
return parser.input_reader.Read(buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_string_read_handler
|
|
||||||
|
|
||||||
parser.input = input
|
|
||||||
parser.input_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a reader input
|
|
||||||
*/
|
|
||||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_file_read_handler
|
|
||||||
parser.input_reader = reader
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the source encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
|
||||||
if parser.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new emitter object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{
|
|
||||||
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
|
|
||||||
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
|
|
||||||
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
|
|
||||||
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
_, err := emitter.output_writer.Write(buffer)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_string_write_handler
|
|
||||||
emitter.output_buffer = buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a file output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_writer_write_handler
|
|
||||||
emitter.output_writer = w
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic output handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the output encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
|
||||||
if emitter.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the canonical output style.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
|
||||||
emitter.canonical = canonical
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the indentation increment.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
|
||||||
if indent < 2 || indent > 9 {
|
|
||||||
indent = 2
|
|
||||||
}
|
|
||||||
emitter.best_indent = indent
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line width.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
|
||||||
if width < 0 {
|
|
||||||
width = -1
|
|
||||||
}
|
|
||||||
emitter.best_width = width
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set if unescaped non-ASCII characters are allowed.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
|
||||||
emitter.unicode = unicode
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line break character.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
|
||||||
emitter.line_break = line_break
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a token object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_token_delete(yaml_token_t *token)
|
|
||||||
// {
|
|
||||||
// assert(token); /* Non-NULL token object expected. */
|
|
||||||
//
|
|
||||||
// switch (token.type)
|
|
||||||
// {
|
|
||||||
// case yaml_TAG_DIRECTIVE_TOKEN:
|
|
||||||
// yaml_free(token.data.tag_directive.handle);
|
|
||||||
// yaml_free(token.data.tag_directive.prefix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ALIAS_TOKEN:
|
|
||||||
// yaml_free(token.data.alias.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ANCHOR_TOKEN:
|
|
||||||
// yaml_free(token.data.anchor.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_TAG_TOKEN:
|
|
||||||
// yaml_free(token.data.tag.handle);
|
|
||||||
// yaml_free(token.data.tag.suffix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_SCALAR_TOKEN:
|
|
||||||
// yaml_free(token.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// default:
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// memset(token, 0, sizeof(yaml_token_t));
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if a string is a valid UTF-8 sequence.
|
|
||||||
*
|
|
||||||
* Check 'reader.c' for more details on UTF-8 encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// static int
|
|
||||||
// yaml_check_utf8(yaml_char_t *start, size_t length)
|
|
||||||
// {
|
|
||||||
// yaml_char_t *end = start+length;
|
|
||||||
// yaml_char_t *pointer = start;
|
|
||||||
//
|
|
||||||
// while (pointer < end) {
|
|
||||||
// unsigned char octet;
|
|
||||||
// unsigned int width;
|
|
||||||
// unsigned int value;
|
|
||||||
// size_t k;
|
|
||||||
//
|
|
||||||
// octet = pointer[0];
|
|
||||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
|
||||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
|
||||||
// if (!width) return 0;
|
|
||||||
// if (pointer+width > end) return 0;
|
|
||||||
// for (k = 1; k < width; k ++) {
|
|
||||||
// octet = pointer[k];
|
|
||||||
// if ((octet & 0xC0) != 0x80) return 0;
|
|
||||||
// value = (value << 6) + (octet & 0x3F);
|
|
||||||
// }
|
|
||||||
// if (!((width == 1) ||
|
|
||||||
// (width == 2 && value >= 0x80) ||
|
|
||||||
// (width == 3 && value >= 0x800) ||
|
|
||||||
// (width == 4 && value >= 0x10000))) return 0;
|
|
||||||
//
|
|
||||||
// pointer += width;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_STREAM_START_EVENT,
|
|
||||||
encoding: encoding,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_STREAM_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_document_start_event_initialize(event *yaml_event_t,
|
|
||||||
version_directive *yaml_version_directive_t,
|
|
||||||
tag_directives []yaml_tag_directive_t,
|
|
||||||
implicit bool) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_DOCUMENT_START_EVENT,
|
|
||||||
version_directive: version_directive,
|
|
||||||
tag_directives: tag_directives,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_DOCUMENT_END_EVENT,
|
|
||||||
implicit: implicit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create ALIAS.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_ALIAS_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SCALAR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_scalar_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte,
|
|
||||||
value []byte,
|
|
||||||
plain_implicit bool, quoted_implicit bool,
|
|
||||||
style yaml_scalar_style_t) {
|
|
||||||
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SCALAR_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
value: value,
|
|
||||||
implicit: plain_implicit,
|
|
||||||
quoted_implicit: quoted_implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_sequence_start_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SEQUENCE_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_SEQUENCE_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_mapping_start_event_initialize(event *yaml_event_t,
|
|
||||||
anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_MAPPING_START_EVENT,
|
|
||||||
anchor: anchor,
|
|
||||||
tag: tag,
|
|
||||||
implicit: implicit,
|
|
||||||
style: yaml_style_t(style),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{
|
|
||||||
event_type: yaml_MAPPING_END_EVENT,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy an event object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_event_delete(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Create a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// func yaml_document_initialize(document *yaml_document_t,
|
|
||||||
// version_directive *yaml_version_directive_t,
|
|
||||||
// tag_directives []yaml_tag_directive_t,
|
|
||||||
// start_implicit, end_implicit bool) bool {
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_t *start;
|
|
||||||
// yaml_node_t *end;
|
|
||||||
// yaml_node_t *top;
|
|
||||||
// } nodes = { NULL, NULL, NULL };
|
|
||||||
// yaml_version_directive_t *version_directive_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_tag_directive_t *start;
|
|
||||||
// yaml_tag_directive_t *end;
|
|
||||||
// yaml_tag_directive_t *top;
|
|
||||||
// } tag_directives_copy = { NULL, NULL, NULL };
|
|
||||||
// yaml_tag_directive_t value = { NULL, NULL };
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert((tag_directives_start && tag_directives_end) ||
|
|
||||||
// (tag_directives_start == tag_directives_end));
|
|
||||||
// /* Valid tag directives are expected. */
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// if (version_directive) {
|
|
||||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
|
|
||||||
// if (!version_directive_copy) goto error;
|
|
||||||
// version_directive_copy.major = version_directive.major;
|
|
||||||
// version_directive_copy.minor = version_directive.minor;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (tag_directives_start != tag_directives_end) {
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
|
||||||
// goto error;
|
|
||||||
// for (tag_directive = tag_directives_start;
|
|
||||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
|
||||||
// assert(tag_directive.handle);
|
|
||||||
// assert(tag_directive.prefix);
|
|
||||||
// if (!yaml_check_utf8(tag_directive.handle,
|
|
||||||
// strlen((char *)tag_directive.handle)))
|
|
||||||
// goto error;
|
|
||||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
|
||||||
// strlen((char *)tag_directive.prefix)))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = yaml_strdup(tag_directive.handle);
|
|
||||||
// value.prefix = yaml_strdup(tag_directive.prefix);
|
|
||||||
// if (!value.handle || !value.prefix) goto error;
|
|
||||||
// if (!PUSH(&context, tag_directives_copy, value))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = NULL;
|
|
||||||
// value.prefix = NULL;
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
|
||||||
// tag_directives_copy.start, tag_directives_copy.top,
|
|
||||||
// start_implicit, end_implicit, mark, mark);
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, nodes);
|
|
||||||
// yaml_free(version_directive_copy);
|
|
||||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
|
||||||
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Destroy a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_document_delete(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
//
|
|
||||||
// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
|
||||||
// yaml_node_t node = POP(&context, document.nodes);
|
|
||||||
// yaml_free(node.tag);
|
|
||||||
// switch (node.type) {
|
|
||||||
// case yaml_SCALAR_NODE:
|
|
||||||
// yaml_free(node.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
// case yaml_SEQUENCE_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.sequence.items);
|
|
||||||
// break;
|
|
||||||
// case yaml_MAPPING_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.mapping.pairs);
|
|
||||||
// break;
|
|
||||||
// default:
|
|
||||||
// assert(0); /* Should not happen. */
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, document.nodes);
|
|
||||||
//
|
|
||||||
// yaml_free(document.version_directive);
|
|
||||||
// for (tag_directive = document.tag_directives.start;
|
|
||||||
// tag_directive != document.tag_directives.end;
|
|
||||||
// tag_directive++) {
|
|
||||||
// yaml_free(tag_directive.handle);
|
|
||||||
// yaml_free(tag_directive.prefix);
|
|
||||||
// }
|
|
||||||
// yaml_free(document.tag_directives.start);
|
|
||||||
//
|
|
||||||
// memset(document, 0, sizeof(yaml_document_t));
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get a document node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_node(document *yaml_document_t, int index)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
|
||||||
// return document.nodes.start + index - 1;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get the root object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_root_node(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (document.nodes.top != document.nodes.start) {
|
|
||||||
// return document.nodes.start;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a scalar node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_scalar(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_char_t *value, int length,
|
|
||||||
// yaml_scalar_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// yaml_char_t *value_copy = NULL;
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert(value); /* Non-NULL value is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (length < 0) {
|
|
||||||
// length = strlen((char *)value);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(value, length)) goto error;
|
|
||||||
// value_copy = yaml_malloc(length+1);
|
|
||||||
// if (!value_copy) goto error;
|
|
||||||
// memcpy(value_copy, value, length);
|
|
||||||
// value_copy[length] = '\0';
|
|
||||||
//
|
|
||||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
// yaml_free(value_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a sequence node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_sequence(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_sequence_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_item_t *start;
|
|
||||||
// yaml_node_item_t *end;
|
|
||||||
// yaml_node_item_t *top;
|
|
||||||
// } items = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, items);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a mapping node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_mapping(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_mapping_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_pair_t *start;
|
|
||||||
// yaml_node_pair_t *end;
|
|
||||||
// yaml_node_pair_t *top;
|
|
||||||
// } pairs = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, pairs);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append an item to a sequence node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_sequence_item(document *yaml_document_t,
|
|
||||||
// int sequence, int item)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(sequence > 0
|
|
||||||
// && document.nodes.start + sequence <= document.nodes.top);
|
|
||||||
// /* Valid sequence id is required. */
|
|
||||||
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
|
|
||||||
// /* A sequence node is required. */
|
|
||||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
|
|
||||||
// /* Valid item id is required. */
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append a pair of a key and a value to a mapping node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_mapping_pair(document *yaml_document_t,
|
|
||||||
// int mapping, int key, int value)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// yaml_node_pair_t pair;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(mapping > 0
|
|
||||||
// && document.nodes.start + mapping <= document.nodes.top);
|
|
||||||
// /* Valid mapping id is required. */
|
|
||||||
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
|
|
||||||
// /* A mapping node is required. */
|
|
||||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
|
|
||||||
// /* Valid key id is required. */
|
|
||||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
|
|
||||||
// /* Valid value id is required. */
|
|
||||||
//
|
|
||||||
// pair.key = key;
|
|
||||||
// pair.value = value;
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
|
@ -1,622 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Unmarshaler is implemented by types that want to customize how a YAML
// value is decoded into them. UnmarshalYAML receives the resolved YAML tag
// of the value and the decoded raw value.
type Unmarshaler interface {
	UnmarshalYAML(tag string, value interface{}) error
}
|
|
||||||
|
|
||||||
// A Number represents a JSON number literal.
// It preserves the exact textual form of the number so callers can choose
// between integer and floating-point interpretation (see Decoder.UseNumber).
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(string(n), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(string(n), 10, 64)
}
|
|
||||||
|
|
||||||
// Decoder reads YAML events from an input stream and decodes them into
// Go values.
type Decoder struct {
	parser        yaml_parser_t   // low-level event parser
	event         yaml_event_t    // event currently being processed
	replay_events []yaml_event_t  // pending events being replayed for an alias
	useNumber     bool            // decode numbers as Number instead of float64

	// anchors maps a completed anchor name to the events it recorded,
	// so later aliases can replay them.
	anchors map[string][]yaml_event_t
	// tracking_anchors is a stack of in-progress anchor recordings
	// (nested anchors each get their own entry).
	tracking_anchors [][]yaml_event_t
}
|
|
||||||
|
|
||||||
type ParserError struct {
|
|
||||||
ErrorType YAML_error_type_t
|
|
||||||
Context string
|
|
||||||
ContextMark YAML_mark_t
|
|
||||||
Problem string
|
|
||||||
ProblemMark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ParserError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
type UnexpectedEventError struct {
|
|
||||||
Value string
|
|
||||||
EventType yaml_event_type_t
|
|
||||||
At YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *UnexpectedEventError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// recovery is installed with defer to convert a decoder panic back into an
// ordinary error stored in *err. Runtime errors (genuine bugs such as nil
// dereferences) are re-panicked rather than swallowed.
func recovery(err *error) {
	r := recover()
	if r == nil {
		return
	}
	if _, isRuntime := r.(runtime.Error); isRuntime {
		panic(r)
	}

	switch v := r.(type) {
	case error:
		*err = v
	case string:
		*err = errors.New(v)
	default:
		*err = errors.New("Unknown panic: " + reflect.ValueOf(v).String())
	}
}
|
|
||||||
|
|
||||||
func Unmarshal(data []byte, v interface{}) error {
|
|
||||||
d := NewDecoder(bytes.NewBuffer(data))
|
|
||||||
return d.Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDecoder(r io.Reader) *Decoder {
|
|
||||||
d := &Decoder{
|
|
||||||
anchors: make(map[string][]yaml_event_t),
|
|
||||||
tracking_anchors: make([][]yaml_event_t, 1),
|
|
||||||
}
|
|
||||||
yaml_parser_initialize(&d.parser)
|
|
||||||
yaml_parser_set_input_reader(&d.parser, r)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode reads the next YAML document from the stream and stores it in the
// value pointed to by v. v must be a non-nil pointer. Internal decoding
// failures are raised as panics and converted back into the returned error
// by the deferred recovery handler.
func (d *Decoder) Decode(v interface{}) (err error) {
	defer recovery(&err)

	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
	}

	// First call on this decoder: consume the STREAM-START event before
	// the first document.
	if d.event.event_type == yaml_NO_EVENT {
		d.nextEvent()

		if d.event.event_type != yaml_STREAM_START_EVENT {
			return errors.New("Invalid stream")
		}

		d.nextEvent()
	}

	d.document(rv)
	return nil
}
|
|
||||||
|
|
||||||
// UseNumber causes the Decoder to unmarshal numbers into an interface{}
// as a Number instead of as a float64.
func (d *Decoder) UseNumber() { d.useNumber = true }
|
|
||||||
|
|
||||||
// error aborts decoding by panicking with err; the recovery handler
// installed by Decode converts the panic back into a returned error.
func (d *Decoder) error(err error) {
	panic(err)
}
|
|
||||||
|
|
||||||
// nextEvent advances d.event to the next event, taking it from the alias
// replay queue when one is active and from the parser otherwise. Every
// consumed event (except aliases) is also appended to the top of the
// anchor-tracking stack so anchored subtrees can be replayed later.
func (d *Decoder) nextEvent() {
	if d.event.event_type == yaml_STREAM_END_EVENT {
		d.error(errors.New("The stream is closed"))
	}

	if d.replay_events != nil {
		// An alias is being replayed: serve recorded events first.
		d.event = d.replay_events[0]
		if len(d.replay_events) == 1 {
			d.replay_events = nil
		} else {
			d.replay_events = d.replay_events[1:]
		}
	} else {
		if !yaml_parser_parse(&d.parser, &d.event) {
			yaml_event_delete(&d.event)

			d.error(&ParserError{
				ErrorType:   d.parser.error,
				Context:     d.parser.context,
				ContextMark: d.parser.context_mark,
				Problem:     d.parser.problem,
				ProblemMark: d.parser.problem_mark,
			})
		}
	}

	last := len(d.tracking_anchors)
	// skip aliases when tracking an anchor
	if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
		d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
	}
}
|
|
||||||
|
|
||||||
// document decodes one complete document: it requires a DOCUMENT-START
// event, parses the root node into rv, then requires and consumes the
// matching DOCUMENT-END event.
func (d *Decoder) document(rv reflect.Value) {
	if d.event.event_type != yaml_DOCUMENT_START_EVENT {
		d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
	}

	d.nextEvent()
	d.parse(rv)

	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
	}

	d.nextEvent()
}
|
|
||||||
|
|
||||||
// parse dispatches on the current event type and decodes one YAML node
// into rv. An invalid rv means the caller has nowhere to store the value,
// so the node is consumed and discarded. Anchored nodes are bracketed
// with begin_anchor/end_anchor so their events are recorded for aliases.
func (d *Decoder) parse(rv reflect.Value) {
	if !rv.IsValid() {
		// skip ahead since we cannot store
		d.valueInterface()
		return
	}

	anchor := string(d.event.anchor)
	switch d.event.event_type {
	case yaml_SEQUENCE_START_EVENT:
		d.begin_anchor(anchor)
		d.sequence(rv)
		d.end_anchor(anchor)
	case yaml_MAPPING_START_EVENT:
		d.begin_anchor(anchor)
		d.mapping(rv)
		d.end_anchor(anchor)
	case yaml_SCALAR_EVENT:
		d.begin_anchor(anchor)
		d.scalar(rv)
		d.end_anchor(anchor)
	case yaml_ALIAS_EVENT:
		d.alias(rv)
	case yaml_DOCUMENT_END_EVENT:
		// Nothing to decode; caller handles the end of the document.
	default:
		d.error(&UnexpectedEventError{
			Value:     string(d.event.value),
			EventType: d.event.event_type,
			At:        d.event.start_mark,
		})
	}
}
|
|
||||||
|
|
||||||
func (d *Decoder) begin_anchor(anchor string) {
|
|
||||||
if anchor != "" {
|
|
||||||
events := []yaml_event_t{d.event}
|
|
||||||
d.tracking_anchors = append(d.tracking_anchors, events)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// end_anchor finishes recording events for a named anchor: it pops the
// top tracking slot, trims it, merges it into any still-open outer anchor
// recordings, and stores the result under the anchor name for later alias
// replay. A blank anchor name is a no-op.
func (d *Decoder) end_anchor(anchor string) {
	if anchor != "" {
		events := d.tracking_anchors[len(d.tracking_anchors)-1]
		d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
		// remove the anchor, replaying events shouldn't have anchors
		events[0].anchor = nil
		// we went one too many, remove the extra event
		events = events[:len(events)-1]
		// if nested, append to all the other anchors
		for i, e := range d.tracking_anchors {
			d.tracking_anchors[i] = append(e, events...)
		}
		d.anchors[anchor] = events
	}
}
|
|
||||||
|
|
||||||
// indirect walks down v, allocating pointers as needed, until it reaches
// a non-pointer value. If it encounters an Unmarshaler on the way, it
// stops and returns it together with a scratch value for the custom
// unmarshal call; otherwise it returns (nil, finalValue). decodingNull
// stops one level early so a settable pointer can be left nil.
// (This mirrors the indirect helper in encoding/json.)
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}

		if v.Kind() != reflect.Ptr {
			break
		}

		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}

		// Allocate through nil pointers so the chain can be followed.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}

		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(Unmarshaler); ok {
				var temp interface{}
				return u, reflect.ValueOf(&temp)
			}
		}

		v = v.Elem()
	}

	return nil, v
}
|
|
||||||
|
|
||||||
// sequence decodes a YAML sequence into v, which must (after pointer
// indirection) be an array, a slice, or an empty interface. Custom
// Unmarshalers are decoded into a scratch value and handed the yaml_SEQ_TAG
// via a deferred UnmarshalYAML call. Slices grow as needed; fixed arrays
// have surplus elements skipped and leftover slots zeroed.
func (d *Decoder) sequence(v reflect.Value) {
	if d.event.event_type != yaml_SEQUENCE_START_EVENT {
		d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
	}

	u, pv := d.indirect(v, false)
	if u != nil {
		// Decode into a scratch value; hand it to the Unmarshaler on exit.
		defer func() {
			if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
				d.error(err)
			}
		}()
		_, pv = d.indirect(pv, false)
	}

	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			v.Set(reflect.ValueOf(d.sequenceInterface()))
			return
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
	case reflect.Array:
	case reflect.Slice:
		break
	}

	d.nextEvent()

	i := 0
done:
	for {
		switch d.event.event_type {
		case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
			break done
		}

		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary
			if i >= v.Cap() {
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			d.parse(v.Index(i))
		} else {
			// Ran out of fixed array: skip.
			d.parse(reflect.Value{})
		}
		i++
	}

	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	// An empty sequence decodes to an empty (non-nil) slice.
	if i == 0 && v.Kind() == reflect.Slice {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}

	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.nextEvent()
	}
}
|
|
||||||
|
|
||||||
// mapping decodes a YAML mapping into v, which must (after pointer
// indirection) be a struct, a map, or an empty interface. Custom
// Unmarshalers receive yaml_MAP_TAG via a deferred UnmarshalYAML call.
// Struct targets are delegated to mappingStruct.
func (d *Decoder) mapping(v reflect.Value) {
	u, pv := d.indirect(v, false)
	if u != nil {
		// Decode into a scratch value; hand it to the Unmarshaler on exit.
		defer func() {
			if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
				d.error(err)
			}
		}()
		_, pv = d.indirect(pv, false)
	}
	v = pv

	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
		v.Set(reflect.ValueOf(d.mappingInterface()))
		return
	}

	// Check type of target: struct or map[X]Y
	switch v.Kind() {
	case reflect.Struct:
		d.mappingStruct(v)
		return
	case reflect.Map:
	default:
		d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
	}

	mapt := v.Type()
	if v.IsNil() {
		v.Set(reflect.MakeMap(mapt))
	}

	d.nextEvent()

	keyt := mapt.Key()
	mapElemt := mapt.Elem()

	// mapElem is reused (zeroed) across iterations to avoid reallocating.
	var mapElem reflect.Value
done:
	for {
		switch d.event.event_type {
		case yaml_MAPPING_END_EVENT:
			break done
		case yaml_DOCUMENT_END_EVENT:
			return
		}

		key := reflect.New(keyt)
		d.parse(key.Elem())

		if !mapElem.IsValid() {
			mapElem = reflect.New(mapElemt).Elem()
		} else {
			mapElem.Set(reflect.Zero(mapElemt))
		}

		d.parse(mapElem)

		v.SetMapIndex(key.Elem(), mapElem)
	}

	d.nextEvent()
}
|
|
||||||
|
|
||||||
// mappingStruct decodes a YAML mapping into the fields of struct v, using
// the cached field table for the struct type. Keys are matched to fields
// exactly first, falling back to a case-insensitive match; keys with no
// matching field are parsed into an invalid Value and discarded.
func (d *Decoder) mappingStruct(v reflect.Value) {

	structt := v.Type()
	fields := cachedTypeFields(structt)

	d.nextEvent()

done:
	for {
		switch d.event.event_type {
		case yaml_MAPPING_END_EVENT:
			break done
		case yaml_DOCUMENT_END_EVENT:
			return
		}

		key := ""
		d.parse(reflect.ValueOf(&key))

		// Figure out field corresponding to key.
		var subv reflect.Value

		var f *field
		for i := range fields {
			ff := &fields[i]
			if ff.name == key {
				// Exact match wins immediately.
				f = ff
				break
			}

			// First case-insensitive match is kept as a fallback.
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}

		if f != nil {
			subv = v
			// Walk the (possibly embedded) field index path, allocating
			// through nil pointers along the way.
			for _, i := range f.index {
				if subv.Kind() == reflect.Ptr {
					if subv.IsNil() {
						subv.Set(reflect.New(subv.Type().Elem()))
					}
					subv = subv.Elem()
				}
				subv = subv.Field(i)
			}
		}
		d.parse(subv)
	}

	d.nextEvent()
}
|
|
||||||
|
|
||||||
func (d *Decoder) scalar(v reflect.Value) {
|
|
||||||
val := string(d.event.value)
|
|
||||||
wantptr := null_values[val]
|
|
||||||
|
|
||||||
u, pv := d.indirect(v, wantptr)
|
|
||||||
|
|
||||||
var tag string
|
|
||||||
if u != nil {
|
|
||||||
defer func() {
|
|
||||||
if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
|
|
||||||
d.error(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, pv = d.indirect(pv, wantptr)
|
|
||||||
}
|
|
||||||
v = pv
|
|
||||||
|
|
||||||
var err error
|
|
||||||
tag, err = resolve(d.event, v, d.useNumber)
|
|
||||||
if err != nil {
|
|
||||||
d.error(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) alias(rv reflect.Value) {
|
|
||||||
val, ok := d.anchors[string(d.event.anchor)]
|
|
||||||
if !ok {
|
|
||||||
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.replay_events = val
|
|
||||||
d.nextEvent()
|
|
||||||
d.parse(rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) valueInterface() interface{} {
|
|
||||||
var v interface{}
|
|
||||||
|
|
||||||
anchor := string(d.event.anchor)
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_SEQUENCE_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.sequenceInterface()
|
|
||||||
case yaml_MAPPING_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.mappingInterface()
|
|
||||||
case yaml_SCALAR_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.scalarInterface()
|
|
||||||
case yaml_ALIAS_EVENT:
|
|
||||||
rv := reflect.ValueOf(&v)
|
|
||||||
d.alias(rv)
|
|
||||||
return v
|
|
||||||
case yaml_DOCUMENT_END_EVENT:
|
|
||||||
d.error(&UnexpectedEventError{
|
|
||||||
Value: string(d.event.value),
|
|
||||||
EventType: d.event.event_type,
|
|
||||||
At: d.event.start_mark,
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
d.end_anchor(anchor)
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) scalarInterface() interface{} {
|
|
||||||
_, v := resolveInterface(d.event, d.useNumber)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// sequenceInterface is like sequence but returns []interface{}.
|
|
||||||
func (d *Decoder) sequenceInterface() []interface{} {
|
|
||||||
var v = make([]interface{}, 0)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
|
|
||||||
break done
|
|
||||||
}
|
|
||||||
|
|
||||||
v = append(v, d.valueInterface())
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// mappingInterface is like mapping but returns map[interface{}]interface{}.
|
|
||||||
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
|
|
||||||
m := make(map[interface{}]interface{})
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
|
|
||||||
break done
|
|
||||||
}
|
|
||||||
|
|
||||||
key := d.valueInterface()
|
|
||||||
|
|
||||||
// Read value.
|
|
||||||
m[key] = d.valueInterface()
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
return m
|
|
||||||
}
|
|
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
|
@ -1,395 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
timeTimeType = reflect.TypeOf(time.Time{})
|
|
||||||
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
|
|
||||||
numberType = reflect.TypeOf(Number(""))
|
|
||||||
nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
|
|
||||||
multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
|
|
||||||
|
|
||||||
shortTags = map[string]string{
|
|
||||||
yaml_NULL_TAG: "!!null",
|
|
||||||
yaml_BOOL_TAG: "!!bool",
|
|
||||||
yaml_STR_TAG: "!!str",
|
|
||||||
yaml_INT_TAG: "!!int",
|
|
||||||
yaml_FLOAT_TAG: "!!float",
|
|
||||||
yaml_TIMESTAMP_TAG: "!!timestamp",
|
|
||||||
yaml_SEQ_TAG: "!!seq",
|
|
||||||
yaml_MAP_TAG: "!!map",
|
|
||||||
yaml_BINARY_TAG: "!!binary",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
type Marshaler interface {
|
|
||||||
MarshalYAML() (tag string, value interface{}, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Encoder writes JSON objects to an output stream.
|
|
||||||
type Encoder struct {
|
|
||||||
w io.Writer
|
|
||||||
emitter yaml_emitter_t
|
|
||||||
event yaml_event_t
|
|
||||||
flow bool
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func Marshal(v interface{}) ([]byte, error) {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
e := NewEncoder(&b)
|
|
||||||
err := e.Encode(v)
|
|
||||||
return b.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a new encoder that writes to w.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
e := &Encoder{w: w}
|
|
||||||
yaml_emitter_initialize(&e.emitter)
|
|
||||||
yaml_emitter_set_output_writer(&e.emitter, e.w)
|
|
||||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
|
||||||
e.emit()
|
|
||||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) Encode(v interface{}) (err error) {
|
|
||||||
defer recovery(&err)
|
|
||||||
|
|
||||||
if e.err != nil {
|
|
||||||
return e.err
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal("", reflect.ValueOf(v), true)
|
|
||||||
|
|
||||||
yaml_document_end_event_initialize(&e.event, true)
|
|
||||||
e.emit()
|
|
||||||
e.emitter.open_ended = false
|
|
||||||
yaml_stream_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emit() {
|
|
||||||
if !yaml_emitter_emit(&e.emitter, &e.event) {
|
|
||||||
panic("bad emit")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
|
|
||||||
vt := v.Type()
|
|
||||||
|
|
||||||
if vt.Implements(marshalerType) {
|
|
||||||
e.emitMarshaler(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if vt.Kind() != reflect.Ptr && allowAddr {
|
|
||||||
if reflect.PtrTo(vt).Implements(marshalerType) {
|
|
||||||
e.emitAddrMarshaler(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Interface:
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
} else {
|
|
||||||
e.marshal(tag, v.Elem(), allowAddr)
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
e.emitMap(tag, v)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
} else {
|
|
||||||
e.marshal(tag, v.Elem(), true)
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
e.emitStruct(tag, v)
|
|
||||||
case reflect.Slice:
|
|
||||||
e.emitSlice(tag, v)
|
|
||||||
case reflect.String:
|
|
||||||
e.emitString(tag, v)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
e.emitInt(tag, v)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
e.emitUint(tag, v)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
e.emitFloat(tag, v)
|
|
||||||
case reflect.Bool:
|
|
||||||
e.emitBool(tag, v)
|
|
||||||
default:
|
|
||||||
panic("Can't marshal type yet: " + v.Type().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitMap(tag string, v reflect.Value) {
|
|
||||||
e.mapping(tag, func() {
|
|
||||||
var keys stringValues = v.MapKeys()
|
|
||||||
sort.Sort(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
e.marshal("", k, true)
|
|
||||||
e.marshal("", v.MapIndex(k), true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
|
|
||||||
if v.Type() == timeTimeType {
|
|
||||||
e.emitTime(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := cachedTypeFields(v.Type())
|
|
||||||
|
|
||||||
e.mapping(tag, func() {
|
|
||||||
for _, f := range fields {
|
|
||||||
fv := fieldByIndex(v, f.index)
|
|
||||||
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal("", reflect.ValueOf(f.name), true)
|
|
||||||
e.flow = f.flow
|
|
||||||
e.marshal("", fv, true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitTime(tag string, v reflect.Value) {
|
|
||||||
t := v.Interface().(time.Time)
|
|
||||||
bytes, _ := t.MarshalText()
|
|
||||||
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEmptyValue(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
|
||||||
return v.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.Interface, reflect.Ptr:
|
|
||||||
return v.IsNil()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) mapping(tag string, f func()) {
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_MAPPING_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_MAPPING_STYLE
|
|
||||||
}
|
|
||||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
f()
|
|
||||||
|
|
||||||
yaml_mapping_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
|
|
||||||
if v.Type() == byteSliceType {
|
|
||||||
e.emitBase64(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
}
|
|
||||||
yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
n := v.Len()
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
e.marshal("", v.Index(i), true)
|
|
||||||
}
|
|
||||||
|
|
||||||
yaml_sequence_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s := v.Bytes()
|
|
||||||
|
|
||||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
|
|
||||||
|
|
||||||
base64.StdEncoding.Encode(dst, s)
|
|
||||||
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitString(tag string, v reflect.Value) {
|
|
||||||
var style yaml_scalar_style_t
|
|
||||||
s := v.String()
|
|
||||||
|
|
||||||
if nonPrintable.MatchString(s) {
|
|
||||||
e.emitBase64(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Type() == numberType {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
} else {
|
|
||||||
event := yaml_event_t{
|
|
||||||
implicit: true,
|
|
||||||
value: []byte(s),
|
|
||||||
}
|
|
||||||
|
|
||||||
rtag, _ := resolveInterface(event, false)
|
|
||||||
if tag == "" && rtag != yaml_STR_TAG {
|
|
||||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
} else if multiline.MatchString(s) {
|
|
||||||
style = yaml_LITERAL_SCALAR_STYLE
|
|
||||||
} else {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
e.emitScalar(s, "", tag, style)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBool(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatBool(v.Bool())
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitInt(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatInt(v.Int(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitUint(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatUint(v.Uint(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
|
|
||||||
f := v.Float()
|
|
||||||
|
|
||||||
var s string
|
|
||||||
switch {
|
|
||||||
case math.IsNaN(f):
|
|
||||||
s = ".nan"
|
|
||||||
case math.IsInf(f, 1):
|
|
||||||
s = "+.inf"
|
|
||||||
case math.IsInf(f, -1):
|
|
||||||
s = "-.inf"
|
|
||||||
default:
|
|
||||||
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
|
|
||||||
}
|
|
||||||
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitNil() {
|
|
||||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
|
||||||
implicit := tag == ""
|
|
||||||
if !implicit {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
}
|
|
||||||
|
|
||||||
stag := shortTags[tag]
|
|
||||||
if stag == "" {
|
|
||||||
stag = tag
|
|
||||||
}
|
|
||||||
|
|
||||||
yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
|
|
||||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := v.Interface().(Marshaler)
|
|
||||||
if m == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t, val, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if val == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal(t, reflect.ValueOf(val), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
|
|
||||||
if !v.CanAddr() {
|
|
||||||
e.marshal(tag, v, false)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
va := v.Addr()
|
|
||||||
if va.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := v.Interface().(Marshaler)
|
|
||||||
t, val, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal(t, reflect.ValueOf(val), false)
|
|
||||||
}
|
|
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
File diff suppressed because it is too large
Load diff
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
|
@ -1,465 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the reader error and return 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
|
|
||||||
offset int, value int) bool {
|
|
||||||
parser.error = yaml_READER_ERROR
|
|
||||||
parser.problem = problem
|
|
||||||
parser.problem_offset = offset
|
|
||||||
parser.problem_value = value
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Byte order marks.
|
|
||||||
*/
|
|
||||||
const (
|
|
||||||
BOM_UTF8 = "\xef\xbb\xbf"
|
|
||||||
BOM_UTF16LE = "\xff\xfe"
|
|
||||||
BOM_UTF16BE = "\xfe\xff"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
|
||||||
* found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
|
||||||
/* Ensure that we had enough bytes in the raw buffer. */
|
|
||||||
for !parser.eof &&
|
|
||||||
len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Determine the encoding. */
|
|
||||||
raw := parser.raw_buffer
|
|
||||||
pos := parser.raw_buffer_pos
|
|
||||||
remaining := len(raw) - pos
|
|
||||||
if remaining >= 2 &&
|
|
||||||
raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
|
|
||||||
parser.encoding = yaml_UTF16LE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if remaining >= 2 &&
|
|
||||||
raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
|
|
||||||
parser.encoding = yaml_UTF16BE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if remaining >= 3 &&
|
|
||||||
raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
parser.raw_buffer_pos += 3
|
|
||||||
parser.offset += 3
|
|
||||||
} else {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Update the raw buffer.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
|
||||||
size_read := 0
|
|
||||||
|
|
||||||
/* Return if the raw buffer is full. */
|
|
||||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return on EOF. */
|
|
||||||
|
|
||||||
if parser.eof {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the remaining bytes in the raw buffer to the beginning. */
|
|
||||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
|
||||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
|
||||||
}
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
|
||||||
parser.raw_buffer_pos = 0
|
|
||||||
|
|
||||||
/* Call the read handler to fill the buffer. */
|
|
||||||
size_read, err := parser.read_handler(parser,
|
|
||||||
parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
parser.eof = true
|
|
||||||
} else if err != nil {
|
|
||||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ensure that the buffer contains at least `length` characters.
|
|
||||||
* Return 1 on success, 0 on failure.
|
|
||||||
*
|
|
||||||
* The length is supposed to be significantly less that the buffer size.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
|
||||||
/* Read handler must be set. */
|
|
||||||
if parser.read_handler == nil {
|
|
||||||
panic("read handler must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
|
|
||||||
|
|
||||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return if the buffer contains enough characters. */
|
|
||||||
|
|
||||||
if parser.unread >= length {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Determine the input encoding if it is not known yet. */
|
|
||||||
|
|
||||||
if parser.encoding == yaml_ANY_ENCODING {
|
|
||||||
if !yaml_parser_determine_encoding(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the unread characters to the beginning of the buffer. */
|
|
||||||
buffer_end := len(parser.buffer)
|
|
||||||
if 0 < parser.buffer_pos &&
|
|
||||||
parser.buffer_pos < buffer_end {
|
|
||||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
|
||||||
buffer_end -= parser.buffer_pos
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
} else if parser.buffer_pos == buffer_end {
|
|
||||||
buffer_end = 0
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
|
||||||
|
|
||||||
/* Fill the buffer until it has enough characters. */
|
|
||||||
first := true
|
|
||||||
for parser.unread < length {
|
|
||||||
/* Fill the raw buffer if necessary. */
|
|
||||||
|
|
||||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
first = false
|
|
||||||
|
|
||||||
/* Decode the raw buffer. */
|
|
||||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
|
||||||
var value rune
|
|
||||||
var w int
|
|
||||||
|
|
||||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
|
||||||
incomplete := false
|
|
||||||
|
|
||||||
/* Decode the next character. */
|
|
||||||
|
|
||||||
switch parser.encoding {
|
|
||||||
case yaml_UTF8_ENCODING:
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Decode a UTF-8 character. Check RFC 3629
|
|
||||||
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
|
||||||
*
|
|
||||||
* The following table (taken from the RFC) is used for
|
|
||||||
* decoding.
|
|
||||||
*
|
|
||||||
* Char. number range | UTF-8 octet sequence
|
|
||||||
* (hexadecimal) | (binary)
|
|
||||||
* --------------------+------------------------------------
|
|
||||||
* 0000 0000-0000 007F | 0xxxxxxx
|
|
||||||
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
|
||||||
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
|
||||||
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|
||||||
*
|
|
||||||
* Additionally, the characters in the range 0xD800-0xDFFF
|
|
||||||
* are prohibited as they are reserved for use with UTF-16
|
|
||||||
* surrogate pairs.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Determine the length of the UTF-8 sequence. */
|
|
||||||
|
|
||||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
|
||||||
w = width(octet)
|
|
||||||
|
|
||||||
/* Check if the leading octet is valid. */
|
|
||||||
|
|
||||||
if w == 0 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid leading UTF-8 octet",
|
|
||||||
parser.offset, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if the raw buffer contains an incomplete character. */
|
|
||||||
|
|
||||||
if w > raw_unread {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-8 octet sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Decode the leading octet. */
|
|
||||||
switch {
|
|
||||||
case octet&0x80 == 0x00:
|
|
||||||
value = rune(octet & 0x7F)
|
|
||||||
case octet&0xE0 == 0xC0:
|
|
||||||
value = rune(octet & 0x1F)
|
|
||||||
case octet&0xF0 == 0xE0:
|
|
||||||
value = rune(octet & 0x0F)
|
|
||||||
case octet&0xF8 == 0xF0:
|
|
||||||
value = rune(octet & 0x07)
|
|
||||||
default:
|
|
||||||
value = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check and decode the trailing octets. */
|
|
||||||
|
|
||||||
for k := 1; k < w; k++ {
|
|
||||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
|
||||||
|
|
||||||
/* Check if the octet is valid. */
|
|
||||||
|
|
||||||
if (octet & 0xC0) != 0x80 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid trailing UTF-8 octet",
|
|
||||||
parser.offset+k, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Decode the octet. */
|
|
||||||
|
|
||||||
value = (value << 6) + rune(octet&0x3F)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check the length of the sequence against the value. */
|
|
||||||
switch {
|
|
||||||
case w == 1:
|
|
||||||
case w == 2 && value >= 0x80:
|
|
||||||
case w == 3 && value >= 0x800:
|
|
||||||
case w == 4 && value >= 0x10000:
|
|
||||||
default:
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid length of a UTF-8 sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check the range of the value. */
|
|
||||||
|
|
||||||
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid Unicode character",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
case yaml_UTF16LE_ENCODING,
|
|
||||||
yaml_UTF16BE_ENCODING:
|
|
||||||
|
|
||||||
var low, high int
|
|
||||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
|
||||||
low, high = 0, 1
|
|
||||||
} else {
|
|
||||||
high, low = 1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The UTF-16 encoding is not as simple as one might
|
|
||||||
* naively think. Check RFC 2781
|
|
||||||
* (http://www.ietf.org/rfc/rfc2781.txt).
|
|
||||||
*
|
|
||||||
* Normally, two subsequent bytes describe a Unicode
|
|
||||||
* character. However a special technique (called a
|
|
||||||
* surrogate pair) is used for specifying character
|
|
||||||
* values larger than 0xFFFF.
|
|
||||||
*
|
|
||||||
* A surrogate pair consists of two pseudo-characters:
|
|
||||||
* high surrogate area (0xD800-0xDBFF)
|
|
||||||
* low surrogate area (0xDC00-0xDFFF)
|
|
||||||
*
|
|
||||||
* The following formulas are used for decoding
|
|
||||||
* and encoding characters using surrogate pairs:
|
|
||||||
*
|
|
||||||
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
|
||||||
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
|
||||||
* W1 = 110110yyyyyyyyyy
|
|
||||||
* W2 = 110111xxxxxxxxxx
|
|
||||||
*
|
|
||||||
* where U is the character value, W1 is the high surrogate
|
|
||||||
* area, W2 is the low surrogate area.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Check for incomplete UTF-16 character. */
|
|
||||||
|
|
||||||
if raw_unread < 2 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 character",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get the character. */
|
|
||||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
|
||||||
|
|
||||||
/* Check for unexpected low surrogate area. */
|
|
||||||
|
|
||||||
if (value & 0xFC00) == 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"unexpected low surrogate area",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check for a high surrogate area. */
|
|
||||||
|
|
||||||
if (value & 0xFC00) == 0xD800 {
|
|
||||||
|
|
||||||
w = 4
|
|
||||||
|
|
||||||
/* Check for incomplete surrogate pair. */
|
|
||||||
|
|
||||||
if raw_unread < 4 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 surrogate pair",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get the next character. */
|
|
||||||
|
|
||||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
|
||||||
|
|
||||||
/* Check for a low surrogate area. */
|
|
||||||
|
|
||||||
if (value2 & 0xFC00) != 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"expected low surrogate area",
|
|
||||||
parser.offset+2, int(value2))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Generate the value of the surrogate pair. */
|
|
||||||
|
|
||||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
|
||||||
} else {
|
|
||||||
w = 2
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic("Impossible") /* Impossible. */
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if the raw buffer contains enough bytes to form a character. */
|
|
||||||
|
|
||||||
if incomplete {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if the character is in the allowed range:
|
|
||||||
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
|
||||||
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
|
||||||
* | [#x10000-#x10FFFF] (32 bit)
|
|
||||||
*/
|
|
||||||
|
|
||||||
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
|
|
||||||
(value >= 0x20 && value <= 0x7E) ||
|
|
||||||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
|
|
||||||
(value >= 0xE000 && value <= 0xFFFD) ||
|
|
||||||
(value >= 0x10000 && value <= 0x10FFFF)) {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"control characters are not allowed",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the raw pointers. */
|
|
||||||
|
|
||||||
parser.raw_buffer_pos += w
|
|
||||||
parser.offset += w
|
|
||||||
|
|
||||||
/* Finally put the character into the buffer. */
|
|
||||||
|
|
||||||
/* 0000 0000-0000 007F . 0xxxxxxx */
|
|
||||||
if value <= 0x7F {
|
|
||||||
parser.buffer[buffer_end] = byte(value)
|
|
||||||
} else if value <= 0x7FF {
|
|
||||||
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
|
|
||||||
} else if value <= 0xFFFF {
|
|
||||||
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
|
|
||||||
} else {
|
|
||||||
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer_end += w
|
|
||||||
parser.unread++
|
|
||||||
}
|
|
||||||
|
|
||||||
/* On EOF, put NUL into the buffer and return. */
|
|
||||||
|
|
||||||
if parser.eof {
|
|
||||||
parser.buffer[buffer_end] = 0
|
|
||||||
buffer_end++
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
parser.unread++
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
return true
|
|
||||||
}
|
|
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
|
@ -1,449 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var byteSliceType = reflect.TypeOf([]byte(nil))
|
|
||||||
|
|
||||||
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
|
|
||||||
var bool_values map[string]bool
|
|
||||||
var null_values map[string]bool
|
|
||||||
|
|
||||||
var signs = []byte{'-', '+'}
|
|
||||||
var nulls = []byte{'~', 'n', 'N'}
|
|
||||||
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
|
|
||||||
|
|
||||||
var timestamp_regexp *regexp.Regexp
|
|
||||||
var ymd_regexp *regexp.Regexp
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
bool_values = make(map[string]bool)
|
|
||||||
bool_values["y"] = true
|
|
||||||
bool_values["yes"] = true
|
|
||||||
bool_values["n"] = false
|
|
||||||
bool_values["no"] = false
|
|
||||||
bool_values["true"] = true
|
|
||||||
bool_values["false"] = false
|
|
||||||
bool_values["on"] = true
|
|
||||||
bool_values["off"] = false
|
|
||||||
|
|
||||||
null_values = make(map[string]bool)
|
|
||||||
null_values["~"] = true
|
|
||||||
null_values["null"] = true
|
|
||||||
null_values["Null"] = true
|
|
||||||
null_values["NULL"] = true
|
|
||||||
|
|
||||||
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
|
|
||||||
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
|
|
||||||
val := string(event.value)
|
|
||||||
|
|
||||||
if null_values[val] {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
if useNumber && v.Type() == numberType {
|
|
||||||
tag, i := resolveInterface(event, useNumber)
|
|
||||||
if n, ok := i.(Number); ok {
|
|
||||||
v.Set(reflect.ValueOf(n))
|
|
||||||
return tag, nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
return resolve_string(val, v, event)
|
|
||||||
case reflect.Bool:
|
|
||||||
return resolve_bool(val, v, event)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return resolve_int(val, v, useNumber, event)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return resolve_uint(val, v, useNumber, event)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return resolve_float(val, v, useNumber, event)
|
|
||||||
case reflect.Interface:
|
|
||||||
_, i := resolveInterface(event, useNumber)
|
|
||||||
if i != nil {
|
|
||||||
v.Set(reflect.ValueOf(i))
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
return resolve_time(val, v, event)
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.Type() != byteSliceType {
|
|
||||||
return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
|
|
||||||
}
|
|
||||||
b, err := decode_binary(event.value, event)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.ValueOf(b))
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_STR_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasBinaryTag(event yaml_event_t) bool {
|
|
||||||
for _, tag := range binary_tags {
|
|
||||||
if bytes.Equal(event.tag, tag) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
|
|
||||||
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
|
|
||||||
n, err := base64.StdEncoding.Decode(b, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
if len(event.tag) > 0 {
|
|
||||||
if hasBinaryTag(event) {
|
|
||||||
b, err := decode_binary(event.value, event)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
val = string(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v.SetString(val)
|
|
||||||
return yaml_STR_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
b, found := bool_values[strings.ToLower(val)]
|
|
||||||
if !found {
|
|
||||||
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetBool(b)
|
|
||||||
return yaml_BOOL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
sign := int64(1)
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
var val64 int64
|
|
||||||
if value <= math.MaxInt64 {
|
|
||||||
val64 = int64(value)
|
|
||||||
if sign == -1 {
|
|
||||||
val64 = -val64
|
|
||||||
}
|
|
||||||
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
|
|
||||||
val64 = math.MinInt64
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatInt(val64, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowInt(val64) {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
v.SetInt(val64)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
if val[0] == '-' {
|
|
||||||
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatUint(value, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowUint(value) {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetUint(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value float64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
typeBits := 64
|
|
||||||
if !isNumberValue {
|
|
||||||
typeBits = v.Type().Bits()
|
|
||||||
}
|
|
||||||
|
|
||||||
sign := 1
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
valLower := strings.ToLower(val)
|
|
||||||
if valLower == ".inf" {
|
|
||||||
value = math.Inf(sign)
|
|
||||||
} else if valLower == ".nan" {
|
|
||||||
value = math.NaN()
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
value, err = strconv.ParseFloat(val, typeBits)
|
|
||||||
value *= float64(sign)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
|
|
||||||
} else {
|
|
||||||
if v.OverflowFloat(value) {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetFloat(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_FLOAT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
var parsedTime time.Time
|
|
||||||
matches := ymd_regexp.FindStringSubmatch(val)
|
|
||||||
if len(matches) > 0 {
|
|
||||||
year, _ := strconv.Atoi(matches[1])
|
|
||||||
month, _ := strconv.Atoi(matches[2])
|
|
||||||
day, _ := strconv.Atoi(matches[3])
|
|
||||||
parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
|
|
||||||
} else {
|
|
||||||
matches = timestamp_regexp.FindStringSubmatch(val)
|
|
||||||
if len(matches) == 0 {
|
|
||||||
return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
year, _ := strconv.Atoi(matches[1])
|
|
||||||
month, _ := strconv.Atoi(matches[2])
|
|
||||||
day, _ := strconv.Atoi(matches[3])
|
|
||||||
hour, _ := strconv.Atoi(matches[4])
|
|
||||||
min, _ := strconv.Atoi(matches[5])
|
|
||||||
sec, _ := strconv.Atoi(matches[6])
|
|
||||||
|
|
||||||
nsec := 0
|
|
||||||
if matches[7] != "" {
|
|
||||||
millis, _ := strconv.Atoi(matches[7])
|
|
||||||
nsec = int(time.Duration(millis) * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
loc := time.UTC
|
|
||||||
if matches[8] != "" {
|
|
||||||
sign := matches[8][0]
|
|
||||||
hr, _ := strconv.Atoi(matches[8][1:])
|
|
||||||
min := 0
|
|
||||||
if matches[9] != "" {
|
|
||||||
min, _ = strconv.Atoi(matches[9])
|
|
||||||
}
|
|
||||||
|
|
||||||
zoneOffset := (hr*60 + min) * 60
|
|
||||||
if sign == '-' {
|
|
||||||
zoneOffset = -zoneOffset
|
|
||||||
}
|
|
||||||
|
|
||||||
loc = time.FixedZone("", zoneOffset)
|
|
||||||
}
|
|
||||||
parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.ValueOf(parsedTime))
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
|
|
||||||
val := string(event.value)
|
|
||||||
if len(event.tag) == 0 && !event.implicit {
|
|
||||||
return "", val
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(val) == 0 {
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var result interface{}
|
|
||||||
|
|
||||||
sign := false
|
|
||||||
c := val[0]
|
|
||||||
switch {
|
|
||||||
case bytes.IndexByte(signs, c) != -1:
|
|
||||||
sign = true
|
|
||||||
fallthrough
|
|
||||||
case c >= '0' && c <= '9':
|
|
||||||
i := int64(0)
|
|
||||||
result = &i
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_int(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_INT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
f := float64(0)
|
|
||||||
result = &f
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v = reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_float(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_FLOAT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
if !sign {
|
|
||||||
t := time.Time{}
|
|
||||||
if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
|
|
||||||
return "", t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case bytes.IndexByte(nulls, c) != -1:
|
|
||||||
if null_values[val] {
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
b := false
|
|
||||||
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
|
|
||||||
return yaml_BOOL_TAG, b
|
|
||||||
}
|
|
||||||
case c == '.':
|
|
||||||
f := float64(0)
|
|
||||||
result = &f
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_float(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_FLOAT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
case bytes.IndexByte(bools, c) != -1:
|
|
||||||
b := false
|
|
||||||
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
|
|
||||||
return yaml_BOOL_TAG, b
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasBinaryTag(event) {
|
|
||||||
bytes, err := decode_binary(event.value, event)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_BINARY_TAG, bytes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_STR_TAG, val
|
|
||||||
}
|
|
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
|
@ -1,62 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Run_parser(cmd string, args []string) {
|
|
||||||
for i := 0; i < len(args); i++ {
|
|
||||||
fmt.Printf("[%d] Scanning '%s'", i, args[i])
|
|
||||||
file, err := os.Open(args[i])
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
|
|
||||||
}
|
|
||||||
|
|
||||||
parser := yaml_parser_t{}
|
|
||||||
yaml_parser_initialize(&parser)
|
|
||||||
yaml_parser_set_input_reader(&parser, file)
|
|
||||||
|
|
||||||
failed := false
|
|
||||||
token := yaml_token_t{}
|
|
||||||
count := 0
|
|
||||||
for {
|
|
||||||
if !yaml_parser_scan(&parser, &token) {
|
|
||||||
failed = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if token.token_type == yaml_STREAM_END_TOKEN {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
|
|
||||||
file.Close()
|
|
||||||
|
|
||||||
msg := "SUCCESS"
|
|
||||||
if failed {
|
|
||||||
msg = "FAILED"
|
|
||||||
if parser.error != yaml_NO_ERROR {
|
|
||||||
m := parser.problem_mark
|
|
||||||
fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
|
|
||||||
parser.context, parser.problem, m.line, m.column)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("%s (%d tokens)\n", msg, count)
|
|
||||||
}
|
|
||||||
}
|
|
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
File diff suppressed because it is too large
Load diff
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
|
@ -1,360 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
|
|
||||||
type field struct {
|
|
||||||
name string
|
|
||||||
tag bool
|
|
||||||
index []int
|
|
||||||
typ reflect.Type
|
|
||||||
omitEmpty bool
|
|
||||||
flow bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from json tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
|
||||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
|
||||||
// and then any reachable anonymous structs.
|
|
||||||
func typeFields(t reflect.Type) []field {
|
|
||||||
// Anonymous fields to explore at the current level and the next.
|
|
||||||
current := []field{}
|
|
||||||
next := []field{{typ: t}}
|
|
||||||
|
|
||||||
// Count of queued names for current level and the next.
|
|
||||||
count := map[reflect.Type]int{}
|
|
||||||
nextCount := map[reflect.Type]int{}
|
|
||||||
|
|
||||||
// Types already visited at an earlier level.
|
|
||||||
visited := map[reflect.Type]bool{}
|
|
||||||
|
|
||||||
// Fields found.
|
|
||||||
var fields []field
|
|
||||||
|
|
||||||
for len(next) > 0 {
|
|
||||||
current, next = next, current[:0]
|
|
||||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
|
||||||
|
|
||||||
for _, f := range current {
|
|
||||||
if visited[f.typ] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visited[f.typ] = true
|
|
||||||
|
|
||||||
// Scan f.typ for fields to include.
|
|
||||||
for i := 0; i < f.typ.NumField(); i++ {
|
|
||||||
sf := f.typ.Field(i)
|
|
||||||
if sf.PkgPath != "" { // unexported
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tag := sf.Tag.Get("yaml")
|
|
||||||
if tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
name, opts := parseTag(tag)
|
|
||||||
if !isValidTag(name) {
|
|
||||||
name = ""
|
|
||||||
}
|
|
||||||
index := make([]int, len(f.index)+1)
|
|
||||||
copy(index, f.index)
|
|
||||||
index[len(f.index)] = i
|
|
||||||
|
|
||||||
ft := sf.Type
|
|
||||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
|
||||||
// Follow pointer.
|
|
||||||
ft = ft.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record found field and index sequence.
|
|
||||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
|
||||||
tagged := name != ""
|
|
||||||
if name == "" {
|
|
||||||
name = sf.Name
|
|
||||||
}
|
|
||||||
fields = append(fields, field{name, tagged, index, ft,
|
|
||||||
opts.Contains("omitempty"), opts.Contains("flow")})
|
|
||||||
if count[f.typ] > 1 {
|
|
||||||
// If there were multiple instances, add a second,
|
|
||||||
// so that the annihilation code will see a duplicate.
|
|
||||||
// It only cares about the distinction between 1 or 2,
|
|
||||||
// so don't bother generating any more copies.
|
|
||||||
fields = append(fields, fields[len(fields)-1])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record new anonymous struct to explore in next round.
|
|
||||||
nextCount[ft]++
|
|
||||||
if nextCount[ft] == 1 {
|
|
||||||
next = append(next, field{name: ft.Name(), index: index, typ: ft})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(byName(fields))
|
|
||||||
|
|
||||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
|
||||||
// except that fields with JSON tags are promoted.
|
|
||||||
|
|
||||||
// The fields are sorted in primary order of name, secondary order
|
|
||||||
// of field index length. Loop over names; for each name, delete
|
|
||||||
// hidden fields by choosing the one dominant field that survives.
|
|
||||||
out := fields[:0]
|
|
||||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
|
||||||
// One iteration per name.
|
|
||||||
// Find the sequence of fields with the name of this first field.
|
|
||||||
fi := fields[i]
|
|
||||||
name := fi.name
|
|
||||||
for advance = 1; i+advance < len(fields); advance++ {
|
|
||||||
fj := fields[i+advance]
|
|
||||||
if fj.name != name {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if advance == 1 { // Only one field with this name
|
|
||||||
out = append(out, fi)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dominant, ok := dominantField(fields[i : i+advance])
|
|
||||||
if ok {
|
|
||||||
out = append(out, dominant)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = out
|
|
||||||
sort.Sort(byIndex(fields))
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// dominantField looks through the fields, all of which are known to
|
|
||||||
// have the same name, to find the single field that dominates the
|
|
||||||
// others using Go's embedding rules, modified by the presence of
|
|
||||||
// JSON tags. If there are multiple top-level fields, the boolean
|
|
||||||
// will be false: This condition is an error in Go and we skip all
|
|
||||||
// the fields.
|
|
||||||
func dominantField(fields []field) (field, bool) {
|
|
||||||
// The fields are sorted in increasing index-length order. The winner
|
|
||||||
// must therefore be one with the shortest index length. Drop all
|
|
||||||
// longer entries, which is easy: just truncate the slice.
|
|
||||||
length := len(fields[0].index)
|
|
||||||
tagged := -1 // Index of first tagged field.
|
|
||||||
for i, f := range fields {
|
|
||||||
if len(f.index) > length {
|
|
||||||
fields = fields[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f.tag {
|
|
||||||
if tagged >= 0 {
|
|
||||||
// Multiple tagged fields at the same level: conflict.
|
|
||||||
// Return no field.
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
tagged = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tagged >= 0 {
|
|
||||||
return fields[tagged], true
|
|
||||||
}
|
|
||||||
// All remaining fields have the same length. If there's more than one,
|
|
||||||
// we have a conflict (two fields named "X" at the same level) and we
|
|
||||||
// return no field.
|
|
||||||
if len(fields) > 1 {
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
return fields[0], true
|
|
||||||
}
|
|
||||||
|
|
||||||
var fieldCache struct {
|
|
||||||
sync.RWMutex
|
|
||||||
m map[reflect.Type][]field
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
|
||||||
func cachedTypeFields(t reflect.Type) []field {
|
|
||||||
fieldCache.RLock()
|
|
||||||
f := fieldCache.m[t]
|
|
||||||
fieldCache.RUnlock()
|
|
||||||
if f != nil {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute fields without lock.
|
|
||||||
// Might duplicate effort but won't hold other computations back.
|
|
||||||
f = typeFields(t)
|
|
||||||
if f == nil {
|
|
||||||
f = []field{}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldCache.Lock()
|
|
||||||
if fieldCache.m == nil {
|
|
||||||
fieldCache.m = map[reflect.Type][]field{}
|
|
||||||
}
|
|
||||||
fieldCache.m[t] = f
|
|
||||||
fieldCache.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// tagOptions is the string following a comma in a struct field's "json"
|
|
||||||
// tag, or the empty string. It does not include the leading comma.
|
|
||||||
type tagOptions string
|
|
||||||
|
|
||||||
func isValidTag(s string) bool {
|
|
||||||
if s == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, c := range s {
|
|
||||||
switch {
|
|
||||||
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
|
||||||
// Backslash and quote chars are reserved, but
|
|
||||||
// otherwise any punctuation chars are allowed
|
|
||||||
// in a tag name.
|
|
||||||
default:
|
|
||||||
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
|
|
||||||
for _, i := range index {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
if v.IsNil() {
|
|
||||||
return reflect.Value{}
|
|
||||||
}
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
v = v.Field(i)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeByIndex(t reflect.Type, index []int) reflect.Type {
|
|
||||||
for _, i := range index {
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
t = t.Elem()
|
|
||||||
}
|
|
||||||
t = t.Field(i).Type
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
|
||||||
// It implements the methods to sort by string.
|
|
||||||
type stringValues []reflect.Value
|
|
||||||
|
|
||||||
func (sv stringValues) Len() int { return len(sv) }
|
|
||||||
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
|
|
||||||
func (sv stringValues) Less(i, j int) bool {
|
|
||||||
av, ak := getElem(sv[i])
|
|
||||||
bv, bk := getElem(sv[j])
|
|
||||||
if ak == reflect.String && bk == reflect.String {
|
|
||||||
return av.String() < bv.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return ak < bk
|
|
||||||
}
|
|
||||||
|
|
||||||
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
|
|
||||||
k := v.Kind()
|
|
||||||
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
k = v.Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, k
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTag splits a struct field's json tag into its name and
|
|
||||||
// comma-separated options.
|
|
||||||
func parseTag(tag string) (string, tagOptions) {
|
|
||||||
if idx := strings.Index(tag, ","); idx != -1 {
|
|
||||||
return tag[:idx], tagOptions(tag[idx+1:])
|
|
||||||
}
|
|
||||||
return tag, tagOptions("")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains reports whether a comma-separated list of options
|
|
||||||
// contains a particular substr flag. substr must be surrounded by a
|
|
||||||
// string boundary or commas.
|
|
||||||
func (o tagOptions) Contains(optionName string) bool {
|
|
||||||
if len(o) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
s := string(o)
|
|
||||||
for s != "" {
|
|
||||||
var next string
|
|
||||||
i := strings.Index(s, ",")
|
|
||||||
if i >= 0 {
|
|
||||||
s, next = s[:i], s[i+1:]
|
|
||||||
}
|
|
||||||
if s == optionName {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
s = next
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
|
@ -1,22 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_VERSION_MAJOR = 0
|
|
||||||
yaml_VERSION_MINOR = 1
|
|
||||||
yaml_VERSION_PATCH = 6
|
|
||||||
yaml_VERSION_STRING = "0.1.6"
|
|
||||||
)
|
|
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
|
@ -1,891 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
const (
|
|
||||||
INPUT_RAW_BUFFER_SIZE = 1024
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The size of the input buffer.
|
|
||||||
*
|
|
||||||
* It should be possible to decode the whole raw buffer.
|
|
||||||
*/
|
|
||||||
INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The size of the output buffer.
|
|
||||||
*/
|
|
||||||
|
|
||||||
OUTPUT_BUFFER_SIZE = 512
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The size of the output raw buffer.
|
|
||||||
*
|
|
||||||
* It should be possible to encode the whole output buffer.
|
|
||||||
*/
|
|
||||||
|
|
||||||
OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)
|
|
||||||
|
|
||||||
INITIAL_STACK_SIZE = 16
|
|
||||||
INITIAL_QUEUE_SIZE = 16
|
|
||||||
)
|
|
||||||
|
|
||||||
func width(b byte) int {
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if b&0xE0 == 0xC0 {
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
|
|
||||||
if b&0xF0 == 0xE0 {
|
|
||||||
return 3
|
|
||||||
}
|
|
||||||
|
|
||||||
if b&0xF8 == 0xF0 {
|
|
||||||
return 4
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
|
|
||||||
w := width(src[*src_pos])
|
|
||||||
switch w {
|
|
||||||
case 4:
|
|
||||||
dest[*dest_pos+3] = src[*src_pos+3]
|
|
||||||
fallthrough
|
|
||||||
case 3:
|
|
||||||
dest[*dest_pos+2] = src[*src_pos+2]
|
|
||||||
fallthrough
|
|
||||||
case 2:
|
|
||||||
dest[*dest_pos+1] = src[*src_pos+1]
|
|
||||||
fallthrough
|
|
||||||
case 1:
|
|
||||||
dest[*dest_pos] = src[*src_pos]
|
|
||||||
default:
|
|
||||||
panic("invalid width")
|
|
||||||
}
|
|
||||||
*dest_pos += w
|
|
||||||
*src_pos += w
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is an alphabetical
|
|
||||||
// * character, a digit, '_', or '-'.
|
|
||||||
// */
|
|
||||||
|
|
||||||
func is_alpha(b byte) bool {
|
|
||||||
return (b >= '0' && b <= '9') ||
|
|
||||||
(b >= 'A' && b <= 'Z') ||
|
|
||||||
(b >= 'a' && b <= 'z') ||
|
|
||||||
b == '_' || b == '-'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
func is_digit(b byte) bool {
|
|
||||||
return b >= '0' && b <= '9'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Get the value of a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
func as_digit(b byte) int {
|
|
||||||
return int(b) - '0'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
func is_hex(b byte) bool {
|
|
||||||
return (b >= '0' && b <= '9') ||
|
|
||||||
(b >= 'A' && b <= 'F') ||
|
|
||||||
(b >= 'a' && b <= 'f')
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
func as_hex(b byte) int {
|
|
||||||
if b >= 'A' && b <= 'F' {
|
|
||||||
return int(b) - 'A' + 10
|
|
||||||
} else if b >= 'a' && b <= 'f' {
|
|
||||||
return int(b) - 'a' + 10
|
|
||||||
}
|
|
||||||
return int(b) - '0'
|
|
||||||
}
|
|
||||||
|
|
||||||
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, tab, or NUL.
|
|
||||||
// */
|
|
||||||
func is_blankz_at(b []byte, i int) bool {
|
|
||||||
return is_blank(b[i]) || is_breakz_at(b, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a line break.
|
|
||||||
// */
|
|
||||||
func is_break_at(b []byte, i int) bool {
|
|
||||||
return b[i] == '\r' || /* CR (#xD)*/
|
|
||||||
b[i] == '\n' || /* LF (#xA) */
|
|
||||||
(b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */
|
|
||||||
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */
|
|
||||||
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */
|
|
||||||
}
|
|
||||||
|
|
||||||
func is_breakz_at(b []byte, i int) bool {
|
|
||||||
return is_break_at(b, i) || is_z(b[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
func is_crlf_at(b []byte, i int) bool {
|
|
||||||
return b[i] == '\r' && b[i+1] == '\n'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is NUL.
|
|
||||||
// */
|
|
||||||
func is_z(b byte) bool {
|
|
||||||
return b == 0x0
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is space.
|
|
||||||
// */
|
|
||||||
func is_space(b byte) bool {
|
|
||||||
return b == ' '
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is tab.
|
|
||||||
// */
|
|
||||||
func is_tab(b byte) bool {
|
|
||||||
return b == '\t'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is blank (space or tab).
|
|
||||||
// */
|
|
||||||
func is_blank(b byte) bool {
|
|
||||||
return is_space(b) || is_tab(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character is ASCII.
|
|
||||||
// */
|
|
||||||
func is_ascii(b byte) bool {
|
|
||||||
return b <= '\x7f'
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character can be printed unescaped.
|
|
||||||
// */
|
|
||||||
func is_printable_at(b []byte, i int) bool {
|
|
||||||
return ((b[i] == 0x0A) || /* . == #x0A */
|
|
||||||
(b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */
|
|
||||||
(b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */
|
|
||||||
(b[i] > 0xC2 && b[i] < 0xED) ||
|
|
||||||
(b[i] == 0xED && b[i+1] < 0xA0) ||
|
|
||||||
(b[i] == 0xEE) ||
|
|
||||||
(b[i] == 0xEF && /* && . != #xFEFF */
|
|
||||||
!(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
|
|
||||||
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
|
|
||||||
}
|
|
||||||
|
|
||||||
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
|
||||||
// collapse the slice
|
|
||||||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
|
||||||
if parser.tokens_head != len(parser.tokens) {
|
|
||||||
// move the tokens down
|
|
||||||
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
|
||||||
}
|
|
||||||
// readjust the length
|
|
||||||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
|
||||||
parser.tokens_head = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.tokens = append(parser.tokens, *token)
|
|
||||||
if pos < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
|
||||||
parser.tokens[parser.tokens_head+pos] = *token
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is BOM.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
func is_bom_at(b []byte, i int) bool {
|
|
||||||
return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// #ifdef HAVE_CONFIG_H
|
|
||||||
// #include <config.h>
|
|
||||||
// #endif
|
|
||||||
//
|
|
||||||
// #include "./yaml.h"
|
|
||||||
//
|
|
||||||
// #include <assert.h>
|
|
||||||
// #include <limits.h>
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Memory management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_malloc(size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_realloc(void *ptr, size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_free(void *ptr);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_char_t *)
|
|
||||||
// yaml_strdup(const yaml_char_t *);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Reader: Ensure that the buffer contains at least `length` characters.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Scanner: Ensure that the token stack contains at least one token ready.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_RAW_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to decode the whole raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output raw buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to encode the whole output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of other stacks and queues.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INITIAL_STACK_SIZE 16
|
|
||||||
// #define INITIAL_QUEUE_SIZE 16
|
|
||||||
// #define INITIAL_STRING_SIZE 16
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Buffer management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define BUFFER_INIT(context,buffer,size) \
|
|
||||||
// (((buffer).start = yaml_malloc(size)) ? \
|
|
||||||
// ((buffer).last = (buffer).pointer = (buffer).start, \
|
|
||||||
// (buffer).end = (buffer).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define BUFFER_DEL(context,buffer) \
|
|
||||||
// (yaml_free((buffer).start), \
|
|
||||||
// (buffer).start = (buffer).pointer = (buffer).end = 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// typedef struct {
|
|
||||||
// yaml_char_t *start;
|
|
||||||
// yaml_char_t *end;
|
|
||||||
// yaml_char_t *pointer;
|
|
||||||
// } yaml_string_t;
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_extend(yaml_char_t **start,
|
|
||||||
// yaml_char_t **pointer, yaml_char_t **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_join(
|
|
||||||
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
|
|
||||||
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
|
|
||||||
//
|
|
||||||
// #define NULL_STRING { NULL, NULL, NULL }
|
|
||||||
//
|
|
||||||
// #define STRING(string,length) { (string), (string)+(length), (string) }
|
|
||||||
//
|
|
||||||
// #define STRING_ASSIGN(value,string,length) \
|
|
||||||
// ((value).start = (string), \
|
|
||||||
// (value).end = (string)+(length), \
|
|
||||||
// (value).pointer = (string))
|
|
||||||
//
|
|
||||||
// #define STRING_INIT(context,string,size) \
|
|
||||||
// (((string).start = yaml_malloc(size)) ? \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// (string).end = (string).start+(size), \
|
|
||||||
// memset((string).start, 0, (size)), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STRING_DEL(context,string) \
|
|
||||||
// (yaml_free((string).start), \
|
|
||||||
// (string).start = (string).pointer = (string).end = 0)
|
|
||||||
//
|
|
||||||
// #define STRING_EXTEND(context,string) \
|
|
||||||
// (((string).pointer+5 < (string).end) \
|
|
||||||
// || yaml_string_extend(&(string).start, \
|
|
||||||
// &(string).pointer, &(string).end))
|
|
||||||
//
|
|
||||||
// #define CLEAR(context,string) \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// memset((string).start, 0, (string).end-(string).start))
|
|
||||||
//
|
|
||||||
// #define JOIN(context,string_a,string_b) \
|
|
||||||
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
|
|
||||||
// &(string_a).end, &(string_b).start, \
|
|
||||||
// &(string_b).pointer, &(string_b).end)) ? \
|
|
||||||
// ((string_b).pointer = (string_b).start, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String check operations.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the octet at the specified position.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK_AT(string,octet,offset) \
|
|
||||||
// ((string).pointer[offset] == (yaml_char_t)(octet))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the current octet in the buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is an alphabetical
|
|
||||||
// * character, a digit, '_', or '-'.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
|
|
||||||
// (string).pointer[offset] == '_' || \
|
|
||||||
// (string).pointer[offset] == '-')
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9'))
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0')
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f'))
|
|
||||||
//
|
|
||||||
// #define IS_HEX(string) IS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
//
|
|
||||||
// #define AS_HEX(string) AS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is ASCII.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ASCII_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
|
|
||||||
//
|
|
||||||
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character can be printed unescaped.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
|
|
||||||
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
|
|
||||||
// && (string).pointer[offset] <= 0x7E) \
|
|
||||||
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
|
|
||||||
// && (string).pointer[offset+1] >= 0xA0) \
|
|
||||||
// || ((string).pointer[offset] > 0xC2 \
|
|
||||||
// && (string).pointer[offset] < 0xED) \
|
|
||||||
// || ((string).pointer[offset] == 0xED \
|
|
||||||
// && (string).pointer[offset+1] < 0xA0) \
|
|
||||||
// || ((string).pointer[offset] == 0xEE) \
|
|
||||||
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
|
|
||||||
// && (string).pointer[offset+2] == 0xBF) \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBF \
|
|
||||||
// && ((string).pointer[offset+2] == 0xBE \
|
|
||||||
// || (string).pointer[offset+2] == 0xBF))))
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_Z(string) IS_Z_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is BOM.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BOM_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\xEF',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\xBB',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
|
|
||||||
//
|
|
||||||
// #define IS_BOM(string) IS_BOM_AT(string,0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is space.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is tab.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_TAB(string) IS_TAB_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is blank (space or tab).
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANK_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a line break.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
|
|
||||||
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
|
|
||||||
// || (CHECK_AT((string),'\xC2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
|
|
||||||
//
|
|
||||||
// #define IS_CRLF_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
|
|
||||||
//
|
|
||||||
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ_AT(string,offset) \
|
|
||||||
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, tab, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ_AT(string,offset) \
|
|
||||||
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Determine the width of the character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define WIDTH_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
|
|
||||||
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
|
|
||||||
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
|
|
||||||
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
|
|
||||||
//
|
|
||||||
// #define WIDTH(string) WIDTH_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Move the string pointer to the next character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define MOVE(string) ((string).pointer += WIDTH((string)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Copy a character and move the pointers of both strings.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define COPY(string_a,string_b) \
|
|
||||||
// ((*(string_b).pointer & 0x80) == 0x00 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Stack and queue management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_stack_extend(void **start, void **top, void **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
|
|
||||||
//
|
|
||||||
// #define STACK_INIT(context,stack,size) \
|
|
||||||
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
|
|
||||||
// ((stack).top = (stack).start, \
|
|
||||||
// (stack).end = (stack).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STACK_DEL(context,stack) \
|
|
||||||
// (yaml_free((stack).start), \
|
|
||||||
// (stack).start = (stack).top = (stack).end = 0)
|
|
||||||
//
|
|
||||||
// #define STACK_EMPTY(context,stack) \
|
|
||||||
// ((stack).start == (stack).top)
|
|
||||||
//
|
|
||||||
// #define PUSH(context,stack,value) \
|
|
||||||
// (((stack).top != (stack).end \
|
|
||||||
// || yaml_stack_extend((void **)&(stack).start, \
|
|
||||||
// (void **)&(stack).top, (void **)&(stack).end)) ? \
|
|
||||||
// (*((stack).top++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define POP(context,stack) \
|
|
||||||
// (*(--(stack).top))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INIT(context,queue,size) \
|
|
||||||
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
|
|
||||||
// ((queue).head = (queue).tail = (queue).start, \
|
|
||||||
// (queue).end = (queue).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define QUEUE_DEL(context,queue) \
|
|
||||||
// (yaml_free((queue).start), \
|
|
||||||
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
|
|
||||||
//
|
|
||||||
// #define QUEUE_EMPTY(context,queue) \
|
|
||||||
// ((queue).head == (queue).tail)
|
|
||||||
//
|
|
||||||
// #define ENQUEUE(context,queue,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (*((queue).tail++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define DEQUEUE(context,queue) \
|
|
||||||
// (*((queue).head++))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INSERT(context,queue,index,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (memmove((queue).head+(index)+1,(queue).head+(index), \
|
|
||||||
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
|
|
||||||
// *((queue).head+(index)) = value, \
|
|
||||||
// (queue).tail++, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Token initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
|
|
||||||
// (memset(&(token), 0, sizeof(yaml_token_t)), \
|
|
||||||
// (token).type = (token_type), \
|
|
||||||
// (token).start_mark = (token_start_mark), \
|
|
||||||
// (token).end_mark = (token_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.stream_start.encoding = (token_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.alias.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.anchor.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag.handle = (token_handle), \
|
|
||||||
// (token).data.tag.suffix = (token_suffix))
|
|
||||||
//
|
|
||||||
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.scalar.value = (token_value), \
|
|
||||||
// (token).data.scalar.length = (token_length), \
|
|
||||||
// (token).data.scalar.style = (token_style))
|
|
||||||
//
|
|
||||||
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.version_directive.major = (token_major), \
|
|
||||||
// (token).data.version_directive.minor = (token_minor))
|
|
||||||
//
|
|
||||||
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag_directive.handle = (token_handle), \
|
|
||||||
// (token).data.tag_directive.prefix = (token_prefix))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Event initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
|
|
||||||
// (memset(&(event), 0, sizeof(yaml_event_t)), \
|
|
||||||
// (event).type = (event_type), \
|
|
||||||
// (event).start_mark = (event_start_mark), \
|
|
||||||
// (event).end_mark = (event_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.stream_start.encoding = (event_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
|
|
||||||
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_start.version_directive = (event_version_directive), \
|
|
||||||
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
|
|
||||||
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
|
|
||||||
// (event).data.document_start.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_end.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.alias.anchor = (event_anchor))
|
|
||||||
//
|
|
||||||
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
|
|
||||||
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.scalar.anchor = (event_anchor), \
|
|
||||||
// (event).data.scalar.tag = (event_tag), \
|
|
||||||
// (event).data.scalar.value = (event_value), \
|
|
||||||
// (event).data.scalar.length = (event_length), \
|
|
||||||
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
|
|
||||||
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
|
|
||||||
// (event).data.scalar.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.sequence_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.sequence_start.tag = (event_tag), \
|
|
||||||
// (event).data.sequence_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.sequence_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.mapping_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.mapping_start.tag = (event_tag), \
|
|
||||||
// (event).data.mapping_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.mapping_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Document initializer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
|
|
||||||
// document_version_directive,document_tag_directives_start, \
|
|
||||||
// document_tag_directives_end,document_start_implicit, \
|
|
||||||
// document_end_implicit,document_start_mark,document_end_mark) \
|
|
||||||
// (memset(&(document), 0, sizeof(yaml_document_t)), \
|
|
||||||
// (document).nodes.start = (document_nodes_start), \
|
|
||||||
// (document).nodes.end = (document_nodes_end), \
|
|
||||||
// (document).nodes.top = (document_nodes_start), \
|
|
||||||
// (document).version_directive = (document_version_directive), \
|
|
||||||
// (document).tag_directives.start = (document_tag_directives_start), \
|
|
||||||
// (document).tag_directives.end = (document_tag_directives_end), \
|
|
||||||
// (document).start_implicit = (document_start_implicit), \
|
|
||||||
// (document).end_implicit = (document_end_implicit), \
|
|
||||||
// (document).start_mark = (document_start_mark), \
|
|
||||||
// (document).end_mark = (document_end_mark))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Node initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
|
|
||||||
// (memset(&(node), 0, sizeof(yaml_node_t)), \
|
|
||||||
// (node).type = (node_type), \
|
|
||||||
// (node).tag = (node_tag), \
|
|
||||||
// (node).start_mark = (node_start_mark), \
|
|
||||||
// (node).end_mark = (node_end_mark))
|
|
||||||
//
|
|
||||||
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.scalar.value = (node_value), \
|
|
||||||
// (node).data.scalar.length = (node_length), \
|
|
||||||
// (node).data.scalar.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.sequence.items.start = (node_items_start), \
|
|
||||||
// (node).data.sequence.items.end = (node_items_end), \
|
|
||||||
// (node).data.sequence.items.top = (node_items_start), \
|
|
||||||
// (node).data.sequence.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.mapping.pairs.start = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.pairs.end = (node_pairs_end), \
|
|
||||||
// (node).data.mapping.pairs.top = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.style = (node_style))
|
|
||||||
//
|
|
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
|
@ -1,953 +0,0 @@
|
||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The version directive data. */
|
|
||||||
type yaml_version_directive_t struct {
|
|
||||||
major int // The major version number
|
|
||||||
minor int // The minor version number
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The tag directive data. */
|
|
||||||
type yaml_tag_directive_t struct {
|
|
||||||
handle []byte // The tag handle
|
|
||||||
prefix []byte // The tag prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
type yaml_encoding_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the parser choose the encoding. */
|
|
||||||
yaml_ANY_ENCODING yaml_encoding_t = iota
|
|
||||||
/** The defau lt UTF-8 encoding. */
|
|
||||||
yaml_UTF8_ENCODING
|
|
||||||
/** The UTF-16-LE encoding with BOM. */
|
|
||||||
yaml_UTF16LE_ENCODING
|
|
||||||
/** The UTF-16-BE encoding with BOM. */
|
|
||||||
yaml_UTF16BE_ENCODING
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Line break types. */
|
|
||||||
type yaml_break_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
|
|
||||||
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
|
|
||||||
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
|
|
||||||
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Many bad things could happen with the parser and emitter. */
|
|
||||||
type YAML_error_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** No error is produced. */
|
|
||||||
yaml_NO_ERROR YAML_error_type_t = iota
|
|
||||||
|
|
||||||
/** Cannot allocate or reallocate a block of memory. */
|
|
||||||
yaml_MEMORY_ERROR
|
|
||||||
|
|
||||||
/** Cannot read or decode the input stream. */
|
|
||||||
yaml_READER_ERROR
|
|
||||||
/** Cannot scan the input stream. */
|
|
||||||
yaml_SCANNER_ERROR
|
|
||||||
/** Cannot parse the input stream. */
|
|
||||||
yaml_PARSER_ERROR
|
|
||||||
/** Cannot compose a YAML document. */
|
|
||||||
yaml_COMPOSER_ERROR
|
|
||||||
|
|
||||||
/** Cannot write to the output stream. */
|
|
||||||
yaml_WRITER_ERROR
|
|
||||||
/** Cannot emit a YAML stream. */
|
|
||||||
yaml_EMITTER_ERROR
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The pointer position. */
|
|
||||||
type YAML_mark_t struct {
|
|
||||||
/** The position index. */
|
|
||||||
index int
|
|
||||||
|
|
||||||
/** The position line. */
|
|
||||||
line int
|
|
||||||
|
|
||||||
/** The position column. */
|
|
||||||
column int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m YAML_mark_t) String() string {
|
|
||||||
return fmt.Sprintf("line %d, column %d", m.line, m.column)
|
|
||||||
}
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup styles Node Styles
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_style_t int
|
|
||||||
|
|
||||||
/** Scalar styles. */
|
|
||||||
type yaml_scalar_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
|
||||||
|
|
||||||
/** The plain scalar style. */
|
|
||||||
yaml_PLAIN_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The single-quoted scalar style. */
|
|
||||||
yaml_SINGLE_QUOTED_SCALAR_STYLE
|
|
||||||
/** The double-quoted scalar style. */
|
|
||||||
yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The literal scalar style. */
|
|
||||||
yaml_LITERAL_SCALAR_STYLE
|
|
||||||
/** The folded scalar style. */
|
|
||||||
yaml_FOLDED_SCALAR_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Sequence styles. */
|
|
||||||
type yaml_sequence_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
|
||||||
|
|
||||||
/** The block sequence style. */
|
|
||||||
yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
/** The flow sequence style. */
|
|
||||||
yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Mapping styles. */
|
|
||||||
type yaml_mapping_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
|
||||||
|
|
||||||
/** The block mapping style. */
|
|
||||||
yaml_BLOCK_MAPPING_STYLE
|
|
||||||
/** The flow mapping style. */
|
|
||||||
yaml_FLOW_MAPPING_STYLE
|
|
||||||
|
|
||||||
/* yaml_FLOW_SET_MAPPING_STYLE */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup tokens Tokens
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Token types. */
|
|
||||||
type yaml_token_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty token. */
|
|
||||||
yaml_NO_TOKEN yaml_token_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START token. */
|
|
||||||
yaml_STREAM_START_TOKEN
|
|
||||||
/** A STREAM-END token. */
|
|
||||||
yaml_STREAM_END_TOKEN
|
|
||||||
|
|
||||||
/** A VERSION-DIRECTIVE token. */
|
|
||||||
yaml_VERSION_DIRECTIVE_TOKEN
|
|
||||||
/** A TAG-DIRECTIVE token. */
|
|
||||||
yaml_TAG_DIRECTIVE_TOKEN
|
|
||||||
/** A DOCUMENT-START token. */
|
|
||||||
yaml_DOCUMENT_START_TOKEN
|
|
||||||
/** A DOCUMENT-END token. */
|
|
||||||
yaml_DOCUMENT_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-SEQUENCE-START token. */
|
|
||||||
yaml_BLOCK_SEQUENCE_START_TOKEN
|
|
||||||
/** A BLOCK-SEQUENCE-END token. */
|
|
||||||
yaml_BLOCK_MAPPING_START_TOKEN
|
|
||||||
/** A BLOCK-END token. */
|
|
||||||
yaml_BLOCK_END_TOKEN
|
|
||||||
|
|
||||||
/** A FLOW-SEQUENCE-START token. */
|
|
||||||
yaml_FLOW_SEQUENCE_START_TOKEN
|
|
||||||
/** A FLOW-SEQUENCE-END token. */
|
|
||||||
yaml_FLOW_SEQUENCE_END_TOKEN
|
|
||||||
/** A FLOW-MAPPING-START token. */
|
|
||||||
yaml_FLOW_MAPPING_START_TOKEN
|
|
||||||
/** A FLOW-MAPPING-END token. */
|
|
||||||
yaml_FLOW_MAPPING_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-ENTRY token. */
|
|
||||||
yaml_BLOCK_ENTRY_TOKEN
|
|
||||||
/** A FLOW-ENTRY token. */
|
|
||||||
yaml_FLOW_ENTRY_TOKEN
|
|
||||||
/** A KEY token. */
|
|
||||||
yaml_KEY_TOKEN
|
|
||||||
/** A VALUE token. */
|
|
||||||
yaml_VALUE_TOKEN
|
|
||||||
|
|
||||||
/** An ALIAS token. */
|
|
||||||
yaml_ALIAS_TOKEN
|
|
||||||
/** An ANCHOR token. */
|
|
||||||
yaml_ANCHOR_TOKEN
|
|
||||||
/** A TAG token. */
|
|
||||||
yaml_TAG_TOKEN
|
|
||||||
/** A SCALAR token. */
|
|
||||||
yaml_SCALAR_TOKEN
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The token structure. */
|
|
||||||
type yaml_token_t struct {
|
|
||||||
|
|
||||||
/** The token type. */
|
|
||||||
token_type yaml_token_type_t
|
|
||||||
|
|
||||||
/** The token data. */
|
|
||||||
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */
|
|
||||||
/** The anchor (for @c ). */
|
|
||||||
/** The scalar value (for @c ). */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
|
|
||||||
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
|
|
||||||
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
|
|
||||||
version_directive yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
|
|
||||||
prefix []byte
|
|
||||||
|
|
||||||
/** The beginning of the token. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the token. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
|
|
||||||
major, minor int
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup events Events
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Event types. */
|
|
||||||
type yaml_event_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty event. */
|
|
||||||
yaml_NO_EVENT yaml_event_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START event. */
|
|
||||||
yaml_STREAM_START_EVENT
|
|
||||||
/** A STREAM-END event. */
|
|
||||||
yaml_STREAM_END_EVENT
|
|
||||||
|
|
||||||
/** A DOCUMENT-START event. */
|
|
||||||
yaml_DOCUMENT_START_EVENT
|
|
||||||
/** A DOCUMENT-END event. */
|
|
||||||
yaml_DOCUMENT_END_EVENT
|
|
||||||
|
|
||||||
/** An ALIAS event. */
|
|
||||||
yaml_ALIAS_EVENT
|
|
||||||
/** A SCALAR event. */
|
|
||||||
yaml_SCALAR_EVENT
|
|
||||||
|
|
||||||
/** A SEQUENCE-START event. */
|
|
||||||
yaml_SEQUENCE_START_EVENT
|
|
||||||
/** A SEQUENCE-END event. */
|
|
||||||
yaml_SEQUENCE_END_EVENT
|
|
||||||
|
|
||||||
/** A MAPPING-START event. */
|
|
||||||
yaml_MAPPING_START_EVENT
|
|
||||||
/** A MAPPING-END event. */
|
|
||||||
yaml_MAPPING_END_EVENT
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The event structure. */
|
|
||||||
type yaml_event_t struct {
|
|
||||||
|
|
||||||
/** The event type. */
|
|
||||||
event_type yaml_event_type_t
|
|
||||||
|
|
||||||
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The beginning and end of the tag directives list. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */
|
|
||||||
/** Is the document indicator implicit? */
|
|
||||||
implicit bool
|
|
||||||
|
|
||||||
/** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The tag. */
|
|
||||||
tag []byte
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** Is the tag optional for the plain style? */
|
|
||||||
plain_implicit bool
|
|
||||||
/** Is the tag optional for any non-plain style? */
|
|
||||||
quoted_implicit bool
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The sequence style. */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_style_t
|
|
||||||
|
|
||||||
/** The beginning of the event. */
|
|
||||||
start_mark, end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup nodes Nodes
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** The tag @c !!null with the only possible value: @c null. */
|
|
||||||
yaml_NULL_TAG = "tag:yaml.org,2002:null"
|
|
||||||
/** The tag @c !!bool with the values: @c true and @c falce. */
|
|
||||||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
|
|
||||||
/** The tag @c !!str for string values. */
|
|
||||||
yaml_STR_TAG = "tag:yaml.org,2002:str"
|
|
||||||
/** The tag @c !!int for integer values. */
|
|
||||||
yaml_INT_TAG = "tag:yaml.org,2002:int"
|
|
||||||
/** The tag @c !!float for float values. */
|
|
||||||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
|
|
||||||
/** The tag @c !!timestamp for date and time values. */
|
|
||||||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
|
|
||||||
|
|
||||||
/** The tag @c !!seq is used to denote sequences. */
|
|
||||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
|
|
||||||
/** The tag @c !!map is used to denote mapping. */
|
|
||||||
yaml_MAP_TAG = "tag:yaml.org,2002:map"
|
|
||||||
|
|
||||||
/** The default scalar tag is @c !!str. */
|
|
||||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
|
|
||||||
/** The default sequence tag is @c !!seq. */
|
|
||||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
|
|
||||||
/** The default mapping tag is @c !!map. */
|
|
||||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
|
|
||||||
|
|
||||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Node types. */
|
|
||||||
type yaml_node_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty node. */
|
|
||||||
yaml_NO_NODE yaml_node_type_t = iota
|
|
||||||
|
|
||||||
/** A scalar node. */
|
|
||||||
yaml_SCALAR_NODE
|
|
||||||
/** A sequence node. */
|
|
||||||
yaml_SEQUENCE_NODE
|
|
||||||
/** A mapping node. */
|
|
||||||
yaml_MAPPING_NODE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** An element of a sequence node. */
|
|
||||||
type yaml_node_item_t int
|
|
||||||
|
|
||||||
/** An element of a mapping node. */
|
|
||||||
type yaml_node_pair_t struct {
|
|
||||||
/** The key of the element. */
|
|
||||||
key int
|
|
||||||
/** The value of the element. */
|
|
||||||
value int
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The node structure. */
|
|
||||||
type yaml_node_t struct {
|
|
||||||
|
|
||||||
/** The node type. */
|
|
||||||
node_type yaml_node_type_t
|
|
||||||
|
|
||||||
/** The node tag. */
|
|
||||||
tag []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
|
|
||||||
scalar struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
|
|
||||||
sequence struct {
|
|
||||||
/** The stack of sequence items. */
|
|
||||||
items []yaml_node_item_t
|
|
||||||
/** The sequence style. */
|
|
||||||
style yaml_sequence_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
|
|
||||||
mapping struct {
|
|
||||||
/** The stack of mapping pairs (key, value). */
|
|
||||||
pairs []yaml_node_pair_t
|
|
||||||
/** The mapping style. */
|
|
||||||
style yaml_mapping_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The beginning of the node. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the node. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The document structure. */
|
|
||||||
type yaml_document_t struct {
|
|
||||||
|
|
||||||
/** The document nodes. */
|
|
||||||
nodes []yaml_node_t
|
|
||||||
|
|
||||||
/** The version directive. */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tags []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** Is the document start indicator implicit? */
|
|
||||||
start_implicit bool
|
|
||||||
/** Is the document end indicator implicit? */
|
|
||||||
end_implicit bool
|
|
||||||
|
|
||||||
/** The beginning of the document. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the document. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a read handler.
|
|
||||||
*
|
|
||||||
* The read handler is called when the parser needs to read more bytes from the
|
|
||||||
* source. The handler should write not more than @a size bytes to the @a
|
|
||||||
* buffer. The number of written bytes should be set to the @a length variable.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_parser_set_input().
|
|
||||||
* @param[out] buffer The buffer to write the data from the source.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
* @param[out] size_read The actual number of bytes read from the source.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0. On EOF, the handler should set the
|
|
||||||
* @a size_read to @c 0 and return @c 1.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds information about a potential simple key.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_simple_key_t struct {
|
|
||||||
/** Is a simple key possible? */
|
|
||||||
possible bool
|
|
||||||
|
|
||||||
/** Is a simple key required? */
|
|
||||||
required bool
|
|
||||||
|
|
||||||
/** The number of the token. */
|
|
||||||
token_number int
|
|
||||||
|
|
||||||
/** The position mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The states of the parser.
|
|
||||||
*/
|
|
||||||
type yaml_parser_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
|
|
||||||
/** Expect the beginning of an implicit document. */
|
|
||||||
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START. */
|
|
||||||
yaml_PARSE_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_PARSE_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_PARSE_DOCUMENT_END_STATE
|
|
||||||
/** Expect a block node. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_STATE
|
|
||||||
/** Expect a block node or indentless sequence. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
|
|
||||||
/** Expect a flow node. */
|
|
||||||
yaml_PARSE_FLOW_NODE_STATE
|
|
||||||
/** Expect the first entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect an entry of an indentless sequence. */
|
|
||||||
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a block mapping key. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a block mapping value. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect a key of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the and of an ordered mapping entry. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect an empty value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_PARSE_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds aliases data.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_alias_data_t struct {
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
/** The node id. */
|
|
||||||
index int
|
|
||||||
/** The anchor mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The parser structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_parser_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_parser_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
/** The byte about which the problem occured. */
|
|
||||||
problem_offset int
|
|
||||||
/** The problematic value (@c -1 is none). */
|
|
||||||
problem_value int
|
|
||||||
/** The problem position. */
|
|
||||||
problem_mark YAML_mark_t
|
|
||||||
/** The error context. */
|
|
||||||
context string
|
|
||||||
/** The context position. */
|
|
||||||
context_mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Reader stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Read handler. */
|
|
||||||
read_handler yaml_read_handler_t
|
|
||||||
|
|
||||||
/** Reader input data. */
|
|
||||||
input_reader io.Reader
|
|
||||||
input []byte
|
|
||||||
input_pos int
|
|
||||||
|
|
||||||
/** EOF flag */
|
|
||||||
eof bool
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/* The number of unread characters in the buffer. */
|
|
||||||
unread int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The input encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The offset of the current position (in bytes). */
|
|
||||||
offset int
|
|
||||||
|
|
||||||
/** The mark of the current position. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Scanner stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Have we started to scan the input stream? */
|
|
||||||
stream_start_produced bool
|
|
||||||
|
|
||||||
/** Have we reached the end of the input stream? */
|
|
||||||
stream_end_produced bool
|
|
||||||
|
|
||||||
/** The number of unclosed '[' and '{' indicators. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** The tokens queue. */
|
|
||||||
tokens []yaml_token_t
|
|
||||||
tokens_head int
|
|
||||||
|
|
||||||
/** The number of tokens fetched from the queue. */
|
|
||||||
tokens_parsed int
|
|
||||||
|
|
||||||
/* Does the tokens queue contain a token ready for dequeueing. */
|
|
||||||
token_available bool
|
|
||||||
|
|
||||||
/** The indentation levels stack. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** May a simple key occur at the current position? */
|
|
||||||
simple_key_allowed bool
|
|
||||||
|
|
||||||
/** The stack of simple keys. */
|
|
||||||
simple_keys []yaml_simple_key_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Parser stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The parser states stack. */
|
|
||||||
states []yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The current parser state. */
|
|
||||||
state yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The stack of marks. */
|
|
||||||
marks []YAML_mark_t
|
|
||||||
|
|
||||||
/** The list of TAG directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The alias data. */
|
|
||||||
aliases []yaml_alias_data_t
|
|
||||||
|
|
||||||
/** The currently parsed document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a write handler.
|
|
||||||
*
|
|
||||||
* The write handler is called when the emitter needs to flush the accumulated
|
|
||||||
* characters to the output. The handler should write @a size bytes of the
|
|
||||||
* @a buffer to the output.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_emitter_set_output().
|
|
||||||
* @param[in] buffer The buffer with bytes to be written.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
|
||||||
|
|
||||||
/** The emitter states. */
|
|
||||||
type yaml_emitter_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
|
||||||
/** Expect the first DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_EMIT_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_END_STATE
|
|
||||||
/** Expect the first item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect the key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_EMIT_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The emitter structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_emitter_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_emitter_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Writer stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Write handler. */
|
|
||||||
write_handler yaml_write_handler_t
|
|
||||||
|
|
||||||
/** Standard (string or file) output data. */
|
|
||||||
output_buffer *[]byte
|
|
||||||
output_writer io.Writer
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Emitter stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the output is in the canonical style? */
|
|
||||||
canonical bool
|
|
||||||
/** The number of indentation spaces. */
|
|
||||||
best_indent int
|
|
||||||
/** The preferred width of the output lines. */
|
|
||||||
best_width int
|
|
||||||
/** Allow unescaped non-ASCII characters? */
|
|
||||||
unicode bool
|
|
||||||
/** The preferred line break. */
|
|
||||||
line_break yaml_break_t
|
|
||||||
|
|
||||||
/** The stack of states. */
|
|
||||||
states []yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The current emitter state. */
|
|
||||||
state yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The event queue. */
|
|
||||||
events []yaml_event_t
|
|
||||||
events_head int
|
|
||||||
|
|
||||||
/** The stack of indentation levels. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** The current flow level. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** Is it the document root context? */
|
|
||||||
root_context bool
|
|
||||||
/** Is it a sequence context? */
|
|
||||||
sequence_context bool
|
|
||||||
/** Is it a mapping context? */
|
|
||||||
mapping_context bool
|
|
||||||
/** Is it a simple mapping key context? */
|
|
||||||
simple_key_context bool
|
|
||||||
|
|
||||||
/** The current line. */
|
|
||||||
line int
|
|
||||||
/** The current column. */
|
|
||||||
column int
|
|
||||||
/** If the last character was a whitespace? */
|
|
||||||
whitespace bool
|
|
||||||
/** If the last character was an indentation character (' ', '-', '?', ':')? */
|
|
||||||
indention bool
|
|
||||||
/** If an explicit document end is required? */
|
|
||||||
open_ended bool
|
|
||||||
|
|
||||||
/** Anchor analysis. */
|
|
||||||
anchor_data struct {
|
|
||||||
/** The anchor value. */
|
|
||||||
anchor []byte
|
|
||||||
/** Is it an alias? */
|
|
||||||
alias bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Tag analysis. */
|
|
||||||
tag_data struct {
|
|
||||||
/** The tag handle. */
|
|
||||||
handle []byte
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Scalar analysis. */
|
|
||||||
scalar_data struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** Does the scalar contain line breaks? */
|
|
||||||
multiline bool
|
|
||||||
/** Can the scalar be expessed in the flow plain style? */
|
|
||||||
flow_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the block plain style? */
|
|
||||||
block_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the single quoted style? */
|
|
||||||
single_quoted_allowed bool
|
|
||||||
/** Can the scalar be expressed in the literal or folded styles? */
|
|
||||||
block_allowed bool
|
|
||||||
/** The output style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the stream was already opened? */
|
|
||||||
opened bool
|
|
||||||
/** If the stream was already closed? */
|
|
||||||
closed bool
|
|
||||||
|
|
||||||
/** The information associated with the document nodes. */
|
|
||||||
anchors *struct {
|
|
||||||
/** The number of references. */
|
|
||||||
references int
|
|
||||||
/** The anchor id. */
|
|
||||||
anchor int
|
|
||||||
/** If the node has been emitted? */
|
|
||||||
serialized bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The last assigned anchor id. */
|
|
||||||
last_anchor_id int
|
|
||||||
|
|
||||||
/** The currently emitted document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
1
vendor/github.com/containernetworking/cni/LICENSE
generated
vendored
1
vendor/github.com/containernetworking/cni/LICENSE
generated
vendored
|
@ -199,4 +199,3 @@
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
|
|
||||||
|
|
17
vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
generated
vendored
17
vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
generated
vendored
|
@ -18,7 +18,6 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
|
||||||
|
@ -58,25 +57,15 @@ func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs) er
|
||||||
}
|
}
|
||||||
|
|
||||||
func execPlugin(pluginPath string, netconf []byte, args CNIArgs) ([]byte, error) {
|
func execPlugin(pluginPath string, netconf []byte, args CNIArgs) ([]byte, error) {
|
||||||
return defaultRawExec.ExecPlugin(pluginPath, netconf, args.AsEnv())
|
|
||||||
}
|
|
||||||
|
|
||||||
var defaultRawExec = &RawExec{Stderr: os.Stderr}
|
|
||||||
|
|
||||||
type RawExec struct {
|
|
||||||
Stderr io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *RawExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
|
|
||||||
stdout := &bytes.Buffer{}
|
stdout := &bytes.Buffer{}
|
||||||
|
|
||||||
c := exec.Cmd{
|
c := exec.Cmd{
|
||||||
Env: environ,
|
Env: args.AsEnv(),
|
||||||
Path: pluginPath,
|
Path: pluginPath,
|
||||||
Args: []string{pluginPath},
|
Args: []string{pluginPath},
|
||||||
Stdin: bytes.NewBuffer(stdinData),
|
Stdin: bytes.NewBuffer(netconf),
|
||||||
Stdout: stdout,
|
Stdout: stdout,
|
||||||
Stderr: e.Stderr,
|
Stderr: os.Stderr,
|
||||||
}
|
}
|
||||||
if err := c.Run(); err != nil {
|
if err := c.Run(); err != nil {
|
||||||
return nil, pluginErr(err, stdout.Bytes())
|
return nil, pluginErr(err, stdout.Bytes())
|
||||||
|
|
72
vendor/github.com/containers/image/directory/directory_dest.go
generated
vendored
72
vendor/github.com/containers/image/directory/directory_dest.go
generated
vendored
|
@ -1,6 +1,9 @@
|
||||||
package directory
|
package directory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
@ -23,27 +26,66 @@ func (d *dirImageDestination) Reference() types.ImageReference {
|
||||||
return d.ref
|
return d.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||||
|
func (d *dirImageDestination) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
|
func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dirImageDestination) PutManifest(manifest []byte) error {
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
|
func (d *dirImageDestination) SupportsSignatures() error {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dirImageDestination) PutBlob(digest string, stream io.Reader) error {
|
// PutBlob writes contents of stream and returns its computed digest and size.
|
||||||
layerFile, err := os.Create(d.ref.layerPath(digest))
|
// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
|
||||||
|
// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
|
||||||
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
|
// to any other readers for download using the supplied digest.
|
||||||
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
|
func (d *dirImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
|
||||||
|
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return "", -1, err
|
||||||
}
|
}
|
||||||
defer layerFile.Close()
|
succeeded := false
|
||||||
if _, err := io.Copy(layerFile, stream); err != nil {
|
defer func() {
|
||||||
return err
|
blobFile.Close()
|
||||||
|
if !succeeded {
|
||||||
|
os.Remove(blobFile.Name())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
tee := io.TeeReader(stream, h)
|
||||||
|
|
||||||
|
size, err := io.Copy(blobFile, tee)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
}
|
}
|
||||||
if err := layerFile.Sync(); err != nil {
|
computedDigest := hex.EncodeToString(h.Sum(nil))
|
||||||
return err
|
if expectedSize != -1 && size != expectedSize {
|
||||||
|
return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
|
||||||
}
|
}
|
||||||
return nil
|
if err := blobFile.Sync(); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
if err := blobFile.Chmod(0644); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
blobPath := d.ref.layerPath(computedDigest)
|
||||||
|
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
succeeded = true
|
||||||
|
return "sha256:" + computedDigest, size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dirImageDestination) PutManifest(manifest []byte) error {
|
||||||
|
return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
|
func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
|
@ -54,3 +96,11 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
|
// WARNING: This does not have any transactional semantics:
|
||||||
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||||
|
func (d *dirImageDestination) Commit() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
13
vendor/github.com/containers/image/directory/directory_src.go
generated
vendored
13
vendor/github.com/containers/image/directory/directory_src.go
generated
vendored
|
@ -1,7 +1,6 @@
|
||||||
package directory
|
package directory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
@ -14,6 +13,7 @@ type dirImageSource struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newImageSource returns an ImageSource reading from an existing directory.
|
// newImageSource returns an ImageSource reading from an existing directory.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
func newImageSource(ref dirReference) types.ImageSource {
|
func newImageSource(ref dirReference) types.ImageSource {
|
||||||
return &dirImageSource{ref}
|
return &dirImageSource{ref}
|
||||||
}
|
}
|
||||||
|
@ -24,8 +24,12 @@ func (s *dirImageSource) Reference() types.ImageReference {
|
||||||
return s.ref
|
return s.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageSource, if any.
|
||||||
|
func (s *dirImageSource) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
// it's up to the caller to determine the MIME type of the returned manifest's bytes
|
// it's up to the caller to determine the MIME type of the returned manifest's bytes
|
||||||
func (s *dirImageSource) GetManifest(_ []string) ([]byte, string, error) {
|
func (s *dirImageSource) GetManifest() ([]byte, string, error) {
|
||||||
m, err := ioutil.ReadFile(s.ref.manifestPath())
|
m, err := ioutil.ReadFile(s.ref.manifestPath())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
|
@ -33,6 +37,7 @@ func (s *dirImageSource) GetManifest(_ []string) ([]byte, string, error) {
|
||||||
return m, "", err
|
return m, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||||
func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
||||||
r, err := os.Open(s.ref.layerPath(digest))
|
r, err := os.Open(s.ref.layerPath(digest))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -59,7 +64,3 @@ func (s *dirImageSource) GetSignatures() ([][]byte, error) {
|
||||||
}
|
}
|
||||||
return signatures, nil
|
return signatures, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dirImageSource) Delete() error {
|
|
||||||
return fmt.Errorf("directory#dirImageSource.Delete() not implmented")
|
|
||||||
}
|
|
||||||
|
|
26
vendor/github.com/containers/image/directory/directory_transport.go
generated
vendored
26
vendor/github.com/containers/image/directory/directory_transport.go
generated
vendored
|
@ -32,13 +32,17 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
|
||||||
// scope passed to this function will not be "", that value is always allowed.
|
// scope passed to this function will not be "", that value is always allowed.
|
||||||
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
|
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
if !strings.HasPrefix(scope, "/") {
|
if !strings.HasPrefix(scope, "/") {
|
||||||
return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
|
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
|
||||||
}
|
}
|
||||||
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
||||||
// and "" could be unexpectedly shadowed by the "/" entry.
|
// and "" could be unexpectedly shadowed by the "/" entry.
|
||||||
if scope == "/" {
|
if scope == "/" {
|
||||||
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
||||||
}
|
}
|
||||||
|
cleaned := filepath.Clean(scope)
|
||||||
|
if cleaned != scope {
|
||||||
|
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -124,21 +128,31 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImage returns a types.Image for this reference.
|
// NewImage returns a types.Image for this reference.
|
||||||
func (ref dirReference) NewImage(certPath string, tlsVerify bool) (types.Image, error) {
|
// The caller must call .Close() on the returned Image.
|
||||||
|
func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
src := newImageSource(ref)
|
src := newImageSource(ref)
|
||||||
return image.FromSource(src, nil), nil
|
return image.FromSource(src), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference.
|
// NewImageSource returns a types.ImageSource for this reference,
|
||||||
func (ref dirReference) NewImageSource(certPath string, tlsVerify bool) (types.ImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func (ref dirReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||||
return newImageSource(ref), nil
|
return newImageSource(ref), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
func (ref dirReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
|
func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
return newImageDestination(ref), nil
|
return newImageDestination(ref), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
|
||||||
|
return fmt.Errorf("Deleting images not implemented for dir: images")
|
||||||
|
}
|
||||||
|
|
||||||
// manifestPath returns a path for the manifest within a directory using our conventions.
|
// manifestPath returns a path for the manifest within a directory using our conventions.
|
||||||
func (ref dirReference) manifestPath() string {
|
func (ref dirReference) manifestPath() string {
|
||||||
return filepath.Join(ref.path, "manifest.json")
|
return filepath.Join(ref.path, "manifest.json")
|
||||||
|
|
29
vendor/github.com/containers/image/doc.go
generated
vendored
Normal file
29
vendor/github.com/containers/image/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
// Package image provides libraries and commands to interact with containers images.
|
||||||
|
//
|
||||||
|
// package main
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "fmt"
|
||||||
|
//
|
||||||
|
// "github.com/containers/image/docker"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// func main() {
|
||||||
|
// ref, err := docker.ParseReference("fedora")
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// img, err := ref.NewImage(nil)
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// defer img.Close()
|
||||||
|
// b, _, err := img.Manifest()
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// fmt.Printf("%s", string(b))
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// TODO(runcom)
|
||||||
|
package image
|
110
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
110
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
|
@ -14,6 +14,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/containers/image/types"
|
||||||
"github.com/docker/docker/pkg/homedir"
|
"github.com/docker/docker/pkg/homedir"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -35,38 +36,39 @@ const (
|
||||||
|
|
||||||
// dockerClient is configuration for dealing with a single Docker registry.
|
// dockerClient is configuration for dealing with a single Docker registry.
|
||||||
type dockerClient struct {
|
type dockerClient struct {
|
||||||
|
ctx *types.SystemContext
|
||||||
registry string
|
registry string
|
||||||
username string
|
username string
|
||||||
password string
|
password string
|
||||||
wwwAuthenticate string // Cache of a value set by ping() if scheme is not empty
|
wwwAuthenticate string // Cache of a value set by ping() if scheme is not empty
|
||||||
scheme string // Cache of a value returned by a successful ping() if not empty
|
scheme string // Cache of a value returned by a successful ping() if not empty
|
||||||
client *http.Client
|
client *http.Client
|
||||||
|
signatureBase signatureStorageBase
|
||||||
}
|
}
|
||||||
|
|
||||||
// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
||||||
func newDockerClient(refHostname, certPath string, tlsVerify bool) (*dockerClient, error) {
|
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
||||||
var registry string
|
func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) (*dockerClient, error) {
|
||||||
if refHostname == dockerHostname {
|
registry := ref.ref.Hostname()
|
||||||
|
if registry == dockerHostname {
|
||||||
registry = dockerRegistry
|
registry = dockerRegistry
|
||||||
} else {
|
|
||||||
registry = refHostname
|
|
||||||
}
|
}
|
||||||
username, password, err := getAuth(refHostname)
|
username, password, err := getAuth(ref.ref.Hostname())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var tr *http.Transport
|
var tr *http.Transport
|
||||||
if certPath != "" || !tlsVerify {
|
if ctx != nil && (ctx.DockerCertPath != "" || ctx.DockerInsecureSkipTLSVerify) {
|
||||||
tlsc := &tls.Config{}
|
tlsc := &tls.Config{}
|
||||||
|
|
||||||
if certPath != "" {
|
if ctx.DockerCertPath != "" {
|
||||||
cert, err := tls.LoadX509KeyPair(filepath.Join(certPath, "cert.pem"), filepath.Join(certPath, "key.pem"))
|
cert, err := tls.LoadX509KeyPair(filepath.Join(ctx.DockerCertPath, "cert.pem"), filepath.Join(ctx.DockerCertPath, "key.pem"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error loading x509 key pair: %s", err)
|
return nil, fmt.Errorf("Error loading x509 key pair: %s", err)
|
||||||
}
|
}
|
||||||
tlsc.Certificates = append(tlsc.Certificates, cert)
|
tlsc.Certificates = append(tlsc.Certificates, cert)
|
||||||
}
|
}
|
||||||
tlsc.InsecureSkipVerify = !tlsVerify
|
tlsc.InsecureSkipVerify = ctx.DockerInsecureSkipTLSVerify
|
||||||
tr = &http.Transport{
|
tr = &http.Transport{
|
||||||
TLSClientConfig: tlsc,
|
TLSClientConfig: tlsc,
|
||||||
}
|
}
|
||||||
|
@ -77,11 +79,19 @@ func newDockerClient(refHostname, certPath string, tlsVerify bool) (*dockerClien
|
||||||
if tr != nil {
|
if tr != nil {
|
||||||
client.Transport = tr
|
client.Transport = tr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return &dockerClient{
|
return &dockerClient{
|
||||||
registry: registry,
|
ctx: ctx,
|
||||||
username: username,
|
registry: registry,
|
||||||
password: password,
|
username: username,
|
||||||
client: client,
|
password: password,
|
||||||
|
client: client,
|
||||||
|
signatureBase: sigBase,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -98,16 +108,20 @@ func (c *dockerClient) makeRequest(method, url string, headers map[string][]stri
|
||||||
}
|
}
|
||||||
|
|
||||||
url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url
|
url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url
|
||||||
return c.makeRequestToResolvedURL(method, url, headers, stream)
|
return c.makeRequestToResolvedURL(method, url, headers, stream, -1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||||
|
// streamLen, if not -1, specifies the length of the data expected on stream.
|
||||||
// makeRequest should generally be preferred.
|
// makeRequest should generally be preferred.
|
||||||
func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader, streamLen int64) (*http.Response, error) {
|
||||||
req, err := http.NewRequest(method, url, stream)
|
req, err := http.NewRequest(method, url, stream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
|
||||||
|
req.ContentLength = streamLen
|
||||||
|
}
|
||||||
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
|
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
|
||||||
for n, h := range headers {
|
for n, h := range headers {
|
||||||
for _, hh := range h {
|
for _, hh := range h {
|
||||||
|
@ -137,41 +151,38 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
|
||||||
req.SetBasicAuth(c.username, c.password)
|
req.SetBasicAuth(c.username, c.password)
|
||||||
return nil
|
return nil
|
||||||
case "Bearer":
|
case "Bearer":
|
||||||
res, err := c.client.Do(req)
|
// FIXME? This gets a new token for every API request;
|
||||||
|
// we may be easily able to reuse a previous token, e.g.
|
||||||
|
// for OpenShift the token only identifies the user and does not vary
|
||||||
|
// across operations. Should we just try the request first, and
|
||||||
|
// only get a new token on failure?
|
||||||
|
// OTOH what to do with the single-use body stream in that case?
|
||||||
|
|
||||||
|
// Try performing the request, expecting it to fail.
|
||||||
|
testReq := *req
|
||||||
|
// Do not use the body stream, or we couldn't reuse it for the "real" call later.
|
||||||
|
testReq.Body = nil
|
||||||
|
testReq.ContentLength = 0
|
||||||
|
res, err := c.client.Do(&testReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
hdr := res.Header.Get("WWW-Authenticate")
|
chs := parseAuthHeader(res.Header)
|
||||||
if hdr == "" || res.StatusCode != http.StatusUnauthorized {
|
if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 {
|
||||||
// no need for bearer? wtf?
|
// no need for bearer? wtf?
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
tokens = strings.Split(hdr, " ")
|
// Arbitrarily use the first challenge, there is no reason to expect more than one.
|
||||||
tokens = strings.Split(tokens[1], ",")
|
challenge := chs[0]
|
||||||
var realm, service, scope string
|
if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens.
|
||||||
for _, token := range tokens {
|
return fmt.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
|
||||||
if strings.HasPrefix(token, "realm") {
|
|
||||||
realm = strings.Trim(token[len("realm="):], "\"")
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(token, "service") {
|
|
||||||
service = strings.Trim(token[len("service="):], "\"")
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(token, "scope") {
|
|
||||||
scope = strings.Trim(token[len("scope="):], "\"")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
realm, ok := challenge.Parameters["realm"]
|
||||||
if realm == "" {
|
if !ok {
|
||||||
return fmt.Errorf("missing realm in bearer auth challenge")
|
return fmt.Errorf("missing realm in bearer auth challenge")
|
||||||
}
|
}
|
||||||
if service == "" {
|
service, _ := challenge.Parameters["service"] // Will be "" if not present
|
||||||
return fmt.Errorf("missing service in bearer auth challenge")
|
scope, _ := challenge.Parameters["scope"] // Will be "" if not present
|
||||||
}
|
|
||||||
// The scope can be empty if we're not getting a token for a specific repo
|
|
||||||
//if scope == "" && repo != "" {
|
|
||||||
if scope == "" {
|
|
||||||
return fmt.Errorf("missing scope in bearer auth challenge")
|
|
||||||
}
|
|
||||||
token, err := c.getBearerToken(realm, service, scope)
|
token, err := c.getBearerToken(realm, service, scope)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -189,7 +200,9 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
getParams := authReq.URL.Query()
|
getParams := authReq.URL.Query()
|
||||||
getParams.Add("service", service)
|
if service != "" {
|
||||||
|
getParams.Add("service", service)
|
||||||
|
}
|
||||||
if scope != "" {
|
if scope != "" {
|
||||||
getParams.Add("scope", scope)
|
getParams.Add("scope", scope)
|
||||||
}
|
}
|
||||||
|
@ -321,14 +334,9 @@ func (c *dockerClient) ping() (*pingResponse, error) {
|
||||||
}
|
}
|
||||||
return pr, nil
|
return pr, nil
|
||||||
}
|
}
|
||||||
scheme := "https"
|
pr, err := ping("https")
|
||||||
pr, err := ping(scheme)
|
if err != nil && c.ctx.DockerInsecureSkipTLSVerify {
|
||||||
if err != nil {
|
pr, err = ping("http")
|
||||||
scheme = "http"
|
|
||||||
pr, err = ping(scheme)
|
|
||||||
if err == nil {
|
|
||||||
return pr, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return pr, err
|
return pr, err
|
||||||
}
|
}
|
||||||
|
|
7
vendor/github.com/containers/image/docker/docker_image.go
generated
vendored
7
vendor/github.com/containers/image/docker/docker_image.go
generated
vendored
|
@ -18,12 +18,13 @@ type Image struct {
|
||||||
|
|
||||||
// newImage returns a new Image interface type after setting up
|
// newImage returns a new Image interface type after setting up
|
||||||
// a client to the registry hosting the given image.
|
// a client to the registry hosting the given image.
|
||||||
func newImage(ref dockerReference, certPath string, tlsVerify bool) (types.Image, error) {
|
// The caller must call .Close() on the returned Image.
|
||||||
s, err := newImageSource(ref, certPath, tlsVerify)
|
func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
|
||||||
|
s, err := newImageSource(ctx, ref, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &Image{Image: image.FromSource(s, nil), src: s}, nil
|
return &Image{Image: image.FromSource(s), src: s}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SourceRefFullName returns a fully expanded name for the repository this image is in.
|
// SourceRefFullName returns a fully expanded name for the repository this image is in.
|
||||||
|
|
235
vendor/github.com/containers/image/docker/docker_image_dest.go
generated
vendored
235
vendor/github.com/containers/image/docker/docker_image_dest.go
generated
vendored
|
@ -2,10 +2,16 @@ package docker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/containers/image/manifest"
|
"github.com/containers/image/manifest"
|
||||||
|
@ -15,11 +21,13 @@ import (
|
||||||
type dockerImageDestination struct {
|
type dockerImageDestination struct {
|
||||||
ref dockerReference
|
ref dockerReference
|
||||||
c *dockerClient
|
c *dockerClient
|
||||||
|
// State
|
||||||
|
manifestDigest string // or "" if not yet known.
|
||||||
}
|
}
|
||||||
|
|
||||||
// newImageDestination creates a new ImageDestination for the specified image reference and connection specification.
|
// newImageDestination creates a new ImageDestination for the specified image reference.
|
||||||
func newImageDestination(ref dockerReference, certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
|
||||||
c, err := newDockerClient(ref.ref.Hostname(), certPath, tlsVerify)
|
c, err := newDockerClient(ctx, ref, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -35,15 +43,105 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
|
||||||
return d.ref
|
return d.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||||
|
func (d *dockerImageDestination) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
|
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
|
||||||
return []string{
|
return []string{
|
||||||
// TODO(runcom): we'll add OCI as part of another PR here
|
// TODO(runcom): we'll add OCI as part of another PR here
|
||||||
manifest.DockerV2Schema2MIMEType,
|
manifest.DockerV2Schema2MediaType,
|
||||||
manifest.DockerV2Schema1SignedMIMEType,
|
manifest.DockerV2Schema1SignedMediaType,
|
||||||
manifest.DockerV2Schema1MIMEType,
|
manifest.DockerV2Schema1MediaType,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
|
func (d *dockerImageDestination) SupportsSignatures() error {
|
||||||
|
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlob writes contents of stream and returns its computed digest and size.
|
||||||
|
// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
|
||||||
|
// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
|
||||||
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
|
// to any other readers for download using the supplied digest.
|
||||||
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
|
func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
|
||||||
|
if digest != "" {
|
||||||
|
checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest)
|
||||||
|
|
||||||
|
logrus.Debugf("Checking %s", checkURL)
|
||||||
|
res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode == http.StatusOK {
|
||||||
|
logrus.Debugf("... already exists, not uploading")
|
||||||
|
blobLength, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
return digest, blobLength, nil
|
||||||
|
}
|
||||||
|
logrus.Debugf("... failed, status %d", res.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FIXME? Chunked upload, progress reporting, etc.
|
||||||
|
uploadURL := fmt.Sprintf(blobUploadURL, d.ref.ref.RemoteName())
|
||||||
|
logrus.Debugf("Uploading %s", uploadURL)
|
||||||
|
res, err := d.c.makeRequest("POST", uploadURL, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusAccepted {
|
||||||
|
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
||||||
|
return "", -1, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
|
||||||
|
}
|
||||||
|
uploadLocation, err := res.Location()
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
tee := io.TeeReader(stream, h)
|
||||||
|
res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, expectedSize)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Debugf("Error uploading layer chunked, response %#v", *res)
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
hash := h.Sum(nil)
|
||||||
|
computedDigest := "sha256:" + hex.EncodeToString(hash[:])
|
||||||
|
|
||||||
|
uploadLocation, err = res.Location()
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// FIXME: DELETE uploadLocation on failure
|
||||||
|
|
||||||
|
locationQuery := uploadLocation.Query()
|
||||||
|
// TODO: check digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
|
||||||
|
locationQuery.Set("digest", computedDigest)
|
||||||
|
uploadLocation.RawQuery = locationQuery.Encode()
|
||||||
|
res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusCreated {
|
||||||
|
logrus.Debugf("Error uploading layer, response %#v", *res)
|
||||||
|
return "", -1, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
logrus.Debugf("Upload of layer %s complete", digest)
|
||||||
|
return computedDigest, res.Request.ContentLength, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *dockerImageDestination) PutManifest(m []byte) error {
|
func (d *dockerImageDestination) PutManifest(m []byte) error {
|
||||||
// FIXME: This only allows upload by digest, not creating a tag. See the
|
// FIXME: This only allows upload by digest, not creating a tag. See the
|
||||||
// corresponding comment in openshift.NewImageDestination.
|
// corresponding comment in openshift.NewImageDestination.
|
||||||
|
@ -51,6 +149,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
d.manifestDigest = digest
|
||||||
url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), digest)
|
url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), digest)
|
||||||
|
|
||||||
headers := map[string][]string{}
|
headers := map[string][]string{}
|
||||||
|
@ -74,60 +173,96 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dockerImageDestination) PutBlob(digest string, stream io.Reader) error {
|
func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest)
|
// FIXME? This overwrites files one at a time, definitely not atomic.
|
||||||
|
// A failure when updating signatures with a reordered copy could lose some of them.
|
||||||
|
|
||||||
logrus.Debugf("Checking %s", checkURL)
|
// Skip dealing with the manifest digest if not necessary.
|
||||||
res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
|
if len(signatures) == 0 {
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusOK && res.Header.Get("Docker-Content-Digest") == digest {
|
|
||||||
logrus.Debugf("... already exists, not uploading")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
logrus.Debugf("... failed, status %d", res.StatusCode)
|
if d.c.signatureBase == nil {
|
||||||
|
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
|
||||||
// FIXME? Chunked upload, progress reporting, etc.
|
|
||||||
uploadURL := fmt.Sprintf(blobUploadURL, d.ref.ref.RemoteName())
|
|
||||||
logrus.Debugf("Uploading %s", uploadURL)
|
|
||||||
res, err = d.c.makeRequest("POST", uploadURL, nil, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusAccepted {
|
|
||||||
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
|
||||||
return fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
|
|
||||||
}
|
|
||||||
uploadLocation, err := res.Location()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error determining upload URL: %s", err.Error())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: DELETE uploadLocation on failure
|
// FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
|
||||||
|
if d.manifestDigest == "" {
|
||||||
locationQuery := uploadLocation.Query()
|
return fmt.Errorf("Unknown manifest digest, can't add signatures")
|
||||||
locationQuery.Set("digest", digest)
|
}
|
||||||
uploadLocation.RawQuery = locationQuery.Encode()
|
|
||||||
res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, stream)
|
for i, signature := range signatures {
|
||||||
if err != nil {
|
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
|
||||||
return err
|
if url == nil {
|
||||||
}
|
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
|
||||||
defer res.Body.Close()
|
}
|
||||||
if res.StatusCode != http.StatusCreated {
|
err := d.putOneSignature(url, signature)
|
||||||
logrus.Debugf("Error uploading layer, response %#v", *res)
|
if err != nil {
|
||||||
return fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Remove any other signatures, if present.
|
||||||
|
// We stop at the first missing signature; if a previous deleting loop aborted
|
||||||
|
// prematurely, this may not clean up all of them, but one missing signature
|
||||||
|
// is enough for dockerImageSource to stop looking for other signatures, so that
|
||||||
|
// is sufficient.
|
||||||
|
for i := len(signatures); ; i++ {
|
||||||
|
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
|
||||||
|
if url == nil {
|
||||||
|
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
|
||||||
|
}
|
||||||
|
missing, err := d.c.deleteOneSignature(url)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if missing {
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("Upload of layer %s complete", digest)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
// putOneSignature stores one signature to url.
|
||||||
if len(signatures) != 0 {
|
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
|
||||||
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
|
switch url.Scheme {
|
||||||
|
case "file":
|
||||||
|
logrus.Debugf("Writing to %s", url.Path)
|
||||||
|
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = ioutil.WriteFile(url.Path, signature, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String())
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteOneSignature deletes a signature from url, if it exists.
|
||||||
|
// If it successfully determines that the signature does not exist, returns (true, nil)
|
||||||
|
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
|
||||||
|
switch url.Scheme {
|
||||||
|
case "file":
|
||||||
|
logrus.Debugf("Deleting %s", url.Path)
|
||||||
|
err := os.Remove(url.Path)
|
||||||
|
if err != nil && os.IsNotExist(err) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
|
// WARNING: This does not have any transactional semantics:
|
||||||
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||||
|
func (d *dockerImageDestination) Commit() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
185
vendor/github.com/containers/image/docker/docker_image_src.go
generated
vendored
185
vendor/github.com/containers/image/docker/docker_image_src.go
generated
vendored
|
@ -6,6 +6,8 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"mime"
|
"mime"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
|
@ -23,19 +25,30 @@ func (e errFetchManifest) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type dockerImageSource struct {
|
type dockerImageSource struct {
|
||||||
ref dockerReference
|
ref dockerReference
|
||||||
c *dockerClient
|
requestedManifestMIMETypes []string
|
||||||
|
c *dockerClient
|
||||||
|
// State
|
||||||
|
cachedManifest []byte // nil if not loaded yet
|
||||||
|
cachedManifestMIMEType string // Only valid if cachedManifest != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// newImageSource creates a new ImageSource for the specified image reference and connection specification.
|
// newImageSource creates a new ImageSource for the specified image reference,
|
||||||
func newImageSource(ref dockerReference, certPath string, tlsVerify bool) (*dockerImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
c, err := newDockerClient(ref.ref.Hostname(), certPath, tlsVerify)
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
|
||||||
|
c, err := newDockerClient(ctx, ref, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if requestedManifestMIMETypes == nil {
|
||||||
|
requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
|
||||||
|
}
|
||||||
return &dockerImageSource{
|
return &dockerImageSource{
|
||||||
ref: ref,
|
ref: ref,
|
||||||
c: c,
|
requestedManifestMIMETypes: requestedManifestMIMETypes,
|
||||||
|
c: c,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -45,6 +58,10 @@ func (s *dockerImageSource) Reference() types.ImageReference {
|
||||||
return s.ref
|
return s.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageSource, if any.
|
||||||
|
func (s *dockerImageSource) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
|
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
|
||||||
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
|
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
|
||||||
func simplifyContentType(contentType string) string {
|
func simplifyContentType(contentType string) string {
|
||||||
|
@ -58,32 +75,54 @@ func simplifyContentType(contentType string) string {
|
||||||
return mimeType
|
return mimeType
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dockerImageSource) GetManifest(mimetypes []string) ([]byte, string, error) {
|
func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
|
||||||
reference, err := s.ref.tagOrDigest()
|
err := s.ensureManifestIsLoaded()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
|
return s.cachedManifest, s.cachedManifestMIMEType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
|
||||||
|
//
|
||||||
|
// ImageSource implementations are not required or expected to do any caching,
|
||||||
|
// but because our signatures are “attached” to the manifest digest,
|
||||||
|
// we need to ensure that the digest of the manifest returned by GetManifest
|
||||||
|
// and used by GetSignatures are consistent, otherwise we would get spurious
|
||||||
|
// signature verification failures when pulling while a tag is being updated.
|
||||||
|
func (s *dockerImageSource) ensureManifestIsLoaded() error {
|
||||||
|
if s.cachedManifest != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
reference, err := s.ref.tagOrDigest()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), reference)
|
url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), reference)
|
||||||
// TODO(runcom) set manifest version header! schema1 for now - then schema2 etc etc and v1
|
// TODO(runcom) set manifest version header! schema1 for now - then schema2 etc etc and v1
|
||||||
// TODO(runcom) NO, switch on the resulter manifest like Docker is doing
|
// TODO(runcom) NO, switch on the resulter manifest like Docker is doing
|
||||||
headers := make(map[string][]string)
|
headers := make(map[string][]string)
|
||||||
headers["Accept"] = mimetypes
|
headers["Accept"] = s.requestedManifestMIMETypes
|
||||||
res, err := s.c.makeRequest("GET", url, headers, nil)
|
res, err := s.c.makeRequest("GET", url, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return err
|
||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
manblob, err := ioutil.ReadAll(res.Body)
|
manblob, err := ioutil.ReadAll(res.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return err
|
||||||
}
|
}
|
||||||
if res.StatusCode != http.StatusOK {
|
if res.StatusCode != http.StatusOK {
|
||||||
return nil, "", errFetchManifest{res.StatusCode, manblob}
|
return errFetchManifest{res.StatusCode, manblob}
|
||||||
}
|
}
|
||||||
// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
|
// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
|
||||||
return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
|
s.cachedManifest = manblob
|
||||||
|
s.cachedManifestMIMEType = simplifyContentType(res.Header.Get("Content-Type"))
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||||
func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
||||||
url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), digest)
|
url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), digest)
|
||||||
logrus.Debugf("Downloading %s", url)
|
logrus.Debugf("Downloading %s", url)
|
||||||
|
@ -97,62 +136,152 @@ func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error)
|
||||||
}
|
}
|
||||||
size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
|
size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
size = 0
|
size = -1
|
||||||
}
|
}
|
||||||
return res.Body, size, nil
|
return res.Body, size, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
||||||
return [][]byte{}, nil
|
if s.c.signatureBase == nil { // Skip dealing with the manifest digest if not necessary.
|
||||||
|
return [][]byte{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.ensureManifestIsLoaded(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
manifestDigest, err := manifest.Digest(s.cachedManifest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
signatures := [][]byte{}
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
|
||||||
|
if url == nil {
|
||||||
|
return nil, fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
|
||||||
|
}
|
||||||
|
signature, missing, err := s.getOneSignature(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if missing {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
signatures = append(signatures, signature)
|
||||||
|
}
|
||||||
|
return signatures, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dockerImageSource) Delete() error {
|
// getOneSignature downloads one signature from url.
|
||||||
var body []byte
|
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
|
||||||
|
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
|
||||||
|
switch url.Scheme {
|
||||||
|
case "file":
|
||||||
|
logrus.Debugf("Reading %s", url.Path)
|
||||||
|
sig, err := ioutil.ReadFile(url.Path)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, true, nil
|
||||||
|
}
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
return sig, false, nil
|
||||||
|
|
||||||
|
case "http", "https":
|
||||||
|
logrus.Debugf("GET %s", url)
|
||||||
|
res, err := s.c.client.Get(url.String())
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return nil, true, nil
|
||||||
|
} else if res.StatusCode != http.StatusOK {
|
||||||
|
return nil, false, fmt.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
|
||||||
|
}
|
||||||
|
sig, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
return sig, false, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteImage deletes the named image from the registry, if supported.
|
||||||
|
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
||||||
|
c, err := newDockerClient(ctx, ref, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// When retrieving the digest from a registry >= 2.3 use the following header:
|
// When retrieving the digest from a registry >= 2.3 use the following header:
|
||||||
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
||||||
headers := make(map[string][]string)
|
headers := make(map[string][]string)
|
||||||
headers["Accept"] = []string{manifest.DockerV2Schema2MIMEType}
|
headers["Accept"] = []string{manifest.DockerV2Schema2MediaType}
|
||||||
|
|
||||||
reference, err := s.ref.tagOrDigest()
|
reference, err := ref.tagOrDigest()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
getURL := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), reference)
|
getURL := fmt.Sprintf(manifestURL, ref.ref.RemoteName(), reference)
|
||||||
get, err := s.c.makeRequest("GET", getURL, headers, nil)
|
get, err := c.makeRequest("GET", getURL, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer get.Body.Close()
|
defer get.Body.Close()
|
||||||
body, err = ioutil.ReadAll(get.Body)
|
manifestBody, err := ioutil.ReadAll(get.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
switch get.StatusCode {
|
switch get.StatusCode {
|
||||||
case http.StatusOK:
|
case http.StatusOK:
|
||||||
case http.StatusNotFound:
|
case http.StatusNotFound:
|
||||||
return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry.", s.ref.ref)
|
return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry.", ref.ref)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("Failed to delete %v: %v (%v)", s.ref.ref, body, get.Status)
|
return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
digest := get.Header.Get("Docker-Content-Digest")
|
digest := get.Header.Get("Docker-Content-Digest")
|
||||||
deleteURL := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), digest)
|
deleteURL := fmt.Sprintf(manifestURL, ref.ref.RemoteName(), digest)
|
||||||
|
|
||||||
// When retrieving the digest from a registry >= 2.3 use the following header:
|
// When retrieving the digest from a registry >= 2.3 use the following header:
|
||||||
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
||||||
delete, err := s.c.makeRequest("DELETE", deleteURL, headers, nil)
|
delete, err := c.makeRequest("DELETE", deleteURL, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer delete.Body.Close()
|
defer delete.Body.Close()
|
||||||
|
|
||||||
body, err = ioutil.ReadAll(delete.Body)
|
body, err := ioutil.ReadAll(delete.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if delete.StatusCode != http.StatusAccepted {
|
if delete.StatusCode != http.StatusAccepted {
|
||||||
return fmt.Errorf("Failed to delete %v: %v (%v)", deleteURL, body, delete.Status)
|
return fmt.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.signatureBase != nil {
|
||||||
|
manifestDigest, err := manifest.Digest(manifestBody)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
url := signatureStorageURL(c.signatureBase, manifestDigest, i)
|
||||||
|
if url == nil {
|
||||||
|
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
|
||||||
|
}
|
||||||
|
missing, err := c.deleteOneSignature(url)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if missing {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
26
vendor/github.com/containers/image/docker/docker_transport.go
generated
vendored
26
vendor/github.com/containers/image/docker/docker_transport.go
generated
vendored
|
@ -9,7 +9,7 @@ import (
|
||||||
"github.com/docker/docker/reference"
|
"github.com/docker/docker/reference"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Transport is an ImageTransport for Docker references.
|
// Transport is an ImageTransport for Docker registry-hosted images.
|
||||||
var Transport = dockerTransport{}
|
var Transport = dockerTransport{}
|
||||||
|
|
||||||
type dockerTransport struct{}
|
type dockerTransport struct{}
|
||||||
|
@ -116,18 +116,28 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImage returns a types.Image for this reference.
|
// NewImage returns a types.Image for this reference.
|
||||||
func (ref dockerReference) NewImage(certPath string, tlsVerify bool) (types.Image, error) {
|
// The caller must call .Close() on the returned Image.
|
||||||
return newImage(ref, certPath, tlsVerify)
|
func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
|
return newImage(ctx, ref)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference.
|
// NewImageSource returns a types.ImageSource for this reference,
|
||||||
func (ref dockerReference) NewImageSource(certPath string, tlsVerify bool) (types.ImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
return newImageSource(ref, certPath, tlsVerify)
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||||
|
return newImageSource(ctx, ref, requestedManifestMIMETypes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
func (ref dockerReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
return newImageDestination(ref, certPath, tlsVerify)
|
func (ref dockerReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
|
return newImageDestination(ctx, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
func (ref dockerReference) DeleteImage(ctx *types.SystemContext) error {
|
||||||
|
return deleteImage(ctx, ref)
|
||||||
}
|
}
|
||||||
|
|
||||||
// tagOrDigest returns a tag or digest from the reference.
|
// tagOrDigest returns a tag or digest from the reference.
|
||||||
|
|
198
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
Normal file
198
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
Normal file
|
@ -0,0 +1,198 @@
|
||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ghodss/yaml"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
|
||||||
|
// You can override this at build time with
|
||||||
|
// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path'
|
||||||
|
var systemRegistriesDirPath = builtinRegistriesDirPath
|
||||||
|
|
||||||
|
// builtinRegistriesDirPath is the path to registries.d.
|
||||||
|
// DO NOT change this, instead see systemRegistriesDirPath above.
|
||||||
|
const builtinRegistriesDirPath = "/etc/containers/registries.d"
|
||||||
|
|
||||||
|
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
|
||||||
|
// NOTE: Keep this in sync with docs/registries.d.md!
|
||||||
|
type registryConfiguration struct {
|
||||||
|
DefaultDocker *registryNamespace `json:"default-docker"`
|
||||||
|
// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
|
||||||
|
Docker map[string]registryNamespace `json:"docker"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// registryNamespace defines lookaside locations for a single namespace.
|
||||||
|
type registryNamespace struct {
|
||||||
|
SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
|
||||||
|
SigStoreStaging string `json:"sigstore-staging"` // For writing only.
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
|
||||||
|
// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below.
|
||||||
|
type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported.
|
||||||
|
|
||||||
|
// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
|
||||||
|
func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) {
|
||||||
|
// FIXME? Loading and parsing the config could be cached across calls.
|
||||||
|
dirPath := registriesDirPath(ctx)
|
||||||
|
logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
|
||||||
|
config, err := loadAndMergeConfig(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
topLevel := config.signatureTopLevel(ref, write)
|
||||||
|
if topLevel == "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
url, err := url.Parse(topLevel)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Invalid signature storage URL %s: %v", topLevel, err)
|
||||||
|
}
|
||||||
|
// FIXME? Restrict to explicitly supported schemes?
|
||||||
|
repo := ref.ref.FullName() // Note that this is without a tag or digest.
|
||||||
|
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
|
||||||
|
return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
|
||||||
|
}
|
||||||
|
url.Path = url.Path + "/" + repo
|
||||||
|
return url, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// registriesDirPath returns a path to registries.d
|
||||||
|
func registriesDirPath(ctx *types.SystemContext) string {
|
||||||
|
if ctx != nil {
|
||||||
|
if ctx.RegistriesDirPath != "" {
|
||||||
|
return ctx.RegistriesDirPath
|
||||||
|
}
|
||||||
|
if ctx.RootForImplicitAbsolutePaths != "" {
|
||||||
|
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return systemRegistriesDirPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadAndMergeConfig loads configuration files in dirPath
|
||||||
|
func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
|
||||||
|
mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
|
||||||
|
dockerDefaultMergedFrom := ""
|
||||||
|
nsMergedFrom := map[string]string{}
|
||||||
|
|
||||||
|
dir, err := os.Open(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return &mergedConfig, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
configNames, err := dir.Readdirnames(0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, configName := range configNames {
|
||||||
|
if !strings.HasSuffix(configName, ".yaml") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
configPath := filepath.Join(dirPath, configName)
|
||||||
|
configBytes, err := ioutil.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var config registryConfiguration
|
||||||
|
err = yaml.Unmarshal(configBytes, &config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error parsing %s: %v", configPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.DefaultDocker != nil {
|
||||||
|
if mergedConfig.DefaultDocker != nil {
|
||||||
|
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
|
||||||
|
dockerDefaultMergedFrom, configPath)
|
||||||
|
}
|
||||||
|
mergedConfig.DefaultDocker = config.DefaultDocker
|
||||||
|
dockerDefaultMergedFrom = configPath
|
||||||
|
}
|
||||||
|
|
||||||
|
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
|
||||||
|
if _, ok := mergedConfig.Docker[nsName]; ok {
|
||||||
|
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
|
||||||
|
nsName, nsMergedFrom[nsName], configPath)
|
||||||
|
}
|
||||||
|
mergedConfig.Docker[nsName] = nsConfig
|
||||||
|
nsMergedFrom[nsName] = configPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &mergedConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
|
||||||
|
// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used.
|
||||||
|
func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
|
||||||
|
if config.Docker != nil {
|
||||||
|
// Look for a full match.
|
||||||
|
identity := ref.PolicyConfigurationIdentity()
|
||||||
|
if ns, ok := config.Docker[identity]; ok {
|
||||||
|
logrus.Debugf(` Using "docker" namespace %s`, identity)
|
||||||
|
if url := ns.signatureTopLevel(write); url != "" {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look for a match of the possible parent namespaces.
|
||||||
|
for _, name := range ref.PolicyConfigurationNamespaces() {
|
||||||
|
if ns, ok := config.Docker[name]; ok {
|
||||||
|
logrus.Debugf(` Using "docker" namespace %s`, name)
|
||||||
|
if url := ns.signatureTopLevel(write); url != "" {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Look for a default location
|
||||||
|
if config.DefaultDocker != nil {
|
||||||
|
logrus.Debugf(` Using "default-docker" configuration`)
|
||||||
|
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
|
||||||
|
// or "" if nothing has been configured.
|
||||||
|
func (ns registryNamespace) signatureTopLevel(write bool) string {
|
||||||
|
if write && ns.SigStoreStaging != "" {
|
||||||
|
logrus.Debugf(` Using %s`, ns.SigStoreStaging)
|
||||||
|
return ns.SigStoreStaging
|
||||||
|
}
|
||||||
|
if ns.SigStore != "" {
|
||||||
|
logrus.Debugf(` Using %s`, ns.SigStore)
|
||||||
|
return ns.SigStore
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
|
||||||
|
// Returns nil iff base == nil.
|
||||||
|
func signatureStorageURL(base signatureStorageBase, manifestDigest string, index int) *url.URL {
|
||||||
|
if base == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
url := *base
|
||||||
|
url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest, index+1)
|
||||||
|
return &url
|
||||||
|
}
|
159
vendor/github.com/containers/image/docker/wwwauthenticate.go
generated
vendored
Normal file
159
vendor/github.com/containers/image/docker/wwwauthenticate.go
generated
vendored
Normal file
|
@ -0,0 +1,159 @@
|
||||||
|
package docker
|
||||||
|
|
||||||
|
// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// challenge carries information from a WWW-Authenticate response header.
|
||||||
|
// See RFC 7235.
|
||||||
|
type challenge struct {
|
||||||
|
// Scheme is the auth-scheme according to RFC 7235
|
||||||
|
Scheme string
|
||||||
|
|
||||||
|
// Parameters are the auth-params according to RFC 7235
|
||||||
|
Parameters map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Octet types from RFC 7230.
|
||||||
|
type octetType byte
|
||||||
|
|
||||||
|
var octetTypes [256]octetType
|
||||||
|
|
||||||
|
const (
|
||||||
|
isToken octetType = 1 << iota
|
||||||
|
isSpace
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// OCTET = <any 8-bit sequence of data>
|
||||||
|
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||||
|
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||||
|
// CR = <US-ASCII CR, carriage return (13)>
|
||||||
|
// LF = <US-ASCII LF, linefeed (10)>
|
||||||
|
// SP = <US-ASCII SP, space (32)>
|
||||||
|
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||||
|
// <"> = <US-ASCII double-quote mark (34)>
|
||||||
|
// CRLF = CR LF
|
||||||
|
// LWS = [CRLF] 1*( SP | HT )
|
||||||
|
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||||
|
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||||
|
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||||
|
// token = 1*<any CHAR except CTLs or separators>
|
||||||
|
// qdtext = <any TEXT except <">>
|
||||||
|
|
||||||
|
for c := 0; c < 256; c++ {
|
||||||
|
var t octetType
|
||||||
|
isCtl := c <= 31 || c == 127
|
||||||
|
isChar := 0 <= c && c <= 127
|
||||||
|
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||||
|
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||||
|
t |= isSpace
|
||||||
|
}
|
||||||
|
if isChar && !isCtl && !isSeparator {
|
||||||
|
t |= isToken
|
||||||
|
}
|
||||||
|
octetTypes[c] = t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAuthHeader(header http.Header) []challenge {
|
||||||
|
challenges := []challenge{}
|
||||||
|
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||||
|
v, p := parseValueAndParams(h)
|
||||||
|
if v != "" {
|
||||||
|
challenges = append(challenges, challenge{Scheme: v, Parameters: p})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return challenges
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: This is not a fully compliant parser per RFC 7235:
|
||||||
|
// Most notably it does not support more than one challenge within a single header
|
||||||
|
// Some of the whitespace parsing also seems noncompliant.
|
||||||
|
// But it is clearly better than what we used to have…
|
||||||
|
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||||
|
params = make(map[string]string)
|
||||||
|
value, s := expectToken(header)
|
||||||
|
if value == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value = strings.ToLower(value)
|
||||||
|
s = "," + skipSpace(s)
|
||||||
|
for strings.HasPrefix(s, ",") {
|
||||||
|
var pkey string
|
||||||
|
pkey, s = expectToken(skipSpace(s[1:]))
|
||||||
|
if pkey == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(s, "=") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var pvalue string
|
||||||
|
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||||
|
if pvalue == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pkey = strings.ToLower(pkey)
|
||||||
|
params[pkey] = pvalue
|
||||||
|
s = skipSpace(s)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func skipSpace(s string) (rest string) {
|
||||||
|
i := 0
|
||||||
|
for ; i < len(s); i++ {
|
||||||
|
if octetTypes[s[i]]&isSpace == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s[i:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func expectToken(s string) (token, rest string) {
|
||||||
|
i := 0
|
||||||
|
for ; i < len(s); i++ {
|
||||||
|
if octetTypes[s[i]]&isToken == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s[:i], s[i:]
|
||||||
|
}
|
||||||
|
|
||||||
|
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||||
|
if !strings.HasPrefix(s, "\"") {
|
||||||
|
return expectToken(s)
|
||||||
|
}
|
||||||
|
s = s[1:]
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
switch s[i] {
|
||||||
|
case '"':
|
||||||
|
return s[:i], s[i+1:]
|
||||||
|
case '\\':
|
||||||
|
p := make([]byte, len(s)-1)
|
||||||
|
j := copy(p, s[:i])
|
||||||
|
escape := true
|
||||||
|
for i = i + 1; i < len(s); i++ {
|
||||||
|
b := s[i]
|
||||||
|
switch {
|
||||||
|
case escape:
|
||||||
|
escape = false
|
||||||
|
p[j] = b
|
||||||
|
j++
|
||||||
|
case b == '\\':
|
||||||
|
escape = true
|
||||||
|
case b == '"':
|
||||||
|
return string(p[:j]), s[i+1:]
|
||||||
|
default:
|
||||||
|
p[j] = b
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", ""
|
||||||
|
}
|
48
vendor/github.com/containers/image/image/image.go
generated
vendored
48
vendor/github.com/containers/image/image/image.go
generated
vendored
|
@ -33,21 +33,18 @@ type genericImage struct {
|
||||||
// this field is valid only if cachedManifest is not nil
|
// this field is valid only if cachedManifest is not nil
|
||||||
cachedManifestMIMEType string
|
cachedManifestMIMEType string
|
||||||
// private cache for Signatures(); nil if not yet known.
|
// private cache for Signatures(); nil if not yet known.
|
||||||
cachedSignatures [][]byte
|
cachedSignatures [][]byte
|
||||||
requestedManifestMIMETypes []string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// FromSource returns a types.Image implementation for source.
|
// FromSource returns a types.Image implementation for source.
|
||||||
func FromSource(src types.ImageSource, requestedManifestMIMETypes []string) types.Image {
|
// The caller must call .Close() on the returned Image.
|
||||||
if len(requestedManifestMIMETypes) == 0 {
|
//
|
||||||
requestedManifestMIMETypes = []string{
|
// FromSource “takes ownership” of the input ImageSource and will call src.Close()
|
||||||
manifest.OCIV1ImageManifestMIMEType,
|
// when the image is closed. (This does not prevent callers from using both the
|
||||||
manifest.DockerV2Schema2MIMEType,
|
// Image and ImageSource objects simultaneously, but it means that they only need to
|
||||||
manifest.DockerV2Schema1SignedMIMEType,
|
// the Image.)
|
||||||
manifest.DockerV2Schema1MIMEType,
|
func FromSource(src types.ImageSource) types.Image {
|
||||||
}
|
return &genericImage{src: src}
|
||||||
}
|
|
||||||
return &genericImage{src: src, requestedManifestMIMETypes: requestedManifestMIMETypes}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference returns the reference used to set up this source, _as specified by the user_
|
// Reference returns the reference used to set up this source, _as specified by the user_
|
||||||
|
@ -56,16 +53,26 @@ func (i *genericImage) Reference() types.ImageReference {
|
||||||
return i.src.Reference()
|
return i.src.Reference()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized Image, if any.
|
||||||
|
func (i *genericImage) Close() {
|
||||||
|
i.src.Close()
|
||||||
|
}
|
||||||
|
|
||||||
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
|
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
|
||||||
// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
|
// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
|
||||||
func (i *genericImage) Manifest() ([]byte, string, error) {
|
func (i *genericImage) Manifest() ([]byte, string, error) {
|
||||||
if i.cachedManifest == nil {
|
if i.cachedManifest == nil {
|
||||||
m, mt, err := i.src.GetManifest(i.requestedManifestMIMETypes)
|
m, mt, err := i.src.GetManifest()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
i.cachedManifest = m
|
i.cachedManifest = m
|
||||||
if mt == "" {
|
if mt == "" || mt == "text/plain" {
|
||||||
|
// Crane registries can return "text/plain".
|
||||||
|
// This makes no real sense, but it happens
|
||||||
|
// because requests for manifests are
|
||||||
|
// redirected to a content distribution
|
||||||
|
// network which is configured that way.
|
||||||
mt = manifest.GuessMIMEType(i.cachedManifest)
|
mt = manifest.GuessMIMEType(i.cachedManifest)
|
||||||
}
|
}
|
||||||
i.cachedManifestMIMEType = mt
|
i.cachedManifestMIMEType = mt
|
||||||
|
@ -244,7 +251,7 @@ func (i *genericImage) getParsedManifest() (genericManifest, error) {
|
||||||
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
|
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
|
||||||
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
|
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
|
||||||
// need to happen within the ImageSource.
|
// need to happen within the ImageSource.
|
||||||
case manifest.DockerV2Schema1MIMEType, manifest.DockerV2Schema1SignedMIMEType, "application/json":
|
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
|
||||||
mschema1 := &manifestSchema1{}
|
mschema1 := &manifestSchema1{}
|
||||||
if err := json.Unmarshal(manblob, mschema1); err != nil {
|
if err := json.Unmarshal(manblob, mschema1); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -260,7 +267,7 @@ func (i *genericImage) getParsedManifest() (genericManifest, error) {
|
||||||
//return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
|
//return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
|
||||||
//}
|
//}
|
||||||
return mschema1, nil
|
return mschema1, nil
|
||||||
case manifest.DockerV2Schema2MIMEType:
|
case manifest.DockerV2Schema2MediaType:
|
||||||
v2s2 := manifestSchema2{src: i.src}
|
v2s2 := manifestSchema2{src: i.src}
|
||||||
if err := json.Unmarshal(manblob, &v2s2); err != nil {
|
if err := json.Unmarshal(manblob, &v2s2); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -299,15 +306,6 @@ func (i *genericImage) BlobDigests() ([]string, error) {
|
||||||
return uniqueBlobDigests(m), nil
|
return uniqueBlobDigests(m), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *genericImage) getLayer(dest types.ImageDestination, digest string) error {
|
|
||||||
stream, _, err := i.src.GetBlob(digest)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer stream.Close()
|
|
||||||
return dest.PutBlob(digest, stream)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fixManifestLayers, after validating the supplied manifest
|
// fixManifestLayers, after validating the supplied manifest
|
||||||
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
|
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
|
||||||
// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
|
// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
|
||||||
|
|
49
vendor/github.com/containers/image/manifest/manifest.go
generated
vendored
49
vendor/github.com/containers/image/manifest/manifest.go
generated
vendored
|
@ -6,33 +6,32 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
|
||||||
"github.com/docker/libtrust"
|
"github.com/docker/libtrust"
|
||||||
|
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
|
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
|
||||||
|
|
||||||
// FIXME(runcom, mitr): should we havea mediatype pkg??
|
// FIXME(runcom, mitr): should we havea mediatype pkg??
|
||||||
const (
|
const (
|
||||||
// DockerV2Schema1MIMEType MIME type represents Docker manifest schema 1
|
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
|
||||||
DockerV2Schema1MIMEType = "application/vnd.docker.distribution.manifest.v1+json"
|
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
|
||||||
// DockerV2Schema1MIMEType MIME type represents Docker manifest schema 1 with a JWS signature
|
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
|
||||||
DockerV2Schema1SignedMIMEType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
||||||
// DockerV2Schema2MIMEType MIME type represents Docker manifest schema 2
|
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
|
||||||
DockerV2Schema2MIMEType = "application/vnd.docker.distribution.manifest.v2+json"
|
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
|
||||||
// DockerV2ListMIMEType MIME type represents Docker manifest schema 2 list
|
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
|
||||||
DockerV2ListMIMEType = "application/vnd.docker.distribution.manifest.list.v2+json"
|
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
|
||||||
|
|
||||||
// OCIV1DescriptorMIMEType specifies the mediaType for a content descriptor.
|
|
||||||
OCIV1DescriptorMIMEType = "application/vnd.oci.descriptor.v1+json"
|
|
||||||
// OCIV1ImageManifestMIMEType specifies the mediaType for an image manifest.
|
|
||||||
OCIV1ImageManifestMIMEType = "application/vnd.oci.image.manifest.v1+json"
|
|
||||||
// OCIV1ImageManifestListMIMEType specifies the mediaType for an image manifest list.
|
|
||||||
OCIV1ImageManifestListMIMEType = "application/vnd.oci.image.manifest.list.v1+json"
|
|
||||||
// OCIV1ImageSerializationMIMEType is the mediaType used for layers referenced by the manifest.
|
|
||||||
OCIV1ImageSerializationMIMEType = "application/vnd.oci.image.serialization.rootfs.tar.gzip"
|
|
||||||
// OCIV1ImageSerializationConfigMIMEType specifies the mediaType for the image configuration.
|
|
||||||
OCIV1ImageSerializationConfigMIMEType = "application/vnd.oci.image.serialization.config.v1+json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
|
||||||
|
// should request from the backend unless directed otherwise.
|
||||||
|
var DefaultRequestedManifestMIMETypes = []string{
|
||||||
|
imgspecv1.MediaTypeImageManifest,
|
||||||
|
DockerV2Schema2MediaType,
|
||||||
|
DockerV2Schema1SignedMediaType,
|
||||||
|
DockerV2Schema1MediaType,
|
||||||
|
}
|
||||||
|
|
||||||
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
|
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
|
||||||
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
|
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
|
||||||
// but we may not have such metadata available (e.g. when the manifest is a local file).
|
// but we may not have such metadata available (e.g. when the manifest is a local file).
|
||||||
|
@ -49,25 +48,25 @@ func GuessMIMEType(manifest []byte) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
switch meta.MediaType {
|
switch meta.MediaType {
|
||||||
case DockerV2Schema2MIMEType, DockerV2ListMIMEType, OCIV1DescriptorMIMEType, OCIV1ImageManifestMIMEType, OCIV1ImageManifestListMIMEType: // A recognized type.
|
case DockerV2Schema2MediaType, DockerV2ListMediaType, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageManifestList: // A recognized type.
|
||||||
return meta.MediaType
|
return meta.MediaType
|
||||||
}
|
}
|
||||||
// this is the only way the function can return DockerV2Schema1MIMEType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
|
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
|
||||||
switch meta.SchemaVersion {
|
switch meta.SchemaVersion {
|
||||||
case 1:
|
case 1:
|
||||||
if meta.Signatures != nil {
|
if meta.Signatures != nil {
|
||||||
return DockerV2Schema1SignedMIMEType
|
return DockerV2Schema1SignedMediaType
|
||||||
}
|
}
|
||||||
return DockerV2Schema1MIMEType
|
return DockerV2Schema1MediaType
|
||||||
case 2: // Really should not happen, meta.MediaType should have been set. But given the data, this is our best guess.
|
case 2: // Really should not happen, meta.MediaType should have been set. But given the data, this is our best guess.
|
||||||
return DockerV2Schema2MIMEType
|
return DockerV2Schema2MediaType
|
||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
|
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
|
||||||
func Digest(manifest []byte) (string, error) {
|
func Digest(manifest []byte) (string, error) {
|
||||||
if GuessMIMEType(manifest) == DockerV2Schema1SignedMIMEType {
|
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
|
||||||
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
|
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|
206
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
Normal file
206
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
Normal file
|
@ -0,0 +1,206 @@
|
||||||
|
package layout
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/containers/image/manifest"
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
imgspec "github.com/opencontainers/image-spec/specs-go"
|
||||||
|
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ociImageDestination struct {
|
||||||
|
ref ociReference
|
||||||
|
}
|
||||||
|
|
||||||
|
// newImageDestination returns an ImageDestination for writing to an existing directory.
|
||||||
|
func newImageDestination(ref ociReference) types.ImageDestination {
|
||||||
|
return &ociImageDestination{ref: ref}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||||
|
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
|
||||||
|
func (d *ociImageDestination) Reference() types.ImageReference {
|
||||||
|
return d.ref
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||||
|
func (d *ociImageDestination) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
|
||||||
|
return []string{
|
||||||
|
imgspecv1.MediaTypeImageManifest,
|
||||||
|
manifest.DockerV2Schema2MediaType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
|
func (d *ociImageDestination) SupportsSignatures() error {
|
||||||
|
return fmt.Errorf("Pushing signatures for OCI images is not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlob writes contents of stream and returns its computed digest and size.
|
||||||
|
// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
|
||||||
|
// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
|
||||||
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
|
// to any other readers for download using the supplied digest.
|
||||||
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
|
func (d *ociImageDestination) PutBlob(stream io.Reader, _ string, expectedSize int64) (string, int64, error) {
|
||||||
|
if err := ensureDirectoryExists(d.ref.dir); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
succeeded := false
|
||||||
|
defer func() {
|
||||||
|
blobFile.Close()
|
||||||
|
if !succeeded {
|
||||||
|
os.Remove(blobFile.Name())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
h := sha256.New()
|
||||||
|
tee := io.TeeReader(stream, h)
|
||||||
|
|
||||||
|
size, err := io.Copy(blobFile, tee)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
computedDigest := "sha256:" + hex.EncodeToString(h.Sum(nil))
|
||||||
|
if expectedSize != -1 && size != expectedSize {
|
||||||
|
return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
|
||||||
|
}
|
||||||
|
if err := blobFile.Sync(); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
if err := blobFile.Chmod(0644); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
blobPath, err := d.ref.blobPath(computedDigest)
|
||||||
|
if err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
if err := ensureParentDirectoryExists(blobPath); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
|
||||||
|
return "", -1, err
|
||||||
|
}
|
||||||
|
succeeded = true
|
||||||
|
return computedDigest, size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createManifest(m []byte) ([]byte, string, error) {
|
||||||
|
om := imgspecv1.Manifest{}
|
||||||
|
mt := manifest.GuessMIMEType(m)
|
||||||
|
switch mt {
|
||||||
|
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
|
||||||
|
// There a simple reason about not yet implementing this.
|
||||||
|
// OCI image-spec assure about backward compatibility with docker v2s2 but not v2s1
|
||||||
|
// generating a v2s2 is a migration docker does when upgrading to 1.10.3
|
||||||
|
// and I don't think we should bother about this now (I don't want to have migration code here in skopeo)
|
||||||
|
return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 1 manifest")
|
||||||
|
case manifest.DockerV2Schema2MediaType:
|
||||||
|
if err := json.Unmarshal(m, &om); err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
om.MediaType = imgspecv1.MediaTypeImageManifest
|
||||||
|
for i := range om.Layers {
|
||||||
|
om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
|
||||||
|
}
|
||||||
|
om.Config.MediaType = imgspecv1.MediaTypeImageConfig
|
||||||
|
b, err := json.Marshal(om)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
return b, om.MediaType, nil
|
||||||
|
case manifest.DockerV2ListMediaType:
|
||||||
|
return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 2 manifest list")
|
||||||
|
case imgspecv1.MediaTypeImageManifestList:
|
||||||
|
return nil, "", errors.New("can't create an OCI manifest from OCI manifest list")
|
||||||
|
case imgspecv1.MediaTypeImageManifest:
|
||||||
|
return m, mt, nil
|
||||||
|
}
|
||||||
|
return nil, "", fmt.Errorf("unrecognized manifest media type %q", mt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ociImageDestination) PutManifest(m []byte) error {
|
||||||
|
// TODO(mitr, runcom): this breaks signatures entirely since at this point we're creating a new manifest
|
||||||
|
// and signatures don't apply anymore. Will fix.
|
||||||
|
ociMan, mt, err := createManifest(m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
digest, err := manifest.Digest(ociMan)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
desc := imgspec.Descriptor{}
|
||||||
|
desc.Digest = digest
|
||||||
|
// TODO(runcom): beaware and add support for OCI manifest list
|
||||||
|
desc.MediaType = mt
|
||||||
|
desc.Size = int64(len(ociMan))
|
||||||
|
data, err := json.Marshal(desc)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
blobPath, err := d.ref.blobPath(digest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(blobPath, ociMan, 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// TODO(runcom): ugly here?
|
||||||
|
if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
descriptorPath := d.ref.descriptorPath(d.ref.tag)
|
||||||
|
if err := ensureParentDirectoryExists(descriptorPath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return ioutil.WriteFile(descriptorPath, data, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureDirectoryExists(path string) error {
|
||||||
|
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||||
|
if err := os.MkdirAll(path, 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureParentDirectoryExists ensures the parent of the supplied path exists.
|
||||||
|
func ensureParentDirectoryExists(path string) error {
|
||||||
|
return ensureDirectoryExists(filepath.Dir(path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
|
if len(signatures) != 0 {
|
||||||
|
return fmt.Errorf("Pushing signatures for OCI images is not supported")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
|
// WARNING: This does not have any transactional semantics:
|
||||||
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||||
|
func (d *ociImageDestination) Commit() error {
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
package oci
|
package layout
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -12,7 +12,7 @@ import (
|
||||||
"github.com/docker/docker/reference"
|
"github.com/docker/docker/reference"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Transport is an ImageTransport for Docker references.
|
// Transport is an ImageTransport for OCI directories.
|
||||||
var Transport = ociTransport{}
|
var Transport = ociTransport{}
|
||||||
|
|
||||||
type ociTransport struct{}
|
type ociTransport struct{}
|
||||||
|
@ -58,6 +58,10 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
if scope == "/" {
|
if scope == "/" {
|
||||||
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
||||||
}
|
}
|
||||||
|
cleaned := filepath.Clean(dir)
|
||||||
|
if cleaned != dir {
|
||||||
|
return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -161,28 +165,42 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImage returns a types.Image for this reference.
|
// NewImage returns a types.Image for this reference.
|
||||||
func (ref ociReference) NewImage(certPath string, tlsVerify bool) (types.Image, error) {
|
// The caller must call .Close() on the returned Image.
|
||||||
|
func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
return nil, errors.New("Full Image support not implemented for oci: image names")
|
return nil, errors.New("Full Image support not implemented for oci: image names")
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference.
|
// NewImageSource returns a types.ImageSource for this reference,
|
||||||
func (ref ociReference) NewImageSource(certPath string, tlsVerify bool) (types.ImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||||
return nil, errors.New("Reading images not implemented for oci: image names")
|
return nil, errors.New("Reading images not implemented for oci: image names")
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
func (ref ociReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
|
func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
return newImageDestination(ref), nil
|
return newImageDestination(ref), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
|
||||||
|
return fmt.Errorf("Deleting images not implemented for oci: images")
|
||||||
|
}
|
||||||
|
|
||||||
// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
|
// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
|
||||||
func (ref ociReference) ociLayoutPath() string {
|
func (ref ociReference) ociLayoutPath() string {
|
||||||
return filepath.Join(ref.dir, "oci-layout")
|
return filepath.Join(ref.dir, "oci-layout")
|
||||||
}
|
}
|
||||||
|
|
||||||
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
|
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
|
||||||
func (ref ociReference) blobPath(digest string) string {
|
func (ref ociReference) blobPath(digest string) (string, error) {
|
||||||
return filepath.Join(ref.dir, "blobs", strings.Replace(digest, ":", "-", -1))
|
pts := strings.SplitN(digest, ":", 2)
|
||||||
|
if len(pts) != 2 {
|
||||||
|
return "", fmt.Errorf("unexpected digest reference %s", digest)
|
||||||
|
}
|
||||||
|
return filepath.Join(ref.dir, "blobs", pts[0], pts[1]), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// descriptorPath returns a path for the manifest within a directory using OCI conventions.
|
// descriptorPath returns a path for the manifest within a directory using OCI conventions.
|
155
vendor/github.com/containers/image/oci/oci_dest.go
generated
vendored
155
vendor/github.com/containers/image/oci/oci_dest.go
generated
vendored
|
@ -1,155 +0,0 @@
|
||||||
package oci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/containers/image/manifest"
|
|
||||||
"github.com/containers/image/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ociManifest struct {
|
|
||||||
SchemaVersion int `json:"schemaVersion"`
|
|
||||||
MediaType string `json:"mediaType"`
|
|
||||||
Config descriptor `json:"config"`
|
|
||||||
Layers []descriptor `json:"layers"`
|
|
||||||
Annotations map[string]string `json:"annotations"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type descriptor struct {
|
|
||||||
Digest string `json:"digest"`
|
|
||||||
MediaType string `json:"mediaType"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ociImageDestination struct {
|
|
||||||
ref ociReference
|
|
||||||
}
|
|
||||||
|
|
||||||
// newImageDestination returns an ImageDestination for writing to an existing directory.
|
|
||||||
func newImageDestination(ref ociReference) types.ImageDestination {
|
|
||||||
return &ociImageDestination{ref: ref}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
|
||||||
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
|
|
||||||
func (d *ociImageDestination) Reference() types.ImageReference {
|
|
||||||
return d.ref
|
|
||||||
}
|
|
||||||
|
|
||||||
func createManifest(m []byte) ([]byte, string, error) {
|
|
||||||
om := ociManifest{}
|
|
||||||
mt := manifest.GuessMIMEType(m)
|
|
||||||
switch mt {
|
|
||||||
case manifest.DockerV2Schema1MIMEType:
|
|
||||||
// There a simple reason about not yet implementing this.
|
|
||||||
// OCI image-spec assure about backward compatibility with docker v2s2 but not v2s1
|
|
||||||
// generating a v2s2 is a migration docker does when upgrading to 1.10.3
|
|
||||||
// and I don't think we should bother about this now (I don't want to have migration code here in skopeo)
|
|
||||||
return nil, "", fmt.Errorf("can't create OCI manifest from Docker V2 schema 1 manifest")
|
|
||||||
case manifest.DockerV2Schema2MIMEType:
|
|
||||||
if err := json.Unmarshal(m, &om); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
om.MediaType = manifest.OCIV1ImageManifestMIMEType
|
|
||||||
for i := range om.Layers {
|
|
||||||
om.Layers[i].MediaType = manifest.OCIV1ImageSerializationMIMEType
|
|
||||||
}
|
|
||||||
om.Config.MediaType = manifest.OCIV1ImageSerializationConfigMIMEType
|
|
||||||
b, err := json.Marshal(om)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
return b, om.MediaType, nil
|
|
||||||
case manifest.DockerV2ListMIMEType:
|
|
||||||
return nil, "", fmt.Errorf("can't create OCI manifest from Docker V2 schema 2 manifest list")
|
|
||||||
case manifest.OCIV1ImageManifestListMIMEType:
|
|
||||||
return nil, "", fmt.Errorf("can't create OCI manifest from OCI manifest list")
|
|
||||||
case manifest.OCIV1ImageManifestMIMEType:
|
|
||||||
return m, om.MediaType, nil
|
|
||||||
}
|
|
||||||
return nil, "", fmt.Errorf("Unrecognized manifest media type")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ociImageDestination) PutManifest(m []byte) error {
|
|
||||||
// TODO(mitr, runcom): this breaks signatures entirely since at this point we're creating a new manifest
|
|
||||||
// and signatures don't apply anymore. Will fix.
|
|
||||||
ociMan, mt, err := createManifest(m)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
digest, err := manifest.Digest(ociMan)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
desc := descriptor{}
|
|
||||||
desc.Digest = digest
|
|
||||||
// TODO(runcom): beaware and add support for OCI manifest list
|
|
||||||
desc.MediaType = mt
|
|
||||||
desc.Size = int64(len(ociMan))
|
|
||||||
data, err := json.Marshal(desc)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ioutil.WriteFile(d.ref.blobPath(digest), ociMan, 0644); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// TODO(runcom): ugly here?
|
|
||||||
if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
descriptorPath := d.ref.descriptorPath(d.ref.tag)
|
|
||||||
if err := ensureParentDirectoryExists(descriptorPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return ioutil.WriteFile(descriptorPath, data, 0644)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ociImageDestination) PutBlob(digest string, stream io.Reader) error {
|
|
||||||
blobPath := d.ref.blobPath(digest)
|
|
||||||
if err := ensureParentDirectoryExists(blobPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
blob, err := os.Create(blobPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer blob.Close()
|
|
||||||
if _, err := io.Copy(blob, stream); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := blob.Sync(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureParentDirectoryExists ensures the parent of the supplied path exists.
|
|
||||||
func ensureParentDirectoryExists(path string) error {
|
|
||||||
parent := filepath.Dir(path)
|
|
||||||
if _, err := os.Stat(parent); err != nil && os.IsNotExist(err) {
|
|
||||||
if err := os.MkdirAll(parent, 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
|
|
||||||
return []string{
|
|
||||||
manifest.OCIV1ImageManifestMIMEType,
|
|
||||||
manifest.DockerV2Schema2MIMEType,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
|
|
||||||
if len(signatures) != 0 {
|
|
||||||
return fmt.Errorf("Pushing signatures for OCI images is not supported")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
9
vendor/github.com/containers/image/openshift/openshift-copies.go
generated
vendored
9
vendor/github.com/containers/image/openshift/openshift-copies.go
generated
vendored
|
@ -950,7 +950,8 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, e := range a {
|
for _, e := range a {
|
||||||
(*m)[e.Name] = &e.Cluster
|
cluster := e.Cluster // Allocates a new instance in each iteration
|
||||||
|
(*m)[e.Name] = &cluster
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -963,7 +964,8 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, e := range a {
|
for _, e := range a {
|
||||||
(*m)[e.Name] = &e.AuthInfo
|
authInfo := e.AuthInfo // Allocates a new instance in each iteration
|
||||||
|
(*m)[e.Name] = &authInfo
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -976,7 +978,8 @@ func (m *contextsMap) UnmarshalJSON(data []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, e := range a {
|
for _, e := range a {
|
||||||
(*m)[e.Name] = &e.Context
|
context := e.Context // Allocates a new instance in each iteration
|
||||||
|
(*m)[e.Name] = &context
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
229
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
229
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
|
@ -2,12 +2,14 @@ package openshift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -20,7 +22,8 @@ import (
|
||||||
|
|
||||||
// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
|
// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
|
||||||
type openshiftClient struct {
|
type openshiftClient struct {
|
||||||
ref openshiftReference
|
ref openshiftReference
|
||||||
|
baseURL *url.URL
|
||||||
// Values from Kubernetes configuration
|
// Values from Kubernetes configuration
|
||||||
httpClient *http.Client
|
httpClient *http.Client
|
||||||
bearerToken string // "" if not used
|
bearerToken string // "" if not used
|
||||||
|
@ -50,13 +53,15 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
logrus.Debugf("URL: %#v", *baseURL)
|
logrus.Debugf("URL: %#v", *baseURL)
|
||||||
if *baseURL != *ref.baseURL {
|
|
||||||
return nil, fmt.Errorf("Unexpected baseURL mismatch: default %#v, reference %#v", *baseURL, *ref.baseURL)
|
if httpClient == nil {
|
||||||
|
httpClient = http.DefaultClient
|
||||||
}
|
}
|
||||||
httpClient.Timeout = 1 * time.Minute
|
httpClient.Timeout = 1 * time.Minute
|
||||||
|
|
||||||
return &openshiftClient{
|
return &openshiftClient{
|
||||||
ref: ref,
|
ref: ref,
|
||||||
|
baseURL: baseURL,
|
||||||
httpClient: httpClient,
|
httpClient: httpClient,
|
||||||
bearerToken: restConfig.BearerToken,
|
bearerToken: restConfig.BearerToken,
|
||||||
username: restConfig.Username,
|
username: restConfig.Username,
|
||||||
|
@ -66,7 +71,7 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
|
||||||
|
|
||||||
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
|
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
|
||||||
func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]byte, error) {
|
func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]byte, error) {
|
||||||
url := *c.ref.baseURL
|
url := *c.baseURL
|
||||||
url.Path = path
|
url.Path = path
|
||||||
var requestBodyReader io.Reader
|
var requestBodyReader io.Reader
|
||||||
if requestBody != nil {
|
if requestBody != nil {
|
||||||
|
@ -126,6 +131,22 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
|
||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getImage loads the specified image object.
|
||||||
|
func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error) {
|
||||||
|
// FIXME: validate components per validation.IsValidPathSegmentName?
|
||||||
|
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
|
||||||
|
body, err := c.doRequest("GET", path, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Note: This does absolutely no kind/version checking or conversions.
|
||||||
|
var isi imageStreamImage
|
||||||
|
if err := json.Unmarshal(body, &isi); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &isi.Image, nil
|
||||||
|
}
|
||||||
|
|
||||||
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
|
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
|
||||||
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
|
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
|
||||||
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
|
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
|
||||||
|
@ -133,42 +154,33 @@ func (c *openshiftClient) convertDockerImageReference(ref string) (string, error
|
||||||
if len(parts) != 2 {
|
if len(parts) != 2 {
|
||||||
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
|
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
|
||||||
}
|
}
|
||||||
// Sanity check that the reference is at least plausibly similar, i.e. uses the hard-coded port we expect.
|
return c.ref.dockerReference.Hostname() + "/" + parts[1], nil
|
||||||
if !strings.HasSuffix(parts[0], ":5000") {
|
|
||||||
return "", fmt.Errorf("Invalid format of docker reference %s: expecting port 5000", ref)
|
|
||||||
}
|
|
||||||
return c.dockerRegistryHostPart() + "/" + parts[1], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dockerRegistryHostPart returns the host:port of the embedded Docker Registry API endpoint
|
|
||||||
// FIXME: There seems to be no way to discover the correct:host port using the API, so hard-code our knowledge
|
|
||||||
// about how the OpenShift Atomic Registry is configured, per examples/atomic-registry/run.sh:
|
|
||||||
// -p OPENSHIFT_OAUTH_PROVIDER_URL=https://${INSTALL_HOST}:8443,COCKPIT_KUBE_URL=https://${INSTALL_HOST},REGISTRY_HOST=${INSTALL_HOST}:5000
|
|
||||||
func (c *openshiftClient) dockerRegistryHostPart() string {
|
|
||||||
return strings.SplitN(c.ref.baseURL.Host, ":", 2)[0] + ":5000"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type openshiftImageSource struct {
|
type openshiftImageSource struct {
|
||||||
client *openshiftClient
|
client *openshiftClient
|
||||||
// Values specific to this image
|
// Values specific to this image
|
||||||
certPath string // Only for parseDockerImageSource
|
ctx *types.SystemContext
|
||||||
tlsVerify bool // Only for parseDockerImageSource
|
requestedManifestMIMETypes []string
|
||||||
// State
|
// State
|
||||||
docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
|
docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
|
||||||
imageStreamImageName string // Resolved image identifier, or "" if not known yet
|
imageStreamImageName string // Resolved image identifier, or "" if not known yet
|
||||||
}
|
}
|
||||||
|
|
||||||
// newImageSource creates a new ImageSource for the specified reference and connection specification.
|
// newImageSource creates a new ImageSource for the specified reference,
|
||||||
func newImageSource(ref openshiftReference, certPath string, tlsVerify bool) (types.ImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||||
client, err := newOpenshiftClient(ref)
|
client, err := newOpenshiftClient(ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &openshiftImageSource{
|
return &openshiftImageSource{
|
||||||
client: client,
|
client: client,
|
||||||
certPath: certPath,
|
ctx: ctx,
|
||||||
tlsVerify: tlsVerify,
|
requestedManifestMIMETypes: requestedManifestMIMETypes,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,13 +190,22 @@ func (s *openshiftImageSource) Reference() types.ImageReference {
|
||||||
return s.client.ref
|
return s.client.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *openshiftImageSource) GetManifest(mimetypes []string) ([]byte, string, error) {
|
// Close removes resources associated with an initialized ImageSource, if any.
|
||||||
|
func (s *openshiftImageSource) Close() {
|
||||||
|
if s.docker != nil {
|
||||||
|
s.docker.Close()
|
||||||
|
s.docker = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
|
||||||
if err := s.ensureImageIsResolved(); err != nil {
|
if err := s.ensureImageIsResolved(); err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
return s.docker.GetManifest(mimetypes)
|
return s.docker.GetManifest()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||||
func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
|
||||||
if err := s.ensureImageIsResolved(); err != nil {
|
if err := s.ensureImageIsResolved(); err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
|
@ -193,7 +214,21 @@ func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
|
func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
|
||||||
return nil, nil
|
if err := s.ensureImageIsResolved(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
image, err := s.client.getImage(s.imageStreamImageName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var sigs [][]byte
|
||||||
|
for _, sig := range image.Signatures {
|
||||||
|
if sig.Type == imageSignatureTypeAtomic {
|
||||||
|
sigs = append(sigs, sig.Content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sigs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
|
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
|
||||||
|
@ -215,7 +250,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
|
||||||
}
|
}
|
||||||
var te *tagEvent
|
var te *tagEvent
|
||||||
for _, tag := range is.Status.Tags {
|
for _, tag := range is.Status.Tags {
|
||||||
if tag.Tag != s.client.ref.tag {
|
if tag.Tag != s.client.ref.dockerReference.Tag() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(tag.Items) > 0 {
|
if len(tag.Items) > 0 {
|
||||||
|
@ -236,7 +271,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d, err := dockerRef.NewImageSource(s.certPath, s.tlsVerify)
|
d, err := dockerRef.NewImageSource(s.ctx, s.requestedManifestMIMETypes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -248,10 +283,12 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
|
||||||
type openshiftImageDestination struct {
|
type openshiftImageDestination struct {
|
||||||
client *openshiftClient
|
client *openshiftClient
|
||||||
docker types.ImageDestination // The Docker Registry endpoint
|
docker types.ImageDestination // The Docker Registry endpoint
|
||||||
|
// State
|
||||||
|
imageStreamImageName string // "" if not yet known
|
||||||
}
|
}
|
||||||
|
|
||||||
// newImageDestination creates a new ImageDestination for the specified reference and connection specification.
|
// newImageDestination creates a new ImageDestination for the specified reference.
|
||||||
func newImageDestination(ref openshiftReference, certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) {
|
||||||
client, err := newOpenshiftClient(ref)
|
client, err := newOpenshiftClient(ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -260,12 +297,12 @@ func newImageDestination(ref openshiftReference, certPath string, tlsVerify bool
|
||||||
// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
|
// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
|
||||||
// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
|
// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
|
||||||
// the manifest digest at this point.
|
// the manifest digest at this point.
|
||||||
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", client.dockerRegistryHostPart(), client.ref.namespace, client.ref.stream, client.ref.tag)
|
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", client.ref.dockerReference.Hostname(), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
|
||||||
dockerRef, err := docker.ParseReference(dockerRefString)
|
dockerRef, err := docker.ParseReference(dockerRefString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
docker, err := dockerRef.NewImageDestination(certPath, tlsVerify)
|
docker, err := dockerRef.NewImageDestination(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -282,21 +319,47 @@ func (d *openshiftImageDestination) Reference() types.ImageReference {
|
||||||
return d.client.ref
|
return d.client.ref
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||||
|
func (d *openshiftImageDestination) Close() {
|
||||||
|
d.docker.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
|
func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
|
||||||
return []string{
|
return []string{
|
||||||
manifest.DockerV2Schema1SignedMIMEType,
|
manifest.DockerV2Schema1SignedMediaType,
|
||||||
manifest.DockerV2Schema1MIMEType,
|
manifest.DockerV2Schema1MediaType,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
|
func (d *openshiftImageDestination) SupportsSignatures() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutBlob writes contents of stream and returns its computed digest and size.
|
||||||
|
// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
|
||||||
|
// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
|
||||||
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
|
// to any other readers for download using the supplied digest.
|
||||||
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
|
func (d *openshiftImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
|
||||||
|
return d.docker.PutBlob(stream, digest, expectedSize)
|
||||||
|
}
|
||||||
|
|
||||||
func (d *openshiftImageDestination) PutManifest(m []byte) error {
|
func (d *openshiftImageDestination) PutManifest(m []byte) error {
|
||||||
|
// FIXME? Can this eventually just call d.docker.PutManifest()?
|
||||||
|
// Right now we need this as a skeleton to attach signatures to, and
|
||||||
|
// to workaround our inability to change tags when uploading v2s1 manifests.
|
||||||
|
|
||||||
// Note: This does absolutely no kind/version checking or conversions.
|
// Note: This does absolutely no kind/version checking or conversions.
|
||||||
manifestDigest, err := manifest.Digest(m)
|
manifestDigest, err := manifest.Digest(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
d.imageStreamImageName = manifestDigest
|
||||||
// FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter?
|
// FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter?
|
||||||
dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.dockerRegistryHostPart(), d.client.ref.namespace, d.client.ref.stream, manifestDigest)
|
dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.ref.dockerReference.Hostname(), d.client.ref.namespace, d.client.ref.stream, manifestDigest)
|
||||||
ism := imageStreamMapping{
|
ism := imageStreamMapping{
|
||||||
typeMeta: typeMeta{
|
typeMeta: typeMeta{
|
||||||
Kind: "ImageStreamMapping",
|
Kind: "ImageStreamMapping",
|
||||||
|
@ -313,7 +376,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
|
||||||
DockerImageReference: dockerImageReference,
|
DockerImageReference: dockerImageReference,
|
||||||
DockerImageManifest: string(m),
|
DockerImageManifest: string(m),
|
||||||
},
|
},
|
||||||
Tag: d.client.ref.tag,
|
Tag: d.client.ref.dockerReference.Tag(),
|
||||||
}
|
}
|
||||||
body, err := json.Marshal(ism)
|
body, err := json.Marshal(ism)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -327,20 +390,78 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return d.docker.PutManifest(m)
|
return nil
|
||||||
}
|
|
||||||
|
|
||||||
func (d *openshiftImageDestination) PutBlob(digest string, stream io.Reader) error {
|
|
||||||
return d.docker.PutBlob(digest, stream)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
|
func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
if len(signatures) != 0 {
|
if d.imageStreamImageName == "" {
|
||||||
return fmt.Errorf("Pushing signatures to an Atomic Registry is not supported")
|
return fmt.Errorf("Internal error: Unknown manifest digest, can't add signatures")
|
||||||
}
|
}
|
||||||
|
// Because image signatures are a shared resource in Atomic Registry, the default upload
|
||||||
|
// always adds signatures. Eventually we should also allow removing signatures.
|
||||||
|
|
||||||
|
if len(signatures) == 0 {
|
||||||
|
return nil // No need to even read the old state.
|
||||||
|
}
|
||||||
|
|
||||||
|
image, err := d.client.getImage(d.imageStreamImageName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
existingSigNames := map[string]struct{}{}
|
||||||
|
for _, sig := range image.Signatures {
|
||||||
|
existingSigNames[sig.objectMeta.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
sigExists:
|
||||||
|
for _, newSig := range signatures {
|
||||||
|
for _, existingSig := range image.Signatures {
|
||||||
|
if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
|
||||||
|
continue sigExists
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
|
||||||
|
var signatureName string
|
||||||
|
for {
|
||||||
|
randBytes := make([]byte, 16)
|
||||||
|
n, err := rand.Read(randBytes)
|
||||||
|
if err != nil || n != 16 {
|
||||||
|
return fmt.Errorf("Error generating random signature ID: %v, len %d", err, n)
|
||||||
|
}
|
||||||
|
signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
|
||||||
|
if _, ok := existingSigNames[signatureName]; !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Note: This does absolutely no kind/version checking or conversions.
|
||||||
|
sig := imageSignature{
|
||||||
|
typeMeta: typeMeta{
|
||||||
|
Kind: "ImageSignature",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
objectMeta: objectMeta{Name: signatureName},
|
||||||
|
Type: imageSignatureTypeAtomic,
|
||||||
|
Content: newSig,
|
||||||
|
}
|
||||||
|
body, err := json.Marshal(sig)
|
||||||
|
_, err = d.client.doRequest("POST", "/oapi/v1/imagesignatures", body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
|
// WARNING: This does not have any transactional semantics:
|
||||||
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||||
|
func (d *openshiftImageDestination) Commit() error {
|
||||||
|
return d.docker.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
|
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
|
||||||
type imageStream struct {
|
type imageStream struct {
|
||||||
Status imageStreamStatus `json:"status,omitempty"`
|
Status imageStreamStatus `json:"status,omitempty"`
|
||||||
|
@ -367,6 +488,22 @@ type image struct {
|
||||||
DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
|
DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
|
||||||
DockerImageManifest string `json:"dockerImageManifest,omitempty"`
|
DockerImageManifest string `json:"dockerImageManifest,omitempty"`
|
||||||
// DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
|
// DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
|
||||||
|
Signatures []imageSignature `json:"signatures,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
const imageSignatureTypeAtomic string = "atomic"
|
||||||
|
|
||||||
|
type imageSignature struct {
|
||||||
|
typeMeta `json:",inline"`
|
||||||
|
objectMeta `json:"metadata,omitempty"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Content []byte `json:"content"`
|
||||||
|
// Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
|
||||||
|
// ImageIdentity string `json:"imageIdentity,omitempty"`
|
||||||
|
// SignedClaims map[string]string `json:"signedClaims,omitempty"`
|
||||||
|
// Created *unversioned.Time `json:"created,omitempty"`
|
||||||
|
// IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
|
||||||
|
// IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
|
||||||
}
|
}
|
||||||
type imageStreamMapping struct {
|
type imageStreamMapping struct {
|
||||||
typeMeta `json:",inline"`
|
typeMeta `json:",inline"`
|
||||||
|
@ -398,7 +535,3 @@ type status struct {
|
||||||
// Details *StatusDetails `json:"details,omitempty"`
|
// Details *StatusDetails `json:"details,omitempty"`
|
||||||
Code int32 `json:"code,omitempty"`
|
Code int32 `json:"code,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *openshiftImageSource) Delete() error {
|
|
||||||
return fmt.Errorf("openshift#openshiftImageSource.Delete() not implmented")
|
|
||||||
}
|
|
||||||
|
|
97
vendor/github.com/containers/image/openshift/openshift_transport.go
generated
vendored
97
vendor/github.com/containers/image/openshift/openshift_transport.go
generated
vendored
|
@ -3,16 +3,15 @@ package openshift
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/containers/image/docker/policyconfiguration"
|
"github.com/containers/image/docker/policyconfiguration"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/docker/docker/reference"
|
"github.com/docker/docker/reference"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Transport is an ImageTransport for directory paths.
|
// Transport is an ImageTransport for OpenShift registry-hosted images.
|
||||||
var Transport = openshiftTransport{}
|
var Transport = openshiftTransport{}
|
||||||
|
|
||||||
type openshiftTransport struct{}
|
type openshiftTransport struct{}
|
||||||
|
@ -44,67 +43,33 @@ func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error
|
||||||
|
|
||||||
// openshiftReference is an ImageReference for OpenShift images.
|
// openshiftReference is an ImageReference for OpenShift images.
|
||||||
type openshiftReference struct {
|
type openshiftReference struct {
|
||||||
baseURL *url.URL
|
dockerReference reference.NamedTagged
|
||||||
namespace string
|
namespace string // Computed from dockerReference in advance.
|
||||||
stream string
|
stream string // Computed from dockerReference in advance.
|
||||||
tag string
|
|
||||||
dockerReference reference.Named // Computed from the above in advance, so that later references can not fail.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: Is imageName like this a good way to refer to OpenShift images?
|
|
||||||
// Keep this in sync with scopeRegexp!
|
|
||||||
var imageNameRegexp = regexp.MustCompile("^([^:/]*)/([^:/]*):([^:/]*)$")
|
|
||||||
|
|
||||||
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
|
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
|
||||||
func ParseReference(reference string) (types.ImageReference, error) {
|
func ParseReference(ref string) (types.ImageReference, error) {
|
||||||
// Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
|
r, err := reference.ParseNamed(ref)
|
||||||
cmdConfig := defaultClientConfig()
|
|
||||||
logrus.Debugf("cmdConfig: %#v", cmdConfig)
|
|
||||||
restConfig, err := cmdConfig.ClientConfig()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("failed to parse image reference %q, %v", ref, err)
|
||||||
}
|
}
|
||||||
// REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
|
tagged, ok := r.(reference.NamedTagged)
|
||||||
logrus.Debugf("restConfig: %#v", restConfig)
|
if !ok {
|
||||||
baseURL, _, err := restClientFor(restConfig)
|
return nil, fmt.Errorf("invalid image reference %s, %#v", ref, r)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
logrus.Debugf("URL: %#v", *baseURL)
|
return NewReference(tagged)
|
||||||
|
|
||||||
m := imageNameRegexp.FindStringSubmatch(reference)
|
|
||||||
if m == nil || len(m) != 4 {
|
|
||||||
return nil, fmt.Errorf("Invalid image reference %s, %#v", reference, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewReference(baseURL, m[1], m[2], m[3])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewReference returns an OpenShift reference for a base URL, namespace, stream and tag.
|
// NewReference returns an OpenShift reference for a reference.NamedTagged
|
||||||
func NewReference(baseURL *url.URL, namespace, stream, tag string) (types.ImageReference, error) {
|
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
|
||||||
// Precompute also dockerReference so that later references can not fail.
|
r := strings.SplitN(dockerRef.RemoteName(), "/", 3)
|
||||||
//
|
if len(r) != 2 {
|
||||||
// This discards ref.baseURL.Path, which is unexpected for a “base URL”;
|
return nil, fmt.Errorf("invalid image reference %s", dockerRef.String())
|
||||||
// but openshiftClient.doRequest actually completely overrides url.Path
|
|
||||||
// (and defaultServerURL rejects non-trivial Path values), so it is OK for
|
|
||||||
// us to ignore it as well.
|
|
||||||
//
|
|
||||||
// FIXME: This is, strictly speaking, a namespace conflict with images placed in a Docker registry running on the same host.
|
|
||||||
// Do we need to do something else, perhaps disambiguate (port number?) or namespace Docker and OpenShift separately?
|
|
||||||
dockerRef, err := reference.WithName(fmt.Sprintf("%s/%s/%s", baseURL.Host, namespace, stream))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
dockerRef, err = reference.WithTag(dockerRef, tag)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return openshiftReference{
|
return openshiftReference{
|
||||||
baseURL: baseURL,
|
namespace: r[0],
|
||||||
namespace: namespace,
|
stream: r[1],
|
||||||
stream: stream,
|
|
||||||
tag: tag,
|
|
||||||
dockerReference: dockerRef,
|
dockerReference: dockerRef,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@ -119,7 +84,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
|
||||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||||
func (ref openshiftReference) StringWithinTransport() string {
|
func (ref openshiftReference) StringWithinTransport() string {
|
||||||
return fmt.Sprintf("%s/%s:%s", ref.namespace, ref.stream, ref.tag)
|
return ref.dockerReference.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// DockerReference returns a Docker reference associated with this reference
|
// DockerReference returns a Docker reference associated with this reference
|
||||||
|
@ -154,16 +119,26 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImage returns a types.Image for this reference.
|
// NewImage returns a types.Image for this reference.
|
||||||
func (ref openshiftReference) NewImage(certPath string, tlsVerify bool) (types.Image, error) {
|
// The caller must call .Close() on the returned Image.
|
||||||
|
func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
return nil, errors.New("Full Image support not implemented for atomic: image names")
|
return nil, errors.New("Full Image support not implemented for atomic: image names")
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference.
|
// NewImageSource returns a types.ImageSource for this reference,
|
||||||
func (ref openshiftReference) NewImageSource(certPath string, tlsVerify bool) (types.ImageSource, error) {
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
return newImageSource(ref, certPath, tlsVerify)
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
func (ref openshiftReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||||
|
return newImageSource(ctx, ref, requestedManifestMIMETypes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
func (ref openshiftReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
return newImageDestination(ref, certPath, tlsVerify)
|
func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
|
return newImageDestination(ctx, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error {
|
||||||
|
return fmt.Errorf("Deleting images not implemented for atomic: images")
|
||||||
}
|
}
|
||||||
|
|
4
vendor/github.com/containers/image/transports/transports.go
generated
vendored
4
vendor/github.com/containers/image/transports/transports.go
generated
vendored
|
@ -6,7 +6,7 @@ import (
|
||||||
|
|
||||||
"github.com/containers/image/directory"
|
"github.com/containers/image/directory"
|
||||||
"github.com/containers/image/docker"
|
"github.com/containers/image/docker"
|
||||||
"github.com/containers/image/oci"
|
ociLayout "github.com/containers/image/oci/layout"
|
||||||
"github.com/containers/image/openshift"
|
"github.com/containers/image/openshift"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
)
|
)
|
||||||
|
@ -19,7 +19,7 @@ func init() {
|
||||||
for _, t := range []types.ImageTransport{
|
for _, t := range []types.ImageTransport{
|
||||||
directory.Transport,
|
directory.Transport,
|
||||||
docker.Transport,
|
docker.Transport,
|
||||||
oci.Transport,
|
ociLayout.Transport,
|
||||||
openshift.Transport,
|
openshift.Transport,
|
||||||
} {
|
} {
|
||||||
name := t.Name()
|
name := t.Name()
|
||||||
|
|
90
vendor/github.com/containers/image/types/types.go
generated
vendored
90
vendor/github.com/containers/image/types/types.go
generated
vendored
|
@ -71,52 +71,87 @@ type ImageReference interface {
|
||||||
PolicyConfigurationNamespaces() []string
|
PolicyConfigurationNamespaces() []string
|
||||||
|
|
||||||
// NewImage returns a types.Image for this reference.
|
// NewImage returns a types.Image for this reference.
|
||||||
NewImage(certPath string, tlsVerify bool) (Image, error)
|
// The caller must call .Close() on the returned Image.
|
||||||
// NewImageSource returns a types.ImageSource for this reference.
|
NewImage(ctx *SystemContext) (Image, error)
|
||||||
NewImageSource(certPath string, tlsVerify bool) (ImageSource, error)
|
// NewImageSource returns a types.ImageSource for this reference,
|
||||||
|
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||||
|
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||||
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
|
NewImageSource(ctx *SystemContext, requestedManifestMIMETypes []string) (ImageSource, error)
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
NewImageDestination(certPath string, tlsVerify bool) (ImageDestination, error)
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
|
NewImageDestination(ctx *SystemContext) (ImageDestination, error)
|
||||||
|
|
||||||
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
DeleteImage(ctx *SystemContext) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// ImageSource is a service, possibly remote (= slow), to download components of a single image.
|
// ImageSource is a service, possibly remote (= slow), to download components of a single image.
|
||||||
// This is primarily useful for copying images around; for examining their properties, Image (below)
|
// This is primarily useful for copying images around; for examining their properties, Image (below)
|
||||||
// is usually more useful.
|
// is usually more useful.
|
||||||
|
// Each ImageSource should eventually be closed by calling Close().
|
||||||
type ImageSource interface {
|
type ImageSource interface {
|
||||||
// Reference returns the reference used to set up this source, _as specified by the user_
|
// Reference returns the reference used to set up this source, _as specified by the user_
|
||||||
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
||||||
Reference() ImageReference
|
Reference() ImageReference
|
||||||
// GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown. The slice parameter indicates the supported mime types the manifest should be when getting it.
|
// Close removes resources associated with an initialized ImageSource, if any.
|
||||||
|
Close()
|
||||||
|
// GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown.
|
||||||
// It may use a remote (= slow) service.
|
// It may use a remote (= slow) service.
|
||||||
GetManifest([]string) ([]byte, string, error)
|
GetManifest() ([]byte, string, error)
|
||||||
// Note: Calling GetBlob() may have ordering dependencies WRT other methods of this type. FIXME: How does this work with (docker save) on stdin?
|
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||||
// the second return value is the size of the blob. If not known 0 is returned
|
|
||||||
GetBlob(digest string) (io.ReadCloser, int64, error)
|
GetBlob(digest string) (io.ReadCloser, int64, error)
|
||||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||||
GetSignatures() ([][]byte, error)
|
GetSignatures() ([][]byte, error)
|
||||||
// Delete image from registry, if operation is supported
|
|
||||||
Delete() error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
|
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
|
||||||
|
//
|
||||||
|
// There is a specific required order for some of the calls:
|
||||||
|
// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
|
||||||
|
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
|
||||||
|
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
|
||||||
|
//
|
||||||
|
// Each ImageDestination should eventually be closed by calling Close().
|
||||||
type ImageDestination interface {
|
type ImageDestination interface {
|
||||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||||
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
|
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
|
||||||
Reference() ImageReference
|
Reference() ImageReference
|
||||||
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
|
// Close removes resources associated with an initialized ImageDestination, if any.
|
||||||
PutManifest([]byte) error
|
Close()
|
||||||
// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
|
|
||||||
PutBlob(digest string, stream io.Reader) error
|
|
||||||
PutSignatures(signatures [][]byte) error
|
|
||||||
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
|
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
|
||||||
// If an empty slice or nil it's returned, then any mime type can be tried to upload
|
// If an empty slice or nil it's returned, then any mime type can be tried to upload
|
||||||
SupportedManifestMIMETypes() []string
|
SupportedManifestMIMETypes() []string
|
||||||
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
|
SupportsSignatures() error
|
||||||
|
|
||||||
|
// PutBlob writes contents of stream and returns its computed digest and size.
|
||||||
|
// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
|
||||||
|
// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
|
||||||
|
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||||
|
// to any other readers for download using the supplied digest.
|
||||||
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
|
PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error)
|
||||||
|
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
|
||||||
|
PutManifest([]byte) error
|
||||||
|
PutSignatures(signatures [][]byte) error
|
||||||
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
|
// WARNING: This does not have any transactional semantics:
|
||||||
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
|
||||||
|
Commit() error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Image is the primary API for inspecting properties of images.
|
// Image is the primary API for inspecting properties of images.
|
||||||
|
// Each Image should eventually be closed by calling Close().
|
||||||
type Image interface {
|
type Image interface {
|
||||||
// Reference returns the reference used to set up this source, _as specified by the user_
|
// Reference returns the reference used to set up this source, _as specified by the user_
|
||||||
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
||||||
Reference() ImageReference
|
Reference() ImageReference
|
||||||
|
// Close removes resources associated with an initialized Image, if any.
|
||||||
|
Close()
|
||||||
// ref to repository?
|
// ref to repository?
|
||||||
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
|
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
|
||||||
// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
|
// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
|
||||||
|
@ -143,3 +178,28 @@ type ImageInspectInfo struct {
|
||||||
Os string
|
Os string
|
||||||
Layers []string
|
Layers []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SystemContext allows parametrizing access to implicitly-accessed resources,
|
||||||
|
// like configuration files in /etc and users' login state in their home directory.
|
||||||
|
// Various components can share the same field only if their semantics is exactly
|
||||||
|
// the same; if in doubt, add a new field.
|
||||||
|
// It is always OK to pass nil instead of a SystemContext.
|
||||||
|
type SystemContext struct {
|
||||||
|
// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
|
||||||
|
// Not used for any of the more specific path overrides available in this struct.
|
||||||
|
// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
|
||||||
|
// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths .
|
||||||
|
// and there is no need to worry about the environment.)
|
||||||
|
// NOTE: This does NOT affect paths starting by $HOME.
|
||||||
|
RootForImplicitAbsolutePaths string
|
||||||
|
|
||||||
|
// === Global configuration overrides ===
|
||||||
|
// If not "", overrides the system's default path for signature.Policy configuration.
|
||||||
|
SignaturePolicyPath string
|
||||||
|
// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
|
||||||
|
RegistriesDirPath string
|
||||||
|
|
||||||
|
// === docker.Transport overrides ===
|
||||||
|
DockerCertPath string // If not "", a directory containing "cert.pem" and "key.pem" used when talking to a Docker Registry
|
||||||
|
DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
|
||||||
|
}
|
||||||
|
|
37
vendor/github.com/docker/distribution/.gitignore
generated
vendored
37
vendor/github.com/docker/distribution/.gitignore
generated
vendored
|
@ -1,37 +0,0 @@
|
||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
||||||
|
|
||||||
# never checkin from the bin file (for now)
|
|
||||||
bin/*
|
|
||||||
|
|
||||||
# Test key files
|
|
||||||
*.pem
|
|
||||||
|
|
||||||
# Cover profiles
|
|
||||||
*.out
|
|
||||||
|
|
||||||
# Editor/IDE specific files.
|
|
||||||
*.sublime-project
|
|
||||||
*.sublime-workspace
|
|
18
vendor/github.com/docker/distribution/.mailmap
generated
vendored
18
vendor/github.com/docker/distribution/.mailmap
generated
vendored
|
@ -1,18 +0,0 @@
|
||||||
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
|
|
||||||
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
|
|
||||||
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
|
|
||||||
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
|
|
||||||
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
|
|
||||||
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
|
|
||||||
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
|
|
||||||
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
|
|
||||||
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
|
|
||||||
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
|
|
||||||
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
|
|
||||||
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
|
|
||||||
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
|
|
||||||
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
|
|
||||||
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
|
|
||||||
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
|
|
||||||
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
|
|
||||||
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
|
|
147
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
147
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
|
@ -1,147 +0,0 @@
|
||||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
|
||||||
Aaron Schlesinger <aschlesinger@deis.com>
|
|
||||||
Aaron Vinson <avinson.public@gmail.com>
|
|
||||||
Adam Enger <adamenger@gmail.com>
|
|
||||||
Adrian Mouat <adrian.mouat@gmail.com>
|
|
||||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
|
||||||
Alex Chan <alex.chan@metaswitch.com>
|
|
||||||
Alex Elman <aelman@indeed.com>
|
|
||||||
Alexey Gladkov <gladkov.alexey@gmail.com>
|
|
||||||
allencloud <allen.sun@daocloud.io>
|
|
||||||
amitshukla <ashukla73@hotmail.com>
|
|
||||||
Amy Lindburg <amy.lindburg@docker.com>
|
|
||||||
Andrew Hsu <andrewhsu@acm.org>
|
|
||||||
Andrew Meredith <andymeredith@gmail.com>
|
|
||||||
Andrew T Nguyen <andrew.nguyen@docker.com>
|
|
||||||
Andrey Kostov <kostov.andrey@gmail.com>
|
|
||||||
Andy Goldstein <agoldste@redhat.com>
|
|
||||||
Anis Elleuch <vadmeste@gmail.com>
|
|
||||||
Anton Tiurin <noxiouz@yandex.ru>
|
|
||||||
Antonio Mercado <amercado@thinknode.com>
|
|
||||||
Antonio Murdaca <runcom@redhat.com>
|
|
||||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
|
||||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
|
||||||
Arthur Baars <arthur@semmle.com>
|
|
||||||
Asuka Suzuki <hello@tanksuzuki.com>
|
|
||||||
Avi Miller <avi.miller@oracle.com>
|
|
||||||
Ayose Cazorla <ayosec@gmail.com>
|
|
||||||
BadZen <dave.trombley@gmail.com>
|
|
||||||
Ben Firshman <ben@firshman.co.uk>
|
|
||||||
bin liu <liubin0329@gmail.com>
|
|
||||||
Brian Bland <brian.bland@docker.com>
|
|
||||||
burnettk <burnettk@gmail.com>
|
|
||||||
Carson A <ca@carsonoid.net>
|
|
||||||
Chris Dillon <squarism@gmail.com>
|
|
||||||
cyli <cyli@twistedmatrix.com>
|
|
||||||
Daisuke Fujita <dtanshi45@gmail.com>
|
|
||||||
Daniel Huhn <daniel@danielhuhn.de>
|
|
||||||
Darren Shepherd <darren@rancher.com>
|
|
||||||
Dave Trombley <dave.trombley@gmail.com>
|
|
||||||
Dave Tucker <dt@docker.com>
|
|
||||||
David Lawrence <david.lawrence@docker.com>
|
|
||||||
David Verhasselt <david@crowdway.com>
|
|
||||||
David Xia <dxia@spotify.com>
|
|
||||||
davidli <wenquan.li@hp.com>
|
|
||||||
Dejan Golja <dejan@golja.org>
|
|
||||||
Derek McGowan <derek@mcgstyle.net>
|
|
||||||
Diogo Mónica <diogo.monica@gmail.com>
|
|
||||||
DJ Enriquez <dj.enriquez@infospace.com>
|
|
||||||
Donald Huang <don.hcd@gmail.com>
|
|
||||||
Doug Davis <dug@us.ibm.com>
|
|
||||||
Eric Yang <windfarer@gmail.com>
|
|
||||||
Fabio Huser <fabio@fh1.ch>
|
|
||||||
farmerworking <farmerworking@gmail.com>
|
|
||||||
Felix Yan <felixonmars@archlinux.org>
|
|
||||||
Florentin Raud <florentin.raud@gmail.com>
|
|
||||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
|
||||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
|
||||||
Gleb Schukin <gschukin@ptsecurity.com>
|
|
||||||
harche <p.harshal@gmail.com>
|
|
||||||
Henri Gomez <henri.gomez@gmail.com>
|
|
||||||
Hu Keping <hukeping@huawei.com>
|
|
||||||
Hua Wang <wanghua.humble@gmail.com>
|
|
||||||
HuKeping <hukeping@huawei.com>
|
|
||||||
Ian Babrou <ibobrik@gmail.com>
|
|
||||||
igayoso <igayoso@gmail.com>
|
|
||||||
Jack Griffin <jackpg14@gmail.com>
|
|
||||||
Jason Freidman <jason.freidman@gmail.com>
|
|
||||||
Jeff Nickoloff <jeff@allingeek.com>
|
|
||||||
Jessie Frazelle <jessie@docker.com>
|
|
||||||
jhaohai <jhaohai@foxmail.com>
|
|
||||||
Jianqing Wang <tsing@jianqing.org>
|
|
||||||
John Starks <jostarks@microsoft.com>
|
|
||||||
Jon Johnson <jonjohnson@google.com>
|
|
||||||
Jon Poler <jonathan.poler@apcera.com>
|
|
||||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
|
||||||
Jordan Liggitt <jliggitt@redhat.com>
|
|
||||||
Josh Hawn <josh.hawn@docker.com>
|
|
||||||
Julien Fernandez <julien.fernandez@gmail.com>
|
|
||||||
Ke Xu <leonhartx.k@gmail.com>
|
|
||||||
Keerthan Mala <kmala@engineyard.com>
|
|
||||||
Kelsey Hightower <kelsey.hightower@gmail.com>
|
|
||||||
Kenneth Lim <kennethlimcp@gmail.com>
|
|
||||||
Kenny Leung <kleung@google.com>
|
|
||||||
Li Yi <denverdino@gmail.com>
|
|
||||||
Liu Hua <sdu.liu@huawei.com>
|
|
||||||
liuchang0812 <liuchang0812@gmail.com>
|
|
||||||
Louis Kottmann <louis.kottmann@gmail.com>
|
|
||||||
Luke Carpenter <x@rubynerd.net>
|
|
||||||
Mary Anthony <mary@docker.com>
|
|
||||||
Matt Bentley <mbentley@mbentley.net>
|
|
||||||
Matt Duch <matt@learnmetrics.com>
|
|
||||||
Matt Moore <mattmoor@google.com>
|
|
||||||
Matt Robenolt <matt@ydekproductions.com>
|
|
||||||
Michael Prokop <mika@grml.org>
|
|
||||||
Michal Minar <miminar@redhat.com>
|
|
||||||
Miquel Sabaté <msabate@suse.com>
|
|
||||||
Morgan Bauer <mbauer@us.ibm.com>
|
|
||||||
moxiegirl <mary@docker.com>
|
|
||||||
Nathan Sullivan <nathan@nightsys.net>
|
|
||||||
nevermosby <robolwq@qq.com>
|
|
||||||
Nghia Tran <tcnghia@gmail.com>
|
|
||||||
Nikita Tarasov <nikita@mygento.ru>
|
|
||||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
|
||||||
Oilbeater <liumengxinfly@gmail.com>
|
|
||||||
Olivier Gambier <olivier@docker.com>
|
|
||||||
Olivier Jacques <olivier.jacques@hp.com>
|
|
||||||
Omer Cohen <git@omer.io>
|
|
||||||
Patrick Devine <patrick.devine@docker.com>
|
|
||||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
|
||||||
Philip Misiowiec <philip@atlashealth.com>
|
|
||||||
Richard Scothern <richard.scothern@docker.com>
|
|
||||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
|
||||||
Rusty Conover <rusty@luckydinosaur.com>
|
|
||||||
Sean Boran <Boran@users.noreply.github.com>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
|
||||||
Serge Dubrouski <sergeyfd@gmail.com>
|
|
||||||
Sharif Nassar <sharif@mrwacky.com>
|
|
||||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
|
||||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
|
||||||
Simon Thulbourn <simon+github@thulbourn.com>
|
|
||||||
Spencer Rinehart <anubis@overthemonkey.com>
|
|
||||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
|
||||||
Stefan Weil <sw@weilnetz.de>
|
|
||||||
Stephen J Day <stephen.day@docker.com>
|
|
||||||
Sungho Moon <sungho.moon@navercorp.com>
|
|
||||||
Sven Dowideit <SvenDowideit@home.org.au>
|
|
||||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
|
||||||
Ted Reed <ted.reed@gmail.com>
|
|
||||||
tgic <farmer1992@gmail.com>
|
|
||||||
Thomas Sjögren <konstruktoid@users.noreply.github.com>
|
|
||||||
Tianon Gravi <admwiggin@gmail.com>
|
|
||||||
Tibor Vass <teabee89@gmail.com>
|
|
||||||
Tonis Tiigi <tonistiigi@gmail.com>
|
|
||||||
Tony Holdstock-Brown <tony@docker.com>
|
|
||||||
Trevor Pounds <trevor.pounds@gmail.com>
|
|
||||||
Troels Thomsen <troels@thomsen.io>
|
|
||||||
Vincent Batts <vbatts@redhat.com>
|
|
||||||
Vincent Demeester <vincent@sbr.pm>
|
|
||||||
Vincent Giersch <vincent.giersch@ovh.net>
|
|
||||||
W. Trevor King <wking@tremily.us>
|
|
||||||
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
|
||||||
xg.song <xg.song@venusource.com>
|
|
||||||
xiekeyang <xiekeyang@huawei.com>
|
|
||||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
|
||||||
yuzou <zouyu7@huawei.com>
|
|
||||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
|
||||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
|
119
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
119
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
|
@ -1,119 +0,0 @@
|
||||||
|
|
||||||
# Building the registry source
|
|
||||||
|
|
||||||
## Use-case
|
|
||||||
|
|
||||||
This is useful if you intend to actively work on the registry.
|
|
||||||
|
|
||||||
### Alternatives
|
|
||||||
|
|
||||||
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
|
|
||||||
|
|
||||||
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
|
|
||||||
|
|
||||||
OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md).
|
|
||||||
|
|
||||||
### Gotchas
|
|
||||||
|
|
||||||
You are expected to know your way around with go & git.
|
|
||||||
|
|
||||||
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
|
|
||||||
|
|
||||||
## Build the development environment
|
|
||||||
|
|
||||||
The first prerequisite of properly building distribution targets is to have a Go
|
|
||||||
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
|
|
||||||
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
|
|
||||||
environment.
|
|
||||||
|
|
||||||
If a Go development environment is setup, one can use `go get` to install the
|
|
||||||
`registry` command from the current latest:
|
|
||||||
|
|
||||||
go get github.com/docker/distribution/cmd/registry
|
|
||||||
|
|
||||||
The above will install the source repository into the `GOPATH`.
|
|
||||||
|
|
||||||
Now create the directory for the registry data (this might require you to set permissions properly)
|
|
||||||
|
|
||||||
mkdir -p /var/lib/registry
|
|
||||||
|
|
||||||
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
|
|
||||||
|
|
||||||
The `registry`
|
|
||||||
binary can then be run with the following:
|
|
||||||
|
|
||||||
$ $GOPATH/bin/registry --version
|
|
||||||
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
|
|
||||||
|
|
||||||
> __NOTE:__ While you do not need to use `go get` to checkout the distribution
|
|
||||||
> project, for these build instructions to work, the project must be checked
|
|
||||||
> out in the correct location in the `GOPATH`. This should almost always be
|
|
||||||
> `$GOPATH/src/github.com/docker/distribution`.
|
|
||||||
|
|
||||||
The registry can be run with the default config using the following
|
|
||||||
incantation:
|
|
||||||
|
|
||||||
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
|
|
||||||
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
|
||||||
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
|
||||||
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
|
||||||
INFO[0000] debug server listening localhost:5001
|
|
||||||
|
|
||||||
If it is working, one should see the above log messages.
|
|
||||||
|
|
||||||
### Repeatable Builds
|
|
||||||
|
|
||||||
For the full development experience, one should `cd` into
|
|
||||||
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
|
|
||||||
commands, such as `go test`, should work per package (please see
|
|
||||||
[Developing](#developing) if they don't work).
|
|
||||||
|
|
||||||
A `Makefile` has been provided as a convenience to support repeatable builds.
|
|
||||||
Please install the following into `GOPATH` for it to work:
|
|
||||||
|
|
||||||
go get github.com/tools/godep github.com/golang/lint/golint
|
|
||||||
|
|
||||||
**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly.
|
|
||||||
|
|
||||||
Once these commands are available in the `GOPATH`, run `make` to get a full
|
|
||||||
build:
|
|
||||||
|
|
||||||
$ make
|
|
||||||
+ clean
|
|
||||||
+ fmt
|
|
||||||
+ vet
|
|
||||||
+ lint
|
|
||||||
+ build
|
|
||||||
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
|
|
||||||
github.com/Sirupsen/logrus
|
|
||||||
github.com/docker/libtrust
|
|
||||||
...
|
|
||||||
github.com/yvasiyarov/gorelic
|
|
||||||
github.com/docker/distribution/registry/handlers
|
|
||||||
github.com/docker/distribution/cmd/registry
|
|
||||||
+ test
|
|
||||||
...
|
|
||||||
ok github.com/docker/distribution/digest 7.875s
|
|
||||||
ok github.com/docker/distribution/manifest 0.028s
|
|
||||||
ok github.com/docker/distribution/notifications 17.322s
|
|
||||||
? github.com/docker/distribution/registry [no test files]
|
|
||||||
ok github.com/docker/distribution/registry/api/v2 0.101s
|
|
||||||
? github.com/docker/distribution/registry/auth [no test files]
|
|
||||||
ok github.com/docker/distribution/registry/auth/silly 0.011s
|
|
||||||
...
|
|
||||||
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
|
|
||||||
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
|
|
||||||
+ binaries
|
|
||||||
|
|
||||||
The above provides a repeatable build using the contents of the vendored
|
|
||||||
Godeps directory. This includes formatting, vetting, linting, building,
|
|
||||||
testing and generating tagged binaries. We can verify this worked by running
|
|
||||||
the registry binary generated in the "./bin" directory:
|
|
||||||
|
|
||||||
$ ./bin/registry -version
|
|
||||||
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
|
|
||||||
|
|
||||||
### Optional build tags
|
|
||||||
|
|
||||||
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
|
|
||||||
the environment variable `DOCKER_BUILDTAGS`.
|
|
35
vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
35
vendor/github.com/docker/distribution/CHANGELOG.md
generated
vendored
|
@ -1,35 +0,0 @@
|
||||||
# Changelog
|
|
||||||
|
|
||||||
## 2.5.0 (2016-06-14)
|
|
||||||
|
|
||||||
### Storage
|
|
||||||
- Ensure uploads directory is cleaned after upload is commited
|
|
||||||
- Add ability to cap concurrent operations in filesystem driver
|
|
||||||
- S3: Add 'us-gov-west-1' to the valid region list
|
|
||||||
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
|
|
||||||
- Add redirect middleware
|
|
||||||
|
|
||||||
#### Registry
|
|
||||||
- Add support for blobAccessController middleware
|
|
||||||
- Add support for layers from foreign sources
|
|
||||||
- Remove signature store
|
|
||||||
- Add support for Let's Encrypt
|
|
||||||
- Correct yaml key names in configuration
|
|
||||||
|
|
||||||
#### Client
|
|
||||||
- Add option to get content digest from manifest get
|
|
||||||
|
|
||||||
#### Spec
|
|
||||||
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
|
|
||||||
- Clarify API documentation around catalog fetch behavior
|
|
||||||
|
|
||||||
### API
|
|
||||||
- Support returning HTTP 429 (Too Many Requests)
|
|
||||||
|
|
||||||
### Documentation
|
|
||||||
- Update auth documentation examples to show "expires in" as int
|
|
||||||
|
|
||||||
### Docker Image
|
|
||||||
- Use Alpine Linux as base image
|
|
||||||
|
|
||||||
|
|
140
vendor/github.com/docker/distribution/CONTRIBUTING.md
generated
vendored
140
vendor/github.com/docker/distribution/CONTRIBUTING.md
generated
vendored
|
@ -1,140 +0,0 @@
|
||||||
# Contributing to the registry
|
|
||||||
|
|
||||||
## Before reporting an issue...
|
|
||||||
|
|
||||||
### If your problem is with...
|
|
||||||
|
|
||||||
- automated builds
|
|
||||||
- your account on the [Docker Hub](https://hub.docker.com/)
|
|
||||||
- any other [Docker Hub](https://hub.docker.com/) issue
|
|
||||||
|
|
||||||
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
|
|
||||||
|
|
||||||
### If you...
|
|
||||||
|
|
||||||
- need help setting up your registry
|
|
||||||
- can't figure out something
|
|
||||||
- are not sure what's going on or what your problem is
|
|
||||||
|
|
||||||
Then please do not open an issue here yet - you should first try one of the following support forums:
|
|
||||||
|
|
||||||
- irc: #docker-distribution on freenode
|
|
||||||
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
|
|
||||||
|
|
||||||
## Reporting an issue properly
|
|
||||||
|
|
||||||
By following these simple rules you will get better and faster feedback on your issue.
|
|
||||||
|
|
||||||
- search the bugtracker for an already reported issue
|
|
||||||
|
|
||||||
### If you found an issue that describes your problem:
|
|
||||||
|
|
||||||
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
|
|
||||||
- please refrain from adding "same thing here" or "+1" comments
|
|
||||||
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
|
|
||||||
- comment if you have some new, technical and relevant information to add to the case
|
|
||||||
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
|
|
||||||
|
|
||||||
### If you have not found an existing issue that describes your problem:
|
|
||||||
|
|
||||||
1. create a new issue, with a succinct title that describes your issue:
|
|
||||||
- bad title: "It doesn't work with my docker"
|
|
||||||
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
|
|
||||||
2. copy the output of:
|
|
||||||
- `docker version`
|
|
||||||
- `docker info`
|
|
||||||
- `docker exec <registry-container> registry -version`
|
|
||||||
3. copy the command line you used to launch your Registry
|
|
||||||
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
|
|
||||||
5. reproduce your problem and get your docker daemon logs showing the error
|
|
||||||
6. if relevant, copy your registry logs that show the error
|
|
||||||
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
|
|
||||||
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
|
|
||||||
|
|
||||||
## Contributing a patch for a known bug, or a small correction
|
|
||||||
|
|
||||||
You should follow the basic GitHub workflow:
|
|
||||||
|
|
||||||
1. fork
|
|
||||||
2. commit a change
|
|
||||||
3. make sure the tests pass
|
|
||||||
4. PR
|
|
||||||
|
|
||||||
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
|
|
||||||
|
|
||||||
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
|
|
||||||
- sign your commits using `-s`: `git commit -s -m "My commit"`
|
|
||||||
|
|
||||||
Some simple rules to ensure quick merge:
|
|
||||||
|
|
||||||
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
|
|
||||||
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
|
|
||||||
- if you need to amend your PR following comments, please squash instead of adding more commits
|
|
||||||
|
|
||||||
## Contributing new features
|
|
||||||
|
|
||||||
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
|
|
||||||
|
|
||||||
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
|
|
||||||
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
|
|
||||||
|
|
||||||
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
|
|
||||||
|
|
||||||
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
|
|
||||||
|
|
||||||
It's mandatory to:
|
|
||||||
|
|
||||||
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
|
|
||||||
- address maintainers' comments and modify your submission accordingly
|
|
||||||
- write tests for any new code
|
|
||||||
|
|
||||||
Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
|
|
||||||
|
|
||||||
Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
|
|
||||||
|
|
||||||
## Coding Style
|
|
||||||
|
|
||||||
Unless explicitly stated, we follow all coding guidelines from the Go
|
|
||||||
community. While some of these standards may seem arbitrary, they somehow seem
|
|
||||||
to result in a solid, consistent codebase.
|
|
||||||
|
|
||||||
It is possible that the code base does not currently comply with these
|
|
||||||
guidelines. We are not looking for a massive PR that fixes this, since that
|
|
||||||
goes against the spirit of the guidelines. All new contributions should make a
|
|
||||||
best effort to clean up and make the code base better than they left it.
|
|
||||||
Obviously, apply your best judgement. Remember, the goal here is to make the
|
|
||||||
code base easier for humans to navigate and understand. Always keep that in
|
|
||||||
mind when nudging others to comply.
|
|
||||||
|
|
||||||
The rules:
|
|
||||||
|
|
||||||
1. All code should be formatted with `gofmt -s`.
|
|
||||||
2. All code should pass the default levels of
|
|
||||||
[`golint`](https://github.com/golang/lint).
|
|
||||||
3. All code should follow the guidelines covered in [Effective
|
|
||||||
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
|
|
||||||
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
|
|
||||||
4. Comment the code. Tell us the why, the history and the context.
|
|
||||||
5. Document _all_ declarations and methods, even private ones. Declare
|
|
||||||
expectations, caveats and anything else that may be important. If a type
|
|
||||||
gets exported, having the comments already there will ensure it's ready.
|
|
||||||
6. Variable name length should be proportional to its context and no longer.
|
|
||||||
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
|
|
||||||
In practice, short methods will have short variable names and globals will
|
|
||||||
have longer names.
|
|
||||||
7. No underscores in package names. If you need a compound name, step back,
|
|
||||||
and re-examine why you need a compound name. If you still think you need a
|
|
||||||
compound name, lose the underscore.
|
|
||||||
8. No utils or helpers packages. If a function is not general enough to
|
|
||||||
warrant its own package, it has not been written generally enough to be a
|
|
||||||
part of a util package. Just leave it unexported and well-documented.
|
|
||||||
9. All tests should run with `go test` and outside tooling should not be
|
|
||||||
required. No, we don't need another unit testing framework. Assertion
|
|
||||||
packages are acceptable if they provide _real_ incremental value.
|
|
||||||
10. Even though we call these "rules" above, they are actually just
|
|
||||||
guidelines. Since you've read all the rules, you now know that.
|
|
||||||
|
|
||||||
If you are having trouble getting into the mood of idiomatic Go, we recommend
|
|
||||||
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
|
|
||||||
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
|
|
||||||
kool-aid is a lot easier than going thirsty.
|
|
18
vendor/github.com/docker/distribution/Dockerfile
generated
vendored
18
vendor/github.com/docker/distribution/Dockerfile
generated
vendored
|
@ -1,18 +0,0 @@
|
||||||
FROM golang:1.6-alpine
|
|
||||||
|
|
||||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
|
||||||
ENV DOCKER_BUILDTAGS include_oss include_gcs
|
|
||||||
|
|
||||||
WORKDIR $DISTRIBUTION_DIR
|
|
||||||
COPY . $DISTRIBUTION_DIR
|
|
||||||
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
|
|
||||||
|
|
||||||
RUN set -ex \
|
|
||||||
&& apk add --no-cache make git
|
|
||||||
|
|
||||||
RUN make PREFIX=/go clean binaries
|
|
||||||
|
|
||||||
VOLUME ["/var/lib/registry"]
|
|
||||||
EXPOSE 5000
|
|
||||||
ENTRYPOINT ["registry"]
|
|
||||||
CMD ["serve", "/etc/docker/registry/config.yml"]
|
|
1
vendor/github.com/docker/distribution/LICENSE
generated
vendored
1
vendor/github.com/docker/distribution/LICENSE
generated
vendored
|
@ -199,4 +199,3 @@ Apache License
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
|
|
||||||
|
|
58
vendor/github.com/docker/distribution/MAINTAINERS
generated
vendored
58
vendor/github.com/docker/distribution/MAINTAINERS
generated
vendored
|
@ -1,58 +0,0 @@
|
||||||
# Distribution maintainers file
|
|
||||||
#
|
|
||||||
# This file describes who runs the docker/distribution project and how.
|
|
||||||
# This is a living document - if you see something out of date or missing, speak up!
|
|
||||||
#
|
|
||||||
# It is structured to be consumable by both humans and programs.
|
|
||||||
# To extract its contents programmatically, use any TOML-compliant parser.
|
|
||||||
#
|
|
||||||
# This file is compiled into the MAINTAINERS file in docker/opensource.
|
|
||||||
#
|
|
||||||
[Org]
|
|
||||||
[Org."Core maintainers"]
|
|
||||||
people = [
|
|
||||||
"aaronlehmann",
|
|
||||||
"dmcgowan",
|
|
||||||
"dmp42",
|
|
||||||
"richardscothern",
|
|
||||||
"shykes",
|
|
||||||
"stevvooe",
|
|
||||||
]
|
|
||||||
|
|
||||||
[people]
|
|
||||||
|
|
||||||
# A reference list of all people associated with the project.
|
|
||||||
# All other sections should refer to people by their canonical key
|
|
||||||
# in the people section.
|
|
||||||
|
|
||||||
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
|
|
||||||
|
|
||||||
[people.aaronlehmann]
|
|
||||||
Name = "Aaron Lehmann"
|
|
||||||
Email = "aaron.lehmann@docker.com"
|
|
||||||
GitHub = "aaronlehmann"
|
|
||||||
|
|
||||||
[people.dmcgowan]
|
|
||||||
Name = "Derek McGowan"
|
|
||||||
Email = "derek@mcgstyle.net"
|
|
||||||
GitHub = "dmcgowan"
|
|
||||||
|
|
||||||
[people.dmp42]
|
|
||||||
Name = "Olivier Gambier"
|
|
||||||
Email = "olivier@docker.com"
|
|
||||||
GitHub = "dmp42"
|
|
||||||
|
|
||||||
[people.richardscothern]
|
|
||||||
Name = "Richard Scothern"
|
|
||||||
Email = "richard.scothern@gmail.com"
|
|
||||||
GitHub = "richardscothern"
|
|
||||||
|
|
||||||
[people.shykes]
|
|
||||||
Name = "Solomon Hykes"
|
|
||||||
Email = "solomon@docker.com"
|
|
||||||
GitHub = "shykes"
|
|
||||||
|
|
||||||
[people.stevvooe]
|
|
||||||
Name = "Stephen Day"
|
|
||||||
Email = "stephen.day@docker.com"
|
|
||||||
GitHub = "stevvooe"
|
|
106
vendor/github.com/docker/distribution/Makefile
generated
vendored
106
vendor/github.com/docker/distribution/Makefile
generated
vendored
|
@ -1,106 +0,0 @@
|
||||||
# Set an output prefix, which is the local directory if not specified
|
|
||||||
PREFIX?=$(shell pwd)
|
|
||||||
|
|
||||||
|
|
||||||
# Used to populate version variable in main package.
|
|
||||||
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
|
||||||
|
|
||||||
# Allow turning off function inlining and variable registerization
|
|
||||||
ifeq (${DISABLE_OPTIMIZATION},true)
|
|
||||||
GO_GCFLAGS=-gcflags "-N -l"
|
|
||||||
VERSION:="$(VERSION)-noopt"
|
|
||||||
endif
|
|
||||||
|
|
||||||
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
|
|
||||||
|
|
||||||
.PHONY: clean all fmt vet lint build test binaries
|
|
||||||
.DEFAULT: all
|
|
||||||
all: fmt vet lint build test binaries
|
|
||||||
|
|
||||||
AUTHORS: .mailmap .git/HEAD
|
|
||||||
git log --format='%aN <%aE>' | sort -fu > $@
|
|
||||||
|
|
||||||
# This only needs to be generated by hand when cutting full releases.
|
|
||||||
version/version.go:
|
|
||||||
./version/version.sh > $@
|
|
||||||
|
|
||||||
# Required for go 1.5 to build
|
|
||||||
GO15VENDOREXPERIMENT := 1
|
|
||||||
|
|
||||||
# Package list
|
|
||||||
PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
|
|
||||||
|
|
||||||
# Resolving binary dependencies for specific targets
|
|
||||||
GOLINT := $(shell which golint || echo '')
|
|
||||||
GODEP := $(shell which godep || echo '')
|
|
||||||
|
|
||||||
${PREFIX}/bin/registry: $(wildcard **/*.go)
|
|
||||||
@echo "+ $@"
|
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
|
|
||||||
|
|
||||||
${PREFIX}/bin/digest: $(wildcard **/*.go)
|
|
||||||
@echo "+ $@"
|
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
|
|
||||||
|
|
||||||
${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go)
|
|
||||||
@echo "+ $@"
|
|
||||||
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
|
|
||||||
|
|
||||||
docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
|
|
||||||
./bin/registry-api-descriptor-template $< > $@
|
|
||||||
|
|
||||||
vet:
|
|
||||||
@echo "+ $@"
|
|
||||||
@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)
|
|
||||||
|
|
||||||
fmt:
|
|
||||||
@echo "+ $@"
|
|
||||||
@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
|
|
||||||
(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
|
|
||||||
|
|
||||||
lint:
|
|
||||||
@echo "+ $@"
|
|
||||||
$(if $(GOLINT), , \
|
|
||||||
$(error Please install golint: `go get -u github.com/golang/lint/golint`))
|
|
||||||
@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
|
|
||||||
|
|
||||||
build:
|
|
||||||
@echo "+ $@"
|
|
||||||
@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
|
|
||||||
|
|
||||||
test:
|
|
||||||
@echo "+ $@"
|
|
||||||
@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
|
|
||||||
|
|
||||||
test-full:
|
|
||||||
@echo "+ $@"
|
|
||||||
@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
|
|
||||||
|
|
||||||
binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
|
|
||||||
@echo "+ $@"
|
|
||||||
|
|
||||||
clean:
|
|
||||||
@echo "+ $@"
|
|
||||||
@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
|
|
||||||
|
|
||||||
dep-save:
|
|
||||||
@echo "+ $@"
|
|
||||||
$(if $(GODEP), , \
|
|
||||||
$(error Please install godep: go get github.com/tools/godep))
|
|
||||||
@$(GODEP) save $(PKGS)
|
|
||||||
|
|
||||||
dep-restore:
|
|
||||||
@echo "+ $@"
|
|
||||||
$(if $(GODEP), , \
|
|
||||||
$(error Please install godep: go get github.com/tools/godep))
|
|
||||||
@$(GODEP) restore -v
|
|
||||||
|
|
||||||
dep-validate: dep-restore
|
|
||||||
@echo "+ $@"
|
|
||||||
@rm -Rf .vendor.bak
|
|
||||||
@mv vendor .vendor.bak
|
|
||||||
@rm -Rf Godeps
|
|
||||||
@$(GODEP) save ./...
|
|
||||||
@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
|
|
||||||
(echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false)
|
|
||||||
@rm -Rf .vendor.bak
|
|
131
vendor/github.com/docker/distribution/README.md
generated
vendored
131
vendor/github.com/docker/distribution/README.md
generated
vendored
|
@ -1,131 +0,0 @@
|
||||||
# Distribution
|
|
||||||
|
|
||||||
The Docker toolset to pack, ship, store, and deliver content.
|
|
||||||
|
|
||||||
This repository's main product is the Docker Registry 2.0 implementation
|
|
||||||
for storing and distributing Docker images. It supersedes the
|
|
||||||
[docker/docker-registry](https://github.com/docker/docker-registry)
|
|
||||||
project with a new API design, focused around security and performance.
|
|
||||||
|
|
||||||
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
|
|
||||||
|
|
||||||
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
|
|
||||||
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
|
|
||||||
|
|
||||||
This repository contains the following components:
|
|
||||||
|
|
||||||
|**Component** |Description |
|
|
||||||
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
|
|
||||||
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
|
|
||||||
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
|
|
||||||
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
|
|
||||||
|
|
||||||
### How does this integrate with Docker engine?
|
|
||||||
|
|
||||||
This project should provide an implementation to a V2 API for use in the [Docker
|
|
||||||
core project](https://github.com/docker/docker). The API should be embeddable
|
|
||||||
and simplify the process of securely pulling and pushing content from `docker`
|
|
||||||
daemons.
|
|
||||||
|
|
||||||
### What are the long term goals of the Distribution project?
|
|
||||||
|
|
||||||
The _Distribution_ project has the further long term goal of providing a
|
|
||||||
secure tool chain for distributing content. The specifications, APIs and tools
|
|
||||||
should be as useful with Docker as they are without.
|
|
||||||
|
|
||||||
Our goal is to design a professional grade and extensible content distribution
|
|
||||||
system that allow users to:
|
|
||||||
|
|
||||||
* Enjoy an efficient, secured and reliable way to store, manage, package and
|
|
||||||
exchange content
|
|
||||||
* Hack/roll their own on top of healthy open-source components
|
|
||||||
* Implement their own home made solution through good specs, and solid
|
|
||||||
extensions mechanism.
|
|
||||||
|
|
||||||
## More about Registry 2.0
|
|
||||||
|
|
||||||
The new registry implementation provides the following benefits:
|
|
||||||
|
|
||||||
- faster push and pull
|
|
||||||
- new, more efficient implementation
|
|
||||||
- simplified deployment
|
|
||||||
- pluggable storage backend
|
|
||||||
- webhook notifications
|
|
||||||
|
|
||||||
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
|
|
||||||
|
|
||||||
### Who needs to deploy a registry?
|
|
||||||
|
|
||||||
By default, Docker users pull images from Docker's public registry instance.
|
|
||||||
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
|
|
||||||
ability. Users can also push images to a repository on Docker's public registry,
|
|
||||||
if they have a [Docker Hub](https://hub.docker.com/) account.
|
|
||||||
|
|
||||||
For some users and even companies, this default behavior is sufficient. For
|
|
||||||
others, it is not.
|
|
||||||
|
|
||||||
For example, users with their own software products may want to maintain a
|
|
||||||
registry for private, company images. Also, you may wish to deploy your own
|
|
||||||
image repository for images used to test or in continuous integration. For these
|
|
||||||
use cases and others, [deploying your own registry instance](docs/deploying.md)
|
|
||||||
may be the better choice.
|
|
||||||
|
|
||||||
### Migration to Registry 2.0
|
|
||||||
|
|
||||||
For those who have previously deployed their own registry based on the Registry
|
|
||||||
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
|
|
||||||
data migration is required. A tool to assist with migration efforts has been
|
|
||||||
created. For more information see [docker/migrator]
|
|
||||||
(https://github.com/docker/migrator).
|
|
||||||
|
|
||||||
## Contribute
|
|
||||||
|
|
||||||
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
|
|
||||||
issues, fixes, and patches to this project. If you are contributing code, see
|
|
||||||
the instructions for [building a development environment](docs/recipes/building.md).
|
|
||||||
|
|
||||||
## Support
|
|
||||||
|
|
||||||
If any issues are encountered while using the _Distribution_ project, several
|
|
||||||
avenues are available for support:
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<th align="left">
|
|
||||||
IRC
|
|
||||||
</th>
|
|
||||||
<td>
|
|
||||||
#docker-distribution on FreeNode
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th align="left">
|
|
||||||
Issue Tracker
|
|
||||||
</th>
|
|
||||||
<td>
|
|
||||||
github.com/docker/distribution/issues
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th align="left">
|
|
||||||
Google Groups
|
|
||||||
</th>
|
|
||||||
<td>
|
|
||||||
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th align="left">
|
|
||||||
Mailing List
|
|
||||||
</th>
|
|
||||||
<td>
|
|
||||||
docker@dockerproject.org
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
This project is distributed under [Apache License, Version 2.0](LICENSE).
|
|
267
vendor/github.com/docker/distribution/ROADMAP.md
generated
vendored
267
vendor/github.com/docker/distribution/ROADMAP.md
generated
vendored
|
@ -1,267 +0,0 @@
|
||||||
# Roadmap
|
|
||||||
|
|
||||||
The Distribution Project consists of several components, some of which are
|
|
||||||
still being defined. This document defines the high-level goals of the
|
|
||||||
project, identifies the current components, and defines the release-
|
|
||||||
relationship to the Docker Platform.
|
|
||||||
|
|
||||||
* [Distribution Goals](#distribution-goals)
|
|
||||||
* [Distribution Components](#distribution-components)
|
|
||||||
* [Project Planning](#project-planning): release-relationship to the Docker Platform.
|
|
||||||
|
|
||||||
This road map is a living document, providing an overview of the goals and
|
|
||||||
considerations made in respect of the future of the project.
|
|
||||||
|
|
||||||
## Distribution Goals
|
|
||||||
|
|
||||||
- Replace the existing [docker registry](github.com/docker/docker-registry)
|
|
||||||
implementation as the primary implementation.
|
|
||||||
- Replace the existing push and pull code in the docker engine with the
|
|
||||||
distribution package.
|
|
||||||
- Define a strong data model for distributing docker images
|
|
||||||
- Provide a flexible distribution tool kit for use in the docker platform
|
|
||||||
- Unlock new distribution models
|
|
||||||
|
|
||||||
## Distribution Components
|
|
||||||
|
|
||||||
Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
|
|
||||||
features and bugfixes for a component will be added to the relevant milestone. If a feature or
|
|
||||||
bugfix is not part of a milestone, it is currently unscheduled for
|
|
||||||
implementation.
|
|
||||||
|
|
||||||
* [Registry](#registry)
|
|
||||||
* [Distribution Package](#distribution-package)
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### Registry
|
|
||||||
|
|
||||||
The new Docker registry is the main portion of the distribution repository.
|
|
||||||
Registry 2.0 is the first release of the next-generation registry. This was
|
|
||||||
primarily focused on implementing the [new registry
|
|
||||||
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
|
|
||||||
with a focus on security and performance.
|
|
||||||
|
|
||||||
Following from the Distribution project goals above, we have a set of goals
|
|
||||||
for registry v2 that we would like to follow in the design. New features
|
|
||||||
should be compared against these goals.
|
|
||||||
|
|
||||||
#### Data Storage and Distribution First
|
|
||||||
|
|
||||||
The registry's first goal is to provide a reliable, consistent storage
|
|
||||||
location for Docker images. The registry should only provide the minimal
|
|
||||||
amount of indexing required to fetch image data and no more.
|
|
||||||
|
|
||||||
This means we should be selective in new features and API additions, including
|
|
||||||
those that may require expensive, ever growing indexes. Requests should be
|
|
||||||
servable in "constant time".
|
|
||||||
|
|
||||||
#### Content Addressability
|
|
||||||
|
|
||||||
All data objects used in the registry API should be content addressable.
|
|
||||||
Content identifiers should be secure and verifiable. This provides a secure,
|
|
||||||
reliable base from which to build more advanced content distribution systems.
|
|
||||||
|
|
||||||
#### Content Agnostic
|
|
||||||
|
|
||||||
In the past, changes to the image format would require large changes in Docker
|
|
||||||
and the Registry. By decoupling the distribution and image format, we can
|
|
||||||
allow the formats to progress without having to coordinate between the two.
|
|
||||||
This means that we should be focused on decoupling Docker from the registry
|
|
||||||
just as much as decoupling the registry from Docker. Such an approach will
|
|
||||||
allow us to unlock new distribution models that haven't been possible before.
|
|
||||||
|
|
||||||
We can take this further by saying that the new registry should be content
|
|
||||||
agnostic. The registry provides a model of names, tags, manifests and content
|
|
||||||
addresses and that model can be used to work with content.
|
|
||||||
|
|
||||||
#### Simplicity
|
|
||||||
|
|
||||||
The new registry should be closer to a microservice component than its
|
|
||||||
predecessor. This means it should have a narrower API and a low number of
|
|
||||||
service dependencies. It should be easy to deploy.
|
|
||||||
|
|
||||||
This means that other solutions should be explored before changing the API or
|
|
||||||
adding extra dependencies. If functionality is required, can it be added as an
|
|
||||||
extension or companion service.
|
|
||||||
|
|
||||||
#### Extensibility
|
|
||||||
|
|
||||||
The registry should provide extension points to add functionality. By keeping
|
|
||||||
the scope narrow, but providing the ability to add functionality.
|
|
||||||
|
|
||||||
Features like search, indexing, synchronization and registry explorers fall
|
|
||||||
into this category. No such feature should be added unless we've found it
|
|
||||||
impossible to do through an extension.
|
|
||||||
|
|
||||||
#### Active Feature Discussions
|
|
||||||
|
|
||||||
The following are feature discussions that are currently active.
|
|
||||||
|
|
||||||
If you don't see your favorite, unimplemented feature, feel free to contact us
|
|
||||||
via IRC or the mailing list and we can talk about adding it. The goal here is
|
|
||||||
to make sure that new features go through a rigid design process before
|
|
||||||
landing in the registry.
|
|
||||||
|
|
||||||
##### Proxying to other Registries
|
|
||||||
|
|
||||||
A _pull-through caching_ mode exists for the registry, but is restricted from
|
|
||||||
within the docker client to only mirror the official Docker Hub. This functionality
|
|
||||||
can be expanded when image provenance has been specified and implemented in the
|
|
||||||
distribution project.
|
|
||||||
|
|
||||||
##### Metadata storage
|
|
||||||
|
|
||||||
Metadata for the registry is currently stored with the manifest and layer data on
|
|
||||||
the storage backend. While this is a big win for simplicity and reliably maintaining
|
|
||||||
state, it comes with the cost of consistency and high latency. The mutable registry
|
|
||||||
metadata operations should be abstracted behind an API which will allow ACID compliant
|
|
||||||
storage systems to handle metadata.
|
|
||||||
|
|
||||||
##### Peer to Peer transfer
|
|
||||||
|
|
||||||
Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
|
|
||||||
|
|
||||||
##### Indexing, Search and Discovery
|
|
||||||
|
|
||||||
The original registry provided some implementation of search for use with
|
|
||||||
private registries. Support has been elided from V2 since we'd like to both
|
|
||||||
decouple search functionality from the registry. The makes the registry
|
|
||||||
simpler to deploy, especially in use cases where search is not needed, and
|
|
||||||
let's us decouple the image format from the registry.
|
|
||||||
|
|
||||||
There are explorations into using the catalog API and notification system to
|
|
||||||
build external indexes. The current line of thought is that we will define a
|
|
||||||
common search API to index and query docker images. Such a system could be run
|
|
||||||
as a companion to a registry or set of registries to power discovery.
|
|
||||||
|
|
||||||
The main issue with search and discovery is that there are so many ways to
|
|
||||||
accomplish it. There are two aspects to this project. The first is deciding on
|
|
||||||
how it will be done, including an API definition that can work with changing
|
|
||||||
data formats. The second is the process of integrating with `docker search`.
|
|
||||||
We expect that someone attempts to address the problem with the existing tools
|
|
||||||
and propose it as a standard search API or uses it to inform a standardization
|
|
||||||
process. Once this has been explored, we integrate with the docker client.
|
|
||||||
|
|
||||||
Please see the following for more detail:
|
|
||||||
|
|
||||||
- https://github.com/docker/distribution/issues/206
|
|
||||||
|
|
||||||
##### Deletes
|
|
||||||
|
|
||||||
> __NOTE:__ Deletes are a much asked for feature. Before requesting this
|
|
||||||
feature or participating in discussion, we ask that you read this section in
|
|
||||||
full and understand the problems behind deletes.
|
|
||||||
|
|
||||||
While, at first glance, implementing deleting seems simple, there are a number
|
|
||||||
mitigating factors that make many solutions not ideal or even pathological in
|
|
||||||
the context of a registry. The following paragraph discuss the background and
|
|
||||||
approaches that could be applied to arrive at a solution.
|
|
||||||
|
|
||||||
The goal of deletes in any system is to remove unused or unneeded data. Only
|
|
||||||
data requested for deletion should be removed and no other data. Removing
|
|
||||||
unintended data is worse than _not_ removing data that was requested for
|
|
||||||
removal but ideally, both are supported. Generally, according to this rule, we
|
|
||||||
err on holding data longer than needed, ensuring that it is only removed when
|
|
||||||
we can be certain that it can be removed. With the current behavior, we opt to
|
|
||||||
hold onto the data forever, ensuring that data cannot be incorrectly removed.
|
|
||||||
|
|
||||||
To understand the problems with implementing deletes, one must understand the
|
|
||||||
data model. All registry data is stored in a filesystem layout, implemented on
|
|
||||||
a "storage driver", effectively a _virtual file system_ (VFS). The storage
|
|
||||||
system must assume that this VFS layer will be eventually consistent and has
|
|
||||||
poor read- after-write consistency, since this is the lower common denominator
|
|
||||||
among the storage drivers. This is mitigated by writing values in reverse-
|
|
||||||
dependent order, but makes wider transactional operations unsafe.
|
|
||||||
|
|
||||||
Layered on the VFS model is a content-addressable _directed, acyclic graph_
|
|
||||||
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
|
|
||||||
Since the same data can be referenced by multiple manifests, we only store
|
|
||||||
data once, even if it is in different repositories. Thus, we have a set of
|
|
||||||
blobs, referenced by tags and manifests. If we want to delete a blob we need
|
|
||||||
to be certain that it is no longer referenced by another manifest or tag. When
|
|
||||||
we delete a manifest, we also can try to delete the referenced blobs. Deciding
|
|
||||||
whether or not a blob has an active reference is the crux of the problem.
|
|
||||||
|
|
||||||
Conceptually, deleting a manifest and its resources is quite simple. Just find
|
|
||||||
all the manifests, enumerate the referenced blobs and delete the blobs not in
|
|
||||||
that set. An astute observer will recognize this as a garbage collection
|
|
||||||
problem. As with garbage collection in programming languages, this is very
|
|
||||||
simple when one always has a consistent view. When one adds parallelism and an
|
|
||||||
inconsistent view of data, it becomes very challenging.
|
|
||||||
|
|
||||||
A simple example can demonstrate this. Let's say we are deleting a manifest
|
|
||||||
_A_ in one process. We scan the manifest and decide that all the blobs are
|
|
||||||
ready for deletion. Concurrently, we have another process accepting a new
|
|
||||||
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
|
|
||||||
is accepted and all the blobs are considered present, so the operation
|
|
||||||
proceeds. The original process then deletes the referenced blobs, assuming
|
|
||||||
they were unreferenced. The manifest _B_, which we thought had all of its data
|
|
||||||
present, can no longer be served by the registry, since the dependent data has
|
|
||||||
been deleted.
|
|
||||||
|
|
||||||
Deleting data from the registry safely requires some way to coordinate this
|
|
||||||
operation. The following approaches are being considered:
|
|
||||||
|
|
||||||
- _Reference Counting_ - Maintain a count of references to each blob. This is
|
|
||||||
challenging for a number of reasons: 1. maintaining a consistent consensus
|
|
||||||
of reference counts across a set of Registries and 2. Building the initial
|
|
||||||
list of reference counts for an existing registry. These challenges can be
|
|
||||||
met with a consensus protocol like Paxos or Raft in the first case and a
|
|
||||||
necessary but simple scan in the second..
|
|
||||||
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
|
|
||||||
and find all blob references. Delete all unreferenced blobs. This approach
|
|
||||||
is very simple but requires disabling writes for a period of time while the
|
|
||||||
service reads all data. This is slow and expensive but very accurate and
|
|
||||||
effective.
|
|
||||||
- _Generational GC_ - Do something similar to above but instead of blocking
|
|
||||||
writes, writes are sent to another storage backend while reads are broadcast
|
|
||||||
to the new and old backends. GC is then performed on the read-only portion.
|
|
||||||
Because writes land in the new backend, the data in the read-only section
|
|
||||||
can be safely deleted. The main drawbacks of this approach are complexity
|
|
||||||
and coordination.
|
|
||||||
- _Centralized Oracle_ - Using a centralized, transactional database, we can
|
|
||||||
know exactly which data is referenced at any given time. This avoids
|
|
||||||
coordination problem by managing this data in a single location. We trade
|
|
||||||
off metadata scalability for simplicity and performance. This is a very good
|
|
||||||
option for most registry deployments. This would create a bottleneck for
|
|
||||||
registry metadata. However, metadata is generally not the main bottleneck
|
|
||||||
when serving images.
|
|
||||||
|
|
||||||
Please let us know if other solutions exist that we have yet to enumerate.
|
|
||||||
Note that for any approach, implementation is a massive consideration. For
|
|
||||||
example, a mark-sweep based solution may seem simple but the amount of work in
|
|
||||||
coordination offset the extra work it might take to build a _Centralized
|
|
||||||
Oracle_. We'll accept proposals for any solution but please coordinate with us
|
|
||||||
before dropping code.
|
|
||||||
|
|
||||||
At this time, we have traded off simplicity and ease of deployment for disk
|
|
||||||
space. Simplicity and ease of deployment tend to reduce developer involvement,
|
|
||||||
which is currently the most expensive resource in software engineering. Taking
|
|
||||||
on any solution for deletes will greatly effect these factors, trading off
|
|
||||||
very cheap disk space for a complex deployment and operational story.
|
|
||||||
|
|
||||||
Please see the following issues for more detail:
|
|
||||||
|
|
||||||
- https://github.com/docker/distribution/issues/422
|
|
||||||
- https://github.com/docker/distribution/issues/461
|
|
||||||
- https://github.com/docker/distribution/issues/462
|
|
||||||
|
|
||||||
### Distribution Package
|
|
||||||
|
|
||||||
At its core, the Distribution Project is a set of Go packages that make up
|
|
||||||
Distribution Components. At this time, most of these packages make up the
|
|
||||||
Registry implementation.
|
|
||||||
|
|
||||||
The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
|
|
||||||
|
|
||||||
For feature additions, please see the Registry section. In the future, we may break out a
|
|
||||||
separate Roadmap for distribution-specific features that apply to more than
|
|
||||||
just the registry.
|
|
||||||
|
|
||||||
***
|
|
||||||
|
|
||||||
### Project Planning
|
|
||||||
|
|
||||||
An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
|
|
||||||
|
|
8
vendor/github.com/docker/distribution/blobs.go
generated
vendored
8
vendor/github.com/docker/distribution/blobs.go
generated
vendored
|
@ -69,9 +69,6 @@ type Descriptor struct {
|
||||||
// against against this digest.
|
// against against this digest.
|
||||||
Digest digest.Digest `json:"digest,omitempty"`
|
Digest digest.Digest `json:"digest,omitempty"`
|
||||||
|
|
||||||
// URLs contains the source URLs of this content.
|
|
||||||
URLs []string `json:"urls,omitempty"`
|
|
||||||
|
|
||||||
// NOTE: Before adding a field here, please ensure that all
|
// NOTE: Before adding a field here, please ensure that all
|
||||||
// other options have been exhausted. Much of the type relationships
|
// other options have been exhausted. Much of the type relationships
|
||||||
// depend on the simplicity of this type.
|
// depend on the simplicity of this type.
|
||||||
|
@ -127,11 +124,6 @@ type BlobDescriptorService interface {
|
||||||
Clear(ctx context.Context, dgst digest.Digest) error
|
Clear(ctx context.Context, dgst digest.Digest) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
|
|
||||||
type BlobDescriptorServiceFactory interface {
|
|
||||||
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadSeekCloser is the primary reader type for blob data, combining
|
// ReadSeekCloser is the primary reader type for blob data, combining
|
||||||
// io.ReadSeeker with io.Closer.
|
// io.ReadSeeker with io.Closer.
|
||||||
type ReadSeekCloser interface {
|
type ReadSeekCloser interface {
|
||||||
|
|
89
vendor/github.com/docker/distribution/circle.yml
generated
vendored
89
vendor/github.com/docker/distribution/circle.yml
generated
vendored
|
@ -1,89 +0,0 @@
|
||||||
# Pony-up!
|
|
||||||
machine:
|
|
||||||
pre:
|
|
||||||
# Install gvm
|
|
||||||
- bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
|
|
||||||
# Install codecov for coverage
|
|
||||||
- pip install --user codecov
|
|
||||||
|
|
||||||
post:
|
|
||||||
# go
|
|
||||||
- gvm install go1.6 --prefer-binary --name=stable
|
|
||||||
|
|
||||||
environment:
|
|
||||||
# Convenient shortcuts to "common" locations
|
|
||||||
CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
|
|
||||||
BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
|
|
||||||
# Trick circle brainflat "no absolute path" behavior
|
|
||||||
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
|
|
||||||
DOCKER_BUILDTAGS: "include_oss include_gcs"
|
|
||||||
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
|
|
||||||
CIRCLE_PAIN: "mode: set"
|
|
||||||
|
|
||||||
hosts:
|
|
||||||
# Not used yet
|
|
||||||
fancy: 127.0.0.1
|
|
||||||
|
|
||||||
dependencies:
|
|
||||||
pre:
|
|
||||||
# Copy the code to the gopath of all go versions
|
|
||||||
- >
|
|
||||||
gvm use stable &&
|
|
||||||
mkdir -p "$(dirname $BASE_STABLE)" &&
|
|
||||||
cp -R "$CHECKOUT" "$BASE_STABLE"
|
|
||||||
|
|
||||||
override:
|
|
||||||
# Install dependencies for every copied clone/go version
|
|
||||||
- gvm use stable && go get github.com/tools/godep:
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
post:
|
|
||||||
# For the stable go version, additionally install linting tools
|
|
||||||
- >
|
|
||||||
gvm use stable &&
|
|
||||||
go get github.com/axw/gocov/gocov github.com/golang/lint/golint
|
|
||||||
|
|
||||||
test:
|
|
||||||
pre:
|
|
||||||
# Output the go versions we are going to test
|
|
||||||
# - gvm use old && go version
|
|
||||||
- gvm use stable && go version
|
|
||||||
|
|
||||||
# Ensure validation of dependencies
|
|
||||||
- gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
# First thing: build everything. This will catch compile errors, and it's
|
|
||||||
# also necessary for go vet to work properly (see #807).
|
|
||||||
- gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"):
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
# FMT
|
|
||||||
- gvm use stable && make fmt:
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
# VET
|
|
||||||
- gvm use stable && make vet:
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
# LINT
|
|
||||||
- gvm use stable && make lint:
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
override:
|
|
||||||
# Test stable, and report
|
|
||||||
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
|
|
||||||
timeout: 600
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
post:
|
|
||||||
# Report to codecov
|
|
||||||
- bash <(curl -s https://codecov.io/bash):
|
|
||||||
pwd: $BASE_STABLE
|
|
||||||
|
|
||||||
## Notes
|
|
||||||
# Disabled the -race detector due to massive memory usage.
|
|
||||||
# Do we want these as well?
|
|
||||||
# - go get code.google.com/p/go.tools/cmd/goimports
|
|
||||||
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
|
|
||||||
# http://labix.org/gocheck
|
|
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
|
@ -1,85 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Context is a copy of Context from the golang.org/x/net/context package.
|
|
||||||
type Context interface {
|
|
||||||
context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
// instanceContext is a context that provides only an instance id. It is
|
|
||||||
// provided as the main background context.
|
|
||||||
type instanceContext struct {
|
|
||||||
Context
|
|
||||||
id string // id of context, logged as "instance.id"
|
|
||||||
once sync.Once // once protect generation of the id
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ic *instanceContext) Value(key interface{}) interface{} {
|
|
||||||
if key == "instance.id" {
|
|
||||||
ic.once.Do(func() {
|
|
||||||
// We want to lazy initialize the UUID such that we don't
|
|
||||||
// call a random generator from the package initialization
|
|
||||||
// code. For various reasons random could not be available
|
|
||||||
// https://github.com/docker/distribution/issues/782
|
|
||||||
ic.id = uuid.Generate().String()
|
|
||||||
})
|
|
||||||
return ic.id
|
|
||||||
}
|
|
||||||
|
|
||||||
return ic.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
var background = &instanceContext{
|
|
||||||
Context: context.Background(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Background returns a non-nil, empty Context. The background context
|
|
||||||
// provides a single key, "instance.id" that is globally unique to the
|
|
||||||
// process.
|
|
||||||
func Background() Context {
|
|
||||||
return background
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValue returns a copy of parent in which the value associated with key is
|
|
||||||
// val. Use context Values only for request-scoped data that transits processes
|
|
||||||
// and APIs, not for passing optional parameters to functions.
|
|
||||||
func WithValue(parent Context, key, val interface{}) Context {
|
|
||||||
return context.WithValue(parent, key, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringMapContext is a simple context implementation that checks a map for a
|
|
||||||
// key, falling back to a parent if not present.
|
|
||||||
type stringMapContext struct {
|
|
||||||
context.Context
|
|
||||||
m map[string]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValues returns a context that proxies lookups through a map. Only
|
|
||||||
// supports string keys.
|
|
||||||
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
|
|
||||||
mo := make(map[string]interface{}, len(m)) // make our own copy.
|
|
||||||
for k, v := range m {
|
|
||||||
mo[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringMapContext{
|
|
||||||
Context: ctx,
|
|
||||||
m: mo,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (smc stringMapContext) Value(key interface{}) interface{} {
|
|
||||||
if ks, ok := key.(string); ok {
|
|
||||||
if v, ok := smc.m[ks]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return smc.Context.Value(key)
|
|
||||||
}
|
|
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
|
@ -1,89 +0,0 @@
|
||||||
// Package context provides several utilities for working with
|
|
||||||
// golang.org/x/net/context in http requests. Primarily, the focus is on
|
|
||||||
// logging relevant request information but this package is not limited to
|
|
||||||
// that purpose.
|
|
||||||
//
|
|
||||||
// The easiest way to get started is to get the background context:
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
//
|
|
||||||
// The returned context should be passed around your application and be the
|
|
||||||
// root of all other context instances. If the application has a version, this
|
|
||||||
// line should be called before anything else:
|
|
||||||
//
|
|
||||||
// ctx := context.WithVersion(context.Background(), version)
|
|
||||||
//
|
|
||||||
// The above will store the version in the context and will be available to
|
|
||||||
// the logger.
|
|
||||||
//
|
|
||||||
// Logging
|
|
||||||
//
|
|
||||||
// The most useful aspect of this package is GetLogger. This function takes
|
|
||||||
// any context.Context interface and returns the current logger from the
|
|
||||||
// context. Canonical usage looks like this:
|
|
||||||
//
|
|
||||||
// GetLogger(ctx).Infof("something interesting happened")
|
|
||||||
//
|
|
||||||
// GetLogger also takes optional key arguments. The keys will be looked up in
|
|
||||||
// the context and reported with the logger. The following example would
|
|
||||||
// return a logger that prints the version with each log message:
|
|
||||||
//
|
|
||||||
// ctx := context.Context(context.Background(), "version", version)
|
|
||||||
// GetLogger(ctx, "version").Infof("this log message has a version field")
|
|
||||||
//
|
|
||||||
// The above would print out a log message like this:
|
|
||||||
//
|
|
||||||
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
|
|
||||||
//
|
|
||||||
// When used with WithLogger, we gain the ability to decorate the context with
|
|
||||||
// loggers that have information from disparate parts of the call stack.
|
|
||||||
// Following from the version example, we can build a new context with the
|
|
||||||
// configured logger such that we always print the version field:
|
|
||||||
//
|
|
||||||
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
|
|
||||||
//
|
|
||||||
// Since the logger has been pushed to the context, we can now get the version
|
|
||||||
// field for free with our log messages. Future calls to GetLogger on the new
|
|
||||||
// context will have the version field:
|
|
||||||
//
|
|
||||||
// GetLogger(ctx).Infof("this log message has a version field")
|
|
||||||
//
|
|
||||||
// This becomes more powerful when we start stacking loggers. Let's say we
|
|
||||||
// have the version logger from above but also want a request id. Using the
|
|
||||||
// context above, in our request scoped function, we place another logger in
|
|
||||||
// the context:
|
|
||||||
//
|
|
||||||
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
|
|
||||||
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
|
|
||||||
//
|
|
||||||
// When GetLogger is called on the new context, "http.request.id" will be
|
|
||||||
// included as a logger field, along with the original "version" field:
|
|
||||||
//
|
|
||||||
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
|
|
||||||
//
|
|
||||||
// Note that this only affects the new context, the previous context, with the
|
|
||||||
// version field, can be used independently. Put another way, the new logger,
|
|
||||||
// added to the request context, is unique to that context and can have
|
|
||||||
// request scoped varaibles.
|
|
||||||
//
|
|
||||||
// HTTP Requests
|
|
||||||
//
|
|
||||||
// This package also contains several methods for working with http requests.
|
|
||||||
// The concepts are very similar to those described above. We simply place the
|
|
||||||
// request in the context using WithRequest. This makes the request variables
|
|
||||||
// available. GetRequestLogger can then be called to get request specific
|
|
||||||
// variables in a log line:
|
|
||||||
//
|
|
||||||
// ctx = WithRequest(ctx, req)
|
|
||||||
// GetRequestLogger(ctx).Infof("request variables")
|
|
||||||
//
|
|
||||||
// Like above, if we want to include the request data in all log messages in
|
|
||||||
// the context, we push the logger to a new context and use that one:
|
|
||||||
//
|
|
||||||
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
|
|
||||||
//
|
|
||||||
// The concept is fairly powerful and ensures that calls throughout the stack
|
|
||||||
// can be traced in log messages. Using the fields like "http.request.id", one
|
|
||||||
// can analyze call flow for a particular request with a simple grep of the
|
|
||||||
// logs.
|
|
||||||
package context
|
|
366
vendor/github.com/docker/distribution/context/http.go
generated
vendored
366
vendor/github.com/docker/distribution/context/http.go
generated
vendored
|
@ -1,366 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Common errors used with this package.
|
|
||||||
var (
|
|
||||||
ErrNoRequestContext = errors.New("no http request in context")
|
|
||||||
ErrNoResponseWriterContext = errors.New("no http response in context")
|
|
||||||
)
|
|
||||||
|
|
||||||
func parseIP(ipStr string) net.IP {
|
|
||||||
ip := net.ParseIP(ipStr)
|
|
||||||
if ip == nil {
|
|
||||||
log.Warnf("invalid remote IP address: %q", ipStr)
|
|
||||||
}
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteAddr extracts the remote address of the request, taking into
|
|
||||||
// account proxy headers.
|
|
||||||
func RemoteAddr(r *http.Request) string {
|
|
||||||
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
|
|
||||||
proxies := strings.Split(prior, ",")
|
|
||||||
if len(proxies) > 0 {
|
|
||||||
remoteAddr := strings.Trim(proxies[0], " ")
|
|
||||||
if parseIP(remoteAddr) != nil {
|
|
||||||
return remoteAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// X-Real-Ip is less supported, but worth checking in the
|
|
||||||
// absence of X-Forwarded-For
|
|
||||||
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
|
|
||||||
if parseIP(realIP) != nil {
|
|
||||||
return realIP
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.RemoteAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteIP extracts the remote IP of the request, taking into
|
|
||||||
// account proxy headers.
|
|
||||||
func RemoteIP(r *http.Request) string {
|
|
||||||
addr := RemoteAddr(r)
|
|
||||||
|
|
||||||
// Try parsing it as "IP:port"
|
|
||||||
if ip, _, err := net.SplitHostPort(addr); err == nil {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRequest places the request on the context. The context of the request
|
|
||||||
// is assigned a unique id, available at "http.request.id". The request itself
|
|
||||||
// is available at "http.request". Other common attributes are available under
|
|
||||||
// the prefix "http.request.". If a request is already present on the context,
|
|
||||||
// this method will panic.
|
|
||||||
func WithRequest(ctx Context, r *http.Request) Context {
|
|
||||||
if ctx.Value("http.request") != nil {
|
|
||||||
// NOTE(stevvooe): This needs to be considered a programming error. It
|
|
||||||
// is unlikely that we'd want to have more than one request in
|
|
||||||
// context.
|
|
||||||
panic("only one request per context")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &httpRequestContext{
|
|
||||||
Context: ctx,
|
|
||||||
startedAt: time.Now(),
|
|
||||||
id: uuid.Generate().String(),
|
|
||||||
r: r,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequest returns the http request in the given context. Returns
|
|
||||||
// ErrNoRequestContext if the context does not have an http request associated
|
|
||||||
// with it.
|
|
||||||
func GetRequest(ctx Context) (*http.Request, error) {
|
|
||||||
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
return nil, ErrNoRequestContext
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequestID attempts to resolve the current request id, if possible. An
|
|
||||||
// error is return if it is not available on the context.
|
|
||||||
func GetRequestID(ctx Context) string {
|
|
||||||
return GetStringValue(ctx, "http.request.id")
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithResponseWriter returns a new context and response writer that makes
|
|
||||||
// interesting response statistics available within the context.
|
|
||||||
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
|
|
||||||
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
|
||||||
irwCN := &instrumentedResponseWriterCN{
|
|
||||||
instrumentedResponseWriter: instrumentedResponseWriter{
|
|
||||||
ResponseWriter: w,
|
|
||||||
Context: ctx,
|
|
||||||
},
|
|
||||||
CloseNotifier: closeNotifier,
|
|
||||||
}
|
|
||||||
|
|
||||||
return irwCN, irwCN
|
|
||||||
}
|
|
||||||
|
|
||||||
irw := instrumentedResponseWriter{
|
|
||||||
ResponseWriter: w,
|
|
||||||
Context: ctx,
|
|
||||||
}
|
|
||||||
return &irw, &irw
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetResponseWriter returns the http.ResponseWriter from the provided
|
|
||||||
// context. If not present, ErrNoResponseWriterContext is returned. The
|
|
||||||
// returned instance provides instrumentation in the context.
|
|
||||||
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
|
|
||||||
v := ctx.Value("http.response")
|
|
||||||
|
|
||||||
rw, ok := v.(http.ResponseWriter)
|
|
||||||
if !ok || rw == nil {
|
|
||||||
return nil, ErrNoResponseWriterContext
|
|
||||||
}
|
|
||||||
|
|
||||||
return rw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getVarsFromRequest let's us change request vars implementation for testing
|
|
||||||
// and maybe future changes.
|
|
||||||
var getVarsFromRequest = mux.Vars
|
|
||||||
|
|
||||||
// WithVars extracts gorilla/mux vars and makes them available on the returned
|
|
||||||
// context. Variables are available at keys with the prefix "vars.". For
|
|
||||||
// example, if looking for the variable "name", it can be accessed as
|
|
||||||
// "vars.name". Implementations that are accessing values need not know that
|
|
||||||
// the underlying context is implemented with gorilla/mux vars.
|
|
||||||
func WithVars(ctx Context, r *http.Request) Context {
|
|
||||||
return &muxVarsContext{
|
|
||||||
Context: ctx,
|
|
||||||
vars: getVarsFromRequest(r),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequestLogger returns a logger that contains fields from the request in
|
|
||||||
// the current context. If the request is not available in the context, no
|
|
||||||
// fields will display. Request loggers can safely be pushed onto the context.
|
|
||||||
func GetRequestLogger(ctx Context) Logger {
|
|
||||||
return GetLogger(ctx,
|
|
||||||
"http.request.id",
|
|
||||||
"http.request.method",
|
|
||||||
"http.request.host",
|
|
||||||
"http.request.uri",
|
|
||||||
"http.request.referer",
|
|
||||||
"http.request.useragent",
|
|
||||||
"http.request.remoteaddr",
|
|
||||||
"http.request.contenttype")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetResponseLogger reads the current response stats and builds a logger.
|
|
||||||
// Because the values are read at call time, pushing a logger returned from
|
|
||||||
// this function on the context will lead to missing or invalid data. Only
|
|
||||||
// call this at the end of a request, after the response has been written.
|
|
||||||
func GetResponseLogger(ctx Context) Logger {
|
|
||||||
l := getLogrusLogger(ctx,
|
|
||||||
"http.response.written",
|
|
||||||
"http.response.status",
|
|
||||||
"http.response.contenttype")
|
|
||||||
|
|
||||||
duration := Since(ctx, "http.request.startedat")
|
|
||||||
|
|
||||||
if duration > 0 {
|
|
||||||
l = l.WithField("http.response.duration", duration.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpRequestContext makes information about a request available to context.
|
|
||||||
type httpRequestContext struct {
|
|
||||||
Context
|
|
||||||
|
|
||||||
startedAt time.Time
|
|
||||||
id string
|
|
||||||
r *http.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns a keyed element of the request for use in the context. To get
|
|
||||||
// the request itself, query "request". For other components, access them as
|
|
||||||
// "request.<component>". For example, r.RequestURI
|
|
||||||
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.request" {
|
|
||||||
return ctx.r
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(keyStr, "http.request.") {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(keyStr, ".")
|
|
||||||
|
|
||||||
if len(parts) != 3 {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
switch parts[2] {
|
|
||||||
case "uri":
|
|
||||||
return ctx.r.RequestURI
|
|
||||||
case "remoteaddr":
|
|
||||||
return RemoteAddr(ctx.r)
|
|
||||||
case "method":
|
|
||||||
return ctx.r.Method
|
|
||||||
case "host":
|
|
||||||
return ctx.r.Host
|
|
||||||
case "referer":
|
|
||||||
referer := ctx.r.Referer()
|
|
||||||
if referer != "" {
|
|
||||||
return referer
|
|
||||||
}
|
|
||||||
case "useragent":
|
|
||||||
return ctx.r.UserAgent()
|
|
||||||
case "id":
|
|
||||||
return ctx.id
|
|
||||||
case "startedat":
|
|
||||||
return ctx.startedAt
|
|
||||||
case "contenttype":
|
|
||||||
ct := ctx.r.Header.Get("Content-Type")
|
|
||||||
if ct != "" {
|
|
||||||
return ct
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fallback:
|
|
||||||
return ctx.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
type muxVarsContext struct {
|
|
||||||
Context
|
|
||||||
vars map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "vars" {
|
|
||||||
return ctx.vars
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(keyStr, "vars.") {
|
|
||||||
keyStr = strings.TrimPrefix(keyStr, "vars.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := ctx.vars[keyStr]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// instrumentedResponseWriterCN provides response writer information in a
|
|
||||||
// context. It implements http.CloseNotifier so that users can detect
|
|
||||||
// early disconnects.
|
|
||||||
type instrumentedResponseWriterCN struct {
|
|
||||||
instrumentedResponseWriter
|
|
||||||
http.CloseNotifier
|
|
||||||
}
|
|
||||||
|
|
||||||
// instrumentedResponseWriter provides response writer information in a
|
|
||||||
// context. This variant is only used in the case where CloseNotifier is not
|
|
||||||
// implemented by the parent ResponseWriter.
|
|
||||||
type instrumentedResponseWriter struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
Context
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
status int
|
|
||||||
written int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
|
|
||||||
n, err = irw.ResponseWriter.Write(p)
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
irw.written += int64(n)
|
|
||||||
|
|
||||||
// Guess the likely status if not set.
|
|
||||||
if irw.status == 0 {
|
|
||||||
irw.status = http.StatusOK
|
|
||||||
}
|
|
||||||
|
|
||||||
irw.mu.Unlock()
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
|
|
||||||
irw.ResponseWriter.WriteHeader(status)
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
irw.status = status
|
|
||||||
irw.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Flush() {
|
|
||||||
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
|
|
||||||
flusher.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.response" {
|
|
||||||
return irw
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(keyStr, "http.response.") {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(keyStr, ".")
|
|
||||||
|
|
||||||
if len(parts) != 3 {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
defer irw.mu.Unlock()
|
|
||||||
|
|
||||||
switch parts[2] {
|
|
||||||
case "written":
|
|
||||||
return irw.written
|
|
||||||
case "status":
|
|
||||||
return irw.status
|
|
||||||
case "contenttype":
|
|
||||||
contentType := irw.Header().Get("Content-Type")
|
|
||||||
if contentType != "" {
|
|
||||||
return contentType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fallback:
|
|
||||||
return irw.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.response" {
|
|
||||||
return irw
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return irw.instrumentedResponseWriter.Value(key)
|
|
||||||
}
|
|
116
vendor/github.com/docker/distribution/context/logger.go
generated
vendored
116
vendor/github.com/docker/distribution/context/logger.go
generated
vendored
|
@ -1,116 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Logger provides a leveled-logging interface.
|
|
||||||
type Logger interface {
|
|
||||||
// standard logger methods
|
|
||||||
Print(args ...interface{})
|
|
||||||
Printf(format string, args ...interface{})
|
|
||||||
Println(args ...interface{})
|
|
||||||
|
|
||||||
Fatal(args ...interface{})
|
|
||||||
Fatalf(format string, args ...interface{})
|
|
||||||
Fatalln(args ...interface{})
|
|
||||||
|
|
||||||
Panic(args ...interface{})
|
|
||||||
Panicf(format string, args ...interface{})
|
|
||||||
Panicln(args ...interface{})
|
|
||||||
|
|
||||||
// Leveled methods, from logrus
|
|
||||||
Debug(args ...interface{})
|
|
||||||
Debugf(format string, args ...interface{})
|
|
||||||
Debugln(args ...interface{})
|
|
||||||
|
|
||||||
Error(args ...interface{})
|
|
||||||
Errorf(format string, args ...interface{})
|
|
||||||
Errorln(args ...interface{})
|
|
||||||
|
|
||||||
Info(args ...interface{})
|
|
||||||
Infof(format string, args ...interface{})
|
|
||||||
Infoln(args ...interface{})
|
|
||||||
|
|
||||||
Warn(args ...interface{})
|
|
||||||
Warnf(format string, args ...interface{})
|
|
||||||
Warnln(args ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogger creates a new context with provided logger.
|
|
||||||
func WithLogger(ctx Context, logger Logger) Context {
|
|
||||||
return WithValue(ctx, "logger", logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLoggerWithField returns a logger instance with the specified field key
|
|
||||||
// and value without affecting the context. Extra specified keys will be
|
|
||||||
// resolved from the context.
|
|
||||||
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
|
|
||||||
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLoggerWithFields returns a logger instance with the specified fields
|
|
||||||
// without affecting the context. Extra specified keys will be resolved from
|
|
||||||
// the context.
|
|
||||||
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
|
|
||||||
// must convert from interface{} -> interface{} to string -> interface{} for logrus.
|
|
||||||
lfields := make(logrus.Fields, len(fields))
|
|
||||||
for key, value := range fields {
|
|
||||||
lfields[fmt.Sprint(key)] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
return getLogrusLogger(ctx, keys...).WithFields(lfields)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLogger returns the logger from the current context, if present. If one
|
|
||||||
// or more keys are provided, they will be resolved on the context and
|
|
||||||
// included in the logger. While context.Value takes an interface, any key
|
|
||||||
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
|
|
||||||
// a logging key field. If context keys are integer constants, for example,
|
|
||||||
// its recommended that a String method is implemented.
|
|
||||||
func GetLogger(ctx Context, keys ...interface{}) Logger {
|
|
||||||
return getLogrusLogger(ctx, keys...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLogrusLogger returns the logrus logger for the context. If one more keys
|
|
||||||
// are provided, they will be resolved on the context and included in the
|
|
||||||
// logger. Only use this function if specific logrus functionality is
|
|
||||||
// required.
|
|
||||||
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
|
|
||||||
var logger *logrus.Entry
|
|
||||||
|
|
||||||
// Get a logger, if it is present.
|
|
||||||
loggerInterface := ctx.Value("logger")
|
|
||||||
if loggerInterface != nil {
|
|
||||||
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
|
|
||||||
logger = lgr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if logger == nil {
|
|
||||||
fields := logrus.Fields{}
|
|
||||||
|
|
||||||
// Fill in the instance id, if we have it.
|
|
||||||
instanceID := ctx.Value("instance.id")
|
|
||||||
if instanceID != nil {
|
|
||||||
fields["instance.id"] = instanceID
|
|
||||||
}
|
|
||||||
|
|
||||||
fields["go.version"] = runtime.Version()
|
|
||||||
// If no logger is found, just return the standard logger.
|
|
||||||
logger = logrus.StandardLogger().WithFields(fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := logrus.Fields{}
|
|
||||||
for _, key := range keys {
|
|
||||||
v := ctx.Value(key)
|
|
||||||
if v != nil {
|
|
||||||
fields[fmt.Sprint(key)] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return logger.WithFields(fields)
|
|
||||||
}
|
|
104
vendor/github.com/docker/distribution/context/trace.go
generated
vendored
104
vendor/github.com/docker/distribution/context/trace.go
generated
vendored
|
@ -1,104 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WithTrace allocates a traced timing span in a new context. This allows a
|
|
||||||
// caller to track the time between calling WithTrace and the returned done
|
|
||||||
// function. When the done function is called, a log message is emitted with a
|
|
||||||
// "trace.duration" field, corresponding to the elapsed time and a
|
|
||||||
// "trace.func" field, corresponding to the function that called WithTrace.
|
|
||||||
//
|
|
||||||
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
|
|
||||||
// dapper-like tracing. This function should be complemented with a WithSpan
|
|
||||||
// method that could be used for tracing distributed RPC calls.
|
|
||||||
//
|
|
||||||
// The main benefit of this function is to post-process log messages or
|
|
||||||
// intercept them in a hook to provide timing data. Trace ids and parent ids
|
|
||||||
// can also be linked to provide call tracing, if so required.
|
|
||||||
//
|
|
||||||
// Here is an example of the usage:
|
|
||||||
//
|
|
||||||
// func timedOperation(ctx Context) {
|
|
||||||
// ctx, done := WithTrace(ctx)
|
|
||||||
// defer done("this will be the log message")
|
|
||||||
// // ... function body ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the function ran for roughly 1s, such a usage would emit a log message
|
|
||||||
// as follows:
|
|
||||||
//
|
|
||||||
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
|
|
||||||
//
|
|
||||||
// Notice that the function name is automatically resolved, along with the
|
|
||||||
// package and a trace id is emitted that can be linked with parent ids.
|
|
||||||
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = Background()
|
|
||||||
}
|
|
||||||
|
|
||||||
pc, file, line, _ := runtime.Caller(1)
|
|
||||||
f := runtime.FuncForPC(pc)
|
|
||||||
ctx = &traced{
|
|
||||||
Context: ctx,
|
|
||||||
id: uuid.Generate().String(),
|
|
||||||
start: time.Now(),
|
|
||||||
parent: GetStringValue(ctx, "trace.id"),
|
|
||||||
fnname: f.Name(),
|
|
||||||
file: file,
|
|
||||||
line: line,
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx, func(format string, a ...interface{}) {
|
|
||||||
GetLogger(ctx,
|
|
||||||
"trace.duration",
|
|
||||||
"trace.id",
|
|
||||||
"trace.parent.id",
|
|
||||||
"trace.func",
|
|
||||||
"trace.file",
|
|
||||||
"trace.line").
|
|
||||||
Debugf(format, a...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// traced represents a context that is traced for function call timing. It
|
|
||||||
// also provides fast lookup for the various attributes that are available on
|
|
||||||
// the trace.
|
|
||||||
type traced struct {
|
|
||||||
Context
|
|
||||||
id string
|
|
||||||
parent string
|
|
||||||
start time.Time
|
|
||||||
fnname string
|
|
||||||
file string
|
|
||||||
line int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *traced) Value(key interface{}) interface{} {
|
|
||||||
switch key {
|
|
||||||
case "trace.start":
|
|
||||||
return ts.start
|
|
||||||
case "trace.duration":
|
|
||||||
return time.Since(ts.start)
|
|
||||||
case "trace.id":
|
|
||||||
return ts.id
|
|
||||||
case "trace.parent.id":
|
|
||||||
if ts.parent == "" {
|
|
||||||
return nil // must return nil to signal no parent.
|
|
||||||
}
|
|
||||||
|
|
||||||
return ts.parent
|
|
||||||
case "trace.func":
|
|
||||||
return ts.fnname
|
|
||||||
case "trace.file":
|
|
||||||
return ts.file
|
|
||||||
case "trace.line":
|
|
||||||
return ts.line
|
|
||||||
}
|
|
||||||
|
|
||||||
return ts.Context.Value(key)
|
|
||||||
}
|
|
24
vendor/github.com/docker/distribution/context/util.go
generated
vendored
24
vendor/github.com/docker/distribution/context/util.go
generated
vendored
|
@ -1,24 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Since looks up key, which should be a time.Time, and returns the duration
|
|
||||||
// since that time. If the key is not found, the value returned will be zero.
|
|
||||||
// This is helpful when inferring metrics related to context execution times.
|
|
||||||
func Since(ctx Context, key interface{}) time.Duration {
|
|
||||||
if startedAt, ok := ctx.Value(key).(time.Time); ok {
|
|
||||||
return time.Since(startedAt)
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStringValue returns a string value from the context. The empty string
|
|
||||||
// will be returned if not found.
|
|
||||||
func GetStringValue(ctx Context, key interface{}) (value string) {
|
|
||||||
if valuev, ok := ctx.Value(key).(string); ok {
|
|
||||||
value = valuev
|
|
||||||
}
|
|
||||||
return value
|
|
||||||
}
|
|
16
vendor/github.com/docker/distribution/context/version.go
generated
vendored
16
vendor/github.com/docker/distribution/context/version.go
generated
vendored
|
@ -1,16 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
// WithVersion stores the application version in the context. The new context
|
|
||||||
// gets a logger to ensure log messages are marked with the application
|
|
||||||
// version.
|
|
||||||
func WithVersion(ctx Context, version string) Context {
|
|
||||||
ctx = WithValue(ctx, "version", version)
|
|
||||||
// push a new logger onto the stack
|
|
||||||
return WithLogger(ctx, GetLogger(ctx, "version"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVersion returns the application version from the context. An empty
|
|
||||||
// string may returned if the version was not set on the context.
|
|
||||||
func GetVersion(ctx Context) string {
|
|
||||||
return GetStringValue(ctx, "version")
|
|
||||||
}
|
|
7
vendor/github.com/docker/distribution/coverpkg.sh
generated
vendored
7
vendor/github.com/docker/distribution/coverpkg.sh
generated
vendored
|
@ -1,7 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Given a subpackage and the containing package, figures out which packages
|
|
||||||
# need to be passed to `go test -coverpkg`: this includes all of the
|
|
||||||
# subpackage's dependencies within the containing package, as well as the
|
|
||||||
# subpackage itself.
|
|
||||||
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
|
|
||||||
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
|
|
6
vendor/github.com/docker/distribution/manifests.go
generated
vendored
6
vendor/github.com/docker/distribution/manifests.go
generated
vendored
|
@ -61,6 +61,12 @@ type ManifestEnumerator interface {
|
||||||
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
|
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest
|
||||||
|
// referred to is not a schema1 manifest, an error should be returned.
|
||||||
|
type SignaturesGetter interface {
|
||||||
|
GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error)
|
||||||
|
}
|
||||||
|
|
||||||
// Describable is an interface for descriptors
|
// Describable is an interface for descriptors
|
||||||
type Describable interface {
|
type Describable interface {
|
||||||
Descriptor() Descriptor
|
Descriptor() Descriptor
|
||||||
|
|
126
vendor/github.com/docker/distribution/uuid/uuid.go
generated
vendored
126
vendor/github.com/docker/distribution/uuid/uuid.go
generated
vendored
|
@ -1,126 +0,0 @@
|
||||||
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
|
|
||||||
// can be generated.
|
|
||||||
//
|
|
||||||
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Bits is the number of bits in a UUID
|
|
||||||
Bits = 128
|
|
||||||
|
|
||||||
// Size is the number of bytes in a UUID
|
|
||||||
Size = Bits / 8
|
|
||||||
|
|
||||||
format = "%08x-%04x-%04x-%04x-%012x"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
|
|
||||||
ErrUUIDInvalid = fmt.Errorf("invalid uuid")
|
|
||||||
|
|
||||||
// Loggerf can be used to override the default logging destination. Such
|
|
||||||
// log messages in this library should be logged at warning or higher.
|
|
||||||
Loggerf = func(format string, args ...interface{}) {}
|
|
||||||
)
|
|
||||||
|
|
||||||
// UUID represents a UUID value. UUIDs can be compared and set to other values
|
|
||||||
// and accessed by byte.
|
|
||||||
type UUID [Size]byte
|
|
||||||
|
|
||||||
// Generate creates a new, version 4 uuid.
|
|
||||||
func Generate() (u UUID) {
|
|
||||||
const (
|
|
||||||
// ensures we backoff for less than 450ms total. Use the following to
|
|
||||||
// select new value, in units of 10ms:
|
|
||||||
// n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
|
|
||||||
maxretries = 9
|
|
||||||
backoff = time.Millisecond * 10
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
totalBackoff time.Duration
|
|
||||||
count int
|
|
||||||
retries int
|
|
||||||
)
|
|
||||||
|
|
||||||
for {
|
|
||||||
// This should never block but the read may fail. Because of this,
|
|
||||||
// we just try to read the random number generator until we get
|
|
||||||
// something. This is a very rare condition but may happen.
|
|
||||||
b := time.Duration(retries) * backoff
|
|
||||||
time.Sleep(b)
|
|
||||||
totalBackoff += b
|
|
||||||
|
|
||||||
n, err := io.ReadFull(rand.Reader, u[count:])
|
|
||||||
if err != nil {
|
|
||||||
if retryOnError(err) && retries < maxretries {
|
|
||||||
count += n
|
|
||||||
retries++
|
|
||||||
Loggerf("error generating version 4 uuid, retrying: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Any other errors represent a system problem. What did someone
|
|
||||||
// do to /dev/urandom?
|
|
||||||
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
u[6] = (u[6] & 0x0f) | 0x40 // set version byte
|
|
||||||
u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
|
|
||||||
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse attempts to extract a uuid from the string or returns an error.
|
|
||||||
func Parse(s string) (u UUID, err error) {
|
|
||||||
if len(s) != 36 {
|
|
||||||
return UUID{}, ErrUUIDInvalid
|
|
||||||
}
|
|
||||||
|
|
||||||
// create stack addresses for each section of the uuid.
|
|
||||||
p := make([][]byte, 5)
|
|
||||||
|
|
||||||
if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
|
|
||||||
return u, err
|
|
||||||
}
|
|
||||||
|
|
||||||
copy(u[0:4], p[0])
|
|
||||||
copy(u[4:6], p[1])
|
|
||||||
copy(u[6:8], p[2])
|
|
||||||
copy(u[8:10], p[3])
|
|
||||||
copy(u[10:16], p[4])
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u UUID) String() string {
|
|
||||||
return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryOnError tries to detect whether or not retrying would be fruitful.
|
|
||||||
func retryOnError(err error) bool {
|
|
||||||
switch err := err.(type) {
|
|
||||||
case *os.PathError:
|
|
||||||
return retryOnError(err.Err) // unpack the target error
|
|
||||||
case syscall.Errno:
|
|
||||||
if err == syscall.EPERM {
|
|
||||||
// EPERM represents an entropy pool exhaustion, a condition under
|
|
||||||
// which we backoff and retry.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
59
vendor/github.com/docker/docker/daemon/graphdriver/counter.go
generated
vendored
59
vendor/github.com/docker/docker/daemon/graphdriver/counter.go
generated
vendored
|
@ -2,66 +2,31 @@ package graphdriver
|
||||||
|
|
||||||
import "sync"
|
import "sync"
|
||||||
|
|
||||||
type minfo struct {
|
|
||||||
check bool
|
|
||||||
count int
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefCounter is a generic counter for use by graphdriver Get/Put calls
|
// RefCounter is a generic counter for use by graphdriver Get/Put calls
|
||||||
type RefCounter struct {
|
type RefCounter struct {
|
||||||
counts map[string]*minfo
|
counts map[string]int
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
checker Checker
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRefCounter returns a new RefCounter
|
// NewRefCounter returns a new RefCounter
|
||||||
func NewRefCounter(c Checker) *RefCounter {
|
func NewRefCounter() *RefCounter {
|
||||||
return &RefCounter{
|
return &RefCounter{counts: make(map[string]int)}
|
||||||
checker: c,
|
|
||||||
counts: make(map[string]*minfo),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Increment increaes the ref count for the given id and returns the current count
|
// Increment increaes the ref count for the given id and returns the current count
|
||||||
func (c *RefCounter) Increment(path string) int {
|
func (c *RefCounter) Increment(id string) int {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
m := c.counts[path]
|
c.counts[id]++
|
||||||
if m == nil {
|
count := c.counts[id]
|
||||||
m = &minfo{}
|
|
||||||
c.counts[path] = m
|
|
||||||
}
|
|
||||||
// if we are checking this path for the first time check to make sure
|
|
||||||
// if it was already mounted on the system and make sure we have a correct ref
|
|
||||||
// count if it is mounted as it is in use.
|
|
||||||
if !m.check {
|
|
||||||
m.check = true
|
|
||||||
if c.checker.IsMounted(path) {
|
|
||||||
m.count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.count++
|
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
return m.count
|
return count
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decrement decreases the ref count for the given id and returns the current count
|
// Decrement decreases the ref count for the given id and returns the current count
|
||||||
func (c *RefCounter) Decrement(path string) int {
|
func (c *RefCounter) Decrement(id string) int {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
m := c.counts[path]
|
c.counts[id]--
|
||||||
if m == nil {
|
count := c.counts[id]
|
||||||
m = &minfo{}
|
|
||||||
c.counts[path] = m
|
|
||||||
}
|
|
||||||
// if we are checking this path for the first time check to make sure
|
|
||||||
// if it was already mounted on the system and make sure we have a correct ref
|
|
||||||
// count if it is mounted as it is in use.
|
|
||||||
if !m.check {
|
|
||||||
m.check = true
|
|
||||||
if c.checker.IsMounted(path) {
|
|
||||||
m.count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.count--
|
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
return m.count
|
return count
|
||||||
}
|
}
|
||||||
|
|
13
vendor/github.com/docker/docker/daemon/graphdriver/driver.go
generated
vendored
13
vendor/github.com/docker/docker/daemon/graphdriver/driver.go
generated
vendored
|
@ -46,12 +46,9 @@ type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDM
|
||||||
type ProtoDriver interface {
|
type ProtoDriver interface {
|
||||||
// String returns a string representation of this driver.
|
// String returns a string representation of this driver.
|
||||||
String() string
|
String() string
|
||||||
// CreateReadWrite creates a new, empty filesystem layer that is ready
|
|
||||||
// to be used as the storage for a container.
|
|
||||||
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
|
|
||||||
// Create creates a new, empty, filesystem layer with the
|
// Create creates a new, empty, filesystem layer with the
|
||||||
// specified id and parent and mountLabel. Parent and mountLabel may be "".
|
// specified id and parent and mountLabel. Parent and mountLabel may be "".
|
||||||
Create(id, parent, mountLabel string, storageOpt map[string]string) error
|
Create(id, parent, mountLabel string) error
|
||||||
// Remove attempts to remove the filesystem layer with this id.
|
// Remove attempts to remove the filesystem layer with this id.
|
||||||
Remove(id string) error
|
Remove(id string) error
|
||||||
// Get returns the mountpoint for the layered filesystem referred
|
// Get returns the mountpoint for the layered filesystem referred
|
||||||
|
@ -113,17 +110,11 @@ type FileGetCloser interface {
|
||||||
Close() error
|
Close() error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Checker makes checks on specified filesystems.
|
|
||||||
type Checker interface {
|
|
||||||
// IsMounted returns true if the provided path is mounted for the specific checker
|
|
||||||
IsMounted(path string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
drivers = make(map[string]InitFunc)
|
drivers = make(map[string]InitFunc)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register registers an InitFunc for the driver.
|
// Register registers a InitFunc for the driver.
|
||||||
func Register(name string, initFunc InitFunc) error {
|
func Register(name string, initFunc InitFunc) error {
|
||||||
if _, exists := drivers[name]; exists {
|
if _, exists := drivers[name]; exists {
|
||||||
return fmt.Errorf("Name already registered %s", name)
|
return fmt.Errorf("Name already registered %s", name)
|
||||||
|
|
34
vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
generated
vendored
34
vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
generated
vendored
|
@ -5,8 +5,6 @@ package graphdriver
|
||||||
import (
|
import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/mount"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -16,8 +14,6 @@ const (
|
||||||
FsMagicBtrfs = FsMagic(0x9123683E)
|
FsMagicBtrfs = FsMagic(0x9123683E)
|
||||||
// FsMagicCramfs filesystem id for Cramfs
|
// FsMagicCramfs filesystem id for Cramfs
|
||||||
FsMagicCramfs = FsMagic(0x28cd3d45)
|
FsMagicCramfs = FsMagic(0x28cd3d45)
|
||||||
// FsMagicEcryptfs filesystem id for eCryptfs
|
|
||||||
FsMagicEcryptfs = FsMagic(0xf15f)
|
|
||||||
// FsMagicExtfs filesystem id for Extfs
|
// FsMagicExtfs filesystem id for Extfs
|
||||||
FsMagicExtfs = FsMagic(0x0000EF53)
|
FsMagicExtfs = FsMagic(0x0000EF53)
|
||||||
// FsMagicF2fs filesystem id for F2fs
|
// FsMagicF2fs filesystem id for F2fs
|
||||||
|
@ -93,36 +89,6 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
|
||||||
return FsMagic(buf.Type), nil
|
return FsMagic(buf.Type), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFsChecker returns a checker configured for the provied FsMagic
|
|
||||||
func NewFsChecker(t FsMagic) Checker {
|
|
||||||
return &fsChecker{
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type fsChecker struct {
|
|
||||||
t FsMagic
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fsChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := Mounted(c.t, path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
|
|
||||||
// if the specified path is mounted.
|
|
||||||
func NewDefaultChecker() Checker {
|
|
||||||
return &defaultChecker{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type defaultChecker struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *defaultChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := mount.Mounted(path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
// Mounted checks if the given path is mounted as the fs type
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
||||||
var buf syscall.Statfs_t
|
var buf syscall.Statfs_t
|
||||||
|
|
65
vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
generated
vendored
65
vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
generated
vendored
|
@ -1,65 +0,0 @@
|
||||||
// +build solaris,cgo
|
|
||||||
|
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include <sys/statvfs.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
|
|
||||||
static inline struct statvfs *getstatfs(char *s) {
|
|
||||||
struct statvfs *buf;
|
|
||||||
int err;
|
|
||||||
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
|
|
||||||
err = statvfs(s, buf);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FsMagicZfs filesystem id for Zfs
|
|
||||||
FsMagicZfs = FsMagic(0x2fc12fc1)
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in an order
|
|
||||||
priority = []string{
|
|
||||||
"zfs",
|
|
||||||
}
|
|
||||||
|
|
||||||
// FsNames maps filesystem id to name of the filesystem.
|
|
||||||
FsNames = map[FsMagic]string{
|
|
||||||
FsMagicZfs: "zfs",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
|
||||||
//Solaris supports only ZFS for now
|
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
|
||||||
|
|
||||||
cs := C.CString(filepath.Dir(mountPath))
|
|
||||||
buf := C.getstatfs(cs)
|
|
||||||
|
|
||||||
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
|
|
||||||
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
|
|
||||||
(buf.f_basetype[3] != 0) {
|
|
||||||
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
|
|
||||||
C.free(unsafe.Pointer(buf))
|
|
||||||
return false, ErrPrerequisites
|
|
||||||
}
|
|
||||||
|
|
||||||
C.free(unsafe.Pointer(buf))
|
|
||||||
C.free(unsafe.Pointer(cs))
|
|
||||||
return true, nil
|
|
||||||
}
|
|
2
vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
generated
vendored
2
vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build !linux,!windows,!freebsd,!solaris
|
// +build !linux,!windows,!freebsd
|
||||||
|
|
||||||
package graphdriver
|
package graphdriver
|
||||||
|
|
||||||
|
|
2
vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
generated
vendored
2
vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
generated
vendored
|
@ -4,6 +4,8 @@ var (
|
||||||
// Slice of drivers that should be used in order
|
// Slice of drivers that should be used in order
|
||||||
priority = []string{
|
priority = []string{
|
||||||
"windowsfilter",
|
"windowsfilter",
|
||||||
|
"windowsdiff",
|
||||||
|
"vfs",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
2
vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
generated
vendored
2
vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
generated
vendored
|
@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
|
||||||
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
||||||
GIDMaps: gdw.gidMaps}
|
GIDMaps: gdw.gidMaps}
|
||||||
start := time.Now().UTC()
|
start := time.Now().UTC()
|
||||||
logrus.Debug("Start untar layer")
|
logrus.Debugf("Start untar layer")
|
||||||
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
|
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
2
vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
generated
vendored
2
vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
generated
vendored
|
@ -23,7 +23,7 @@ func lookupPlugin(name, home string, opts []string) (Driver, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
|
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
|
||||||
}
|
}
|
||||||
return newPluginDriver(name, home, opts, pl.Client())
|
return newPluginDriver(name, home, opts, pl.Client)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
|
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
|
||||||
|
|
18
vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
generated
vendored
18
vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
generated
vendored
|
@ -54,23 +54,7 @@ func (d *graphDriverProxy) String() string {
|
||||||
return d.name
|
return d.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
|
func (d *graphDriverProxy) Create(id, parent, mountLabel string) error {
|
||||||
args := &graphDriverRequest{
|
|
||||||
ID: id,
|
|
||||||
Parent: parent,
|
|
||||||
MountLabel: mountLabel,
|
|
||||||
}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
|
|
||||||
args := &graphDriverRequest{
|
args := &graphDriverRequest{
|
||||||
ID: id,
|
ID: id,
|
||||||
Parent: parent,
|
Parent: parent,
|
||||||
|
|
15
vendor/github.com/docker/docker/image/fs.go
generated
vendored
15
vendor/github.com/docker/docker/image/fs.go
generated
vendored
|
@ -9,7 +9,6 @@ import (
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
"github.com/docker/distribution/digest"
|
"github.com/docker/distribution/digest"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// IDWalkFunc is function called by StoreBackend.Walk
|
// IDWalkFunc is function called by StoreBackend.Walk
|
||||||
|
@ -119,7 +118,12 @@ func (s *fs) Set(data []byte) (ID, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
id := ID(digest.FromBytes(data))
|
id := ID(digest.FromBytes(data))
|
||||||
if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil {
|
filePath := s.contentFile(id)
|
||||||
|
tempFilePath := s.contentFile(id) + ".tmp"
|
||||||
|
if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if err := os.Rename(tempFilePath, filePath); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,7 +156,12 @@ func (s *fs) SetMetadata(id ID, key string, data []byte) error {
|
||||||
if err := os.MkdirAll(baseDir, 0700); err != nil {
|
if err := os.MkdirAll(baseDir, 0700); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600)
|
filePath := filepath.Join(s.metadataDir(id), key)
|
||||||
|
tempFilePath := filePath + ".tmp"
|
||||||
|
if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Rename(tempFilePath, filePath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMetadata returns metadata for a given ID.
|
// GetMetadata returns metadata for a given ID.
|
||||||
|
|
8
vendor/github.com/docker/docker/image/image.go
generated
vendored
8
vendor/github.com/docker/docker/image/image.go
generated
vendored
|
@ -48,11 +48,9 @@ type V1Image struct {
|
||||||
// Image stores the image configuration
|
// Image stores the image configuration
|
||||||
type Image struct {
|
type Image struct {
|
||||||
V1Image
|
V1Image
|
||||||
Parent ID `json:"parent,omitempty"`
|
Parent ID `json:"parent,omitempty"`
|
||||||
RootFS *RootFS `json:"rootfs,omitempty"`
|
RootFS *RootFS `json:"rootfs,omitempty"`
|
||||||
History []History `json:"history,omitempty"`
|
History []History `json:"history,omitempty"`
|
||||||
OSVersion string `json:"os.version,omitempty"`
|
|
||||||
OSFeatures []string `json:"os.features,omitempty"`
|
|
||||||
|
|
||||||
// rawJSON caches the immutable JSON associated with this image.
|
// rawJSON caches the immutable JSON associated with this image.
|
||||||
rawJSON []byte
|
rawJSON []byte
|
||||||
|
|
8
vendor/github.com/docker/docker/image/rootfs.go
generated
vendored
8
vendor/github.com/docker/docker/image/rootfs.go
generated
vendored
|
@ -2,14 +2,6 @@ package image
|
||||||
|
|
||||||
import "github.com/docker/docker/layer"
|
import "github.com/docker/docker/layer"
|
||||||
|
|
||||||
// TypeLayers is used for RootFS.Type for filesystems organized into layers.
|
|
||||||
const TypeLayers = "layers"
|
|
||||||
|
|
||||||
// NewRootFS returns empty RootFS struct
|
|
||||||
func NewRootFS() *RootFS {
|
|
||||||
return &RootFS{Type: TypeLayers}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append appends a new diffID to rootfs
|
// Append appends a new diffID to rootfs
|
||||||
func (r *RootFS) Append(id layer.DiffID) {
|
func (r *RootFS) Append(id layer.DiffID) {
|
||||||
r.DiffIDs = append(r.DiffIDs, id)
|
r.DiffIDs = append(r.DiffIDs, id)
|
||||||
|
|
5
vendor/github.com/docker/docker/image/rootfs_unix.go
generated
vendored
5
vendor/github.com/docker/docker/image/rootfs_unix.go
generated
vendored
|
@ -16,3 +16,8 @@ type RootFS struct {
|
||||||
func (r *RootFS) ChainID() layer.ChainID {
|
func (r *RootFS) ChainID() layer.ChainID {
|
||||||
return layer.CreateChainID(r.DiffIDs)
|
return layer.CreateChainID(r.DiffIDs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewRootFS returns empty RootFS struct
|
||||||
|
func NewRootFS() *RootFS {
|
||||||
|
return &RootFS{Type: "layers"}
|
||||||
|
}
|
||||||
|
|
21
vendor/github.com/docker/docker/image/rootfs_windows.go
generated
vendored
21
vendor/github.com/docker/docker/image/rootfs_windows.go
generated
vendored
|
@ -10,9 +10,6 @@ import (
|
||||||
"github.com/docker/docker/layer"
|
"github.com/docker/docker/layer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer.
|
|
||||||
const TypeLayersWithBase = "layers+base"
|
|
||||||
|
|
||||||
// RootFS describes images root filesystem
|
// RootFS describes images root filesystem
|
||||||
// This is currently a placeholder that only supports layers. In the future
|
// This is currently a placeholder that only supports layers. In the future
|
||||||
// this can be made into an interface that supports different implementations.
|
// this can be made into an interface that supports different implementations.
|
||||||
|
@ -24,25 +21,17 @@ type RootFS struct {
|
||||||
|
|
||||||
// BaseLayerID returns the 64 byte hex ID for the baselayer name.
|
// BaseLayerID returns the 64 byte hex ID for the baselayer name.
|
||||||
func (r *RootFS) BaseLayerID() string {
|
func (r *RootFS) BaseLayerID() string {
|
||||||
if r.Type != TypeLayersWithBase {
|
|
||||||
panic("tried to get base layer ID without a base layer")
|
|
||||||
}
|
|
||||||
baseID := sha512.Sum384([]byte(r.BaseLayer))
|
baseID := sha512.Sum384([]byte(r.BaseLayer))
|
||||||
return fmt.Sprintf("%x", baseID[:32])
|
return fmt.Sprintf("%x", baseID[:32])
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChainID returns the ChainID for the top layer in RootFS.
|
// ChainID returns the ChainID for the top layer in RootFS.
|
||||||
func (r *RootFS) ChainID() layer.ChainID {
|
func (r *RootFS) ChainID() layer.ChainID {
|
||||||
ids := r.DiffIDs
|
baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
|
||||||
if r.Type == TypeLayersWithBase {
|
return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
|
||||||
// Add an extra ID for the base.
|
|
||||||
baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID())))
|
|
||||||
ids = append([]layer.DiffID{baseDiffID}, ids...)
|
|
||||||
}
|
|
||||||
return layer.CreateChainID(ids)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRootFSWithBaseLayer returns a RootFS struct with a base layer
|
// NewRootFS returns empty RootFS struct
|
||||||
func NewRootFSWithBaseLayer(baseLayer string) *RootFS {
|
func NewRootFS() *RootFS {
|
||||||
return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer}
|
return &RootFS{Type: "layers+base"}
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue