Implement image handling using containers/image
Flesh out the implementations of various image-related functions.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
parent 8874367041
commit e39cf0dd21

128 changed files with 7231 additions and 3075 deletions
@@ -7,6 +7,8 @@ import (
 	"os"
 
 	"github.com/Sirupsen/logrus"
+	sreexec "github.com/containers/storage/pkg/reexec"
+	dreexec "github.com/docker/docker/pkg/reexec"
 	"github.com/kubernetes-incubator/ocid/server"
 	"github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 	"github.com/urfave/cli"
@@ -18,6 +20,13 @@ const (
 )
 
 func main() {
+	if sreexec.Init() {
+		return
+	}
+	if dreexec.Init() {
+		return
+	}
+
 	app := cli.NewApp()
 	app.Name = "ocic"
 	app.Usage = "client for ocid"
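The two Init calls guard against re-executed helper processes: each vendored reexec package runs any function registered under the current process name and returns true, in which case main must bail out immediately. A minimal sketch of that pattern, using the docker/docker variant (the "hello" helper name is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register a function to run when this binary is re-executed under the name "hello".
	reexec.Register("hello", func() {
		fmt.Println("running as a re-executed child")
		os.Exit(0)
	})
}

func main() {
	// In a re-executed child, Init runs the registered function and returns true,
	// so main must return before doing any of its normal work.
	if reexec.Init() {
		return
	}
	fmt.Println("running as the ordinary entry point")
}
```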
glide.lock (generated, 58 lines changed)
@@ -1,5 +1,5 @@
-hash: d966770b8886be2e6e94f299dc52f7b6f64597a6e036b70aa361108d0aa5a5aa
-updated: 2016-09-17T17:51:57.682909454+02:00
+hash: 06a42cd1074b800b7cff0bfa89b648dcff008fce0172bf1f5f42548ccb4baa26
+updated: 2016-09-17T18:49:17.02525117+02:00
 imports:
 - name: github.com/containernetworking/cni
   version: 9d5e6e60e79491207834ae8439e80c943db65a69
@@ -14,29 +14,28 @@ imports:
  - directory
  - image
  - transports
  - directory/explicitfilepath
  - signatures
  - copy
  - storage
  - signature
  - types
  - manifest
  - version
  - docker
  - oci/layout
  - openshift
  - storage
  - directory/explicitfilepath
  - docker/policyconfiguration
  - version
- name: github.com/containers/storage
  version: 32264f11f3b997e8d2ba032a6a13ebdfcddc2897
  subpackages:
  - pkg/archive
  - pkg/ioutils
  - storage
  - pkg/fileutils
  - pkg/idtools
  - pkg/pools
  - pkg/promise
  - pkg/system
  - pkg/longpath
  - pkg/reexec
  - drivers
  - drivers/register
  - pkg/archive
  - pkg/idtools
  - pkg/ioutils
  - pkg/stringid
  - storageversion
  - pkg/chrootarchive
@@ -50,8 +49,12 @@ imports:
  - drivers/vfs
  - drivers/windows
  - drivers/zfs
  - pkg/fileutils
  - pkg/pools
  - pkg/promise
  - pkg/system
  - pkg/longpath
  - pkg/random
  - pkg/reexec
  - pkg/plugins/transport
  - pkg/directory
  - pkg/parsers
@@ -63,36 +66,31 @@ imports:
  subpackages:
  - digest
  - reference
  - context
  - uuid
- name: github.com/docker/docker
-  version: b9f10c951893f9a00865890a5232e85d770c1087
+  version: 23cf638307f030cd8d48c9efc21feec18a6f88f8
  subpackages:
  - pkg/jsonlog
  - pkg/jsonmessage
  - pkg/longpath
  - pkg/mount
  - pkg/stdcopy
  - pkg/symlink
  - pkg/system
  - pkg/term
  - pkg/term/windows
  - pkg/reexec
  - reference
  - image
  - image/v1
  - pkg/ioutils
  - pkg/homedir
  - layer
  - pkg/version
  - pkg/longpath
  - daemon/graphdriver
  - pkg/archive
  - pkg/idtools
  - pkg/ioutils
  - pkg/stringid
  - pkg/chrootarchive
  - pkg/mount
  - pkg/plugins
  - pkg/fileutils
  - pkg/pools
  - pkg/promise
  - pkg/system
  - pkg/random
  - pkg/reexec
  - pkg/plugins/transport
- name: github.com/docker/engine-api
  version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
@@ -157,6 +155,10 @@ imports:
  subpackages:
  - jsonpb
  - proto
- name: github.com/gorilla/context
  version: 215affda49addc4c8ef7e2534915df2c8c35c6cd
- name: github.com/gorilla/mux
  version: 8096f47503459bcc74d1f4c487b7e6e42e5746b5
- name: github.com/imdario/mergo
  version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/kubernetes/kubernetes
@@ -174,6 +176,8 @@ imports:
  version: d8e08e7d31d4f441646638b35f423a760d6dfbcd
- name: github.com/mistifyio/go-zfs
  version: 1b4ae6fb4e77b095934d4430860ff202060169f8
- name: github.com/mtrmac/gpgme
  version: 7486dcd030936581e863bb69602ef411920d7fc9
- name: github.com/opencontainers/image-spec
  version: 287772a24ab02d5d2fe3fbbba4f13dff9b9ce003
  subpackages:
glide.yaml (13 lines changed)
@@ -4,11 +4,14 @@ import:
  version: ~0.10.0
- package: github.com/containers/image
  version: add-storage-transport
  repo: git@github.com:nalind/image
  subpackages:
  - directory
  - image
  - transports
  - signatures
  - copy
  - storage
- package: github.com/kubernetes/kubernetes
  version: ~1.5.0-alpha.0
  subpackages:
@@ -28,3 +31,11 @@ import:
  - context
- package: google.golang.org/grpc
  version: ~1.0.1-GA
- package: github.com/containers/storage
  subpackages:
  - storage
  - pkg/reexec
- package: github.com/docker/docker
  version: ~1.12.1
  subpackages:
  - pkg/reexec
server/image.go (103 lines changed)
@@ -2,11 +2,10 @@ package server
 
 import (
 	"errors"
-	"os"
-	"path/filepath"
 
-	"github.com/containers/image/directory"
-	"github.com/containers/image/image"
+	ic "github.com/containers/image/copy"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/storage"
 	"github.com/containers/image/transports"
 	pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 	"golang.org/x/net/context"
@@ -14,18 +13,34 @@ import (
 
 // ListImages lists existing images.
 func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and listing images.
-	return nil, nil
+	images, err := s.storage.Images()
+	if err != nil {
+		return nil, err
+	}
+	resp := pb.ListImagesResponse{}
+	for _, image := range images {
+		idCopy := image.ID
+		resp.Images = append(resp.Images, &pb.Image{
+			Id:       &idCopy,
+			RepoTags: image.Names,
+		})
+	}
+	return &resp, nil
 }
 
 // ImageStatus returns the status of the image.
 func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
-	// TODO
-	// containers/storage will take care of this by looking inside /var/lib/ocid/images
-	// and getting the image status
-	return nil, nil
+	image, err := s.storage.GetImage(*(req.Image.Image))
+	if err != nil {
+		return nil, err
+	}
+	resp := pb.ImageStatusResponse{}
+	idCopy := image.ID
+	resp.Image = &pb.Image{
+		Id:       &idCopy,
+		RepoTags: image.Names,
+	}
+	return &resp, nil
 }
 
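These handlers implement the kubelet v1alpha1 image service. A hedged sketch of driving them with the generated gRPC client; the socket path is an assumption, and pb.NewImageServiceClient is assumed to be the generated constructor for this API version:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"time"

	pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial the daemon's Unix socket (path is an assumption for illustration).
	conn, err := grpc.Dial("/var/run/ocid.sock", grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewImageServiceClient(conn)
	resp, err := client.ListImages(context.Background(), &pb.ListImagesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range resp.Images {
		fmt.Println(img.GetId(), img.RepoTags)
	}
}
```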
 // PullImage pulls an image with authentication config.
@@ -36,64 +51,36 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
 	}
 
 	// TODO(runcom): deal with AuthConfig in req.GetAuth()
 
 	// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
 	// how do we pull in a specified sandbox?
-	tr, err := transports.ParseImageName(img)
-	if err != nil {
-		return nil, err
-	}
-	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
-	src, err := tr.NewImageSource(nil, nil)
-	if err != nil {
-		return nil, err
-	}
-	i := image.FromSource(src)
-	blobs, err := i.BlobDigests()
+	sr, err := transports.ParseImageName(img)
 	if err != nil {
 		return nil, err
 	}
-
-	if err := os.Mkdir(filepath.Join(imageStore, tr.StringWithinTransport()), 0755); err != nil {
-		return nil, err
-	}
-	dir, err := directory.NewReference(filepath.Join(imageStore, tr.StringWithinTransport()))
+	dr, err := transports.ParseImageName(storage.Transport.Name() + ":" + sr.StringWithinTransport())
 	if err != nil {
 		return nil, err
 	}
-	// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
-	dest, err := dir.NewImageDestination(nil)
-	if err != nil {
-		return nil, err
-	}
-	// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
-	for _, b := range blobs {
-		// TODO(runcom,nalin): we need do-then-commit to later purge on error
-		r, _, err := src.GetBlob(b)
-		if err != nil {
-			return nil, err
-		}
-		if _, _, err := dest.PutBlob(r, b, -1); err != nil {
-			r.Close()
-			return nil, err
-		}
-		r.Close()
-	}
-	// save manifest
-	m, _, err := i.Manifest()
-	if err != nil {
-		return nil, err
-	}
-	if err := dest.PutManifest(m); err != nil {
-		return nil, err
-	}
+	// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
+	policy, err := signature.DefaultPolicy(s.imageContext)
+	if err != nil {
+		return nil, err
+	}
+
+	pc, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return nil, err
+	}
+
+	err = ic.Image(s.imageContext, pc, dr, sr, &ic.Options{})
+	if err != nil {
+		return nil, err
+	}
 
 	return &pb.PullImageResponse{}, nil
 }
 
 // RemoveImage removes the image.
 func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
-	return nil, nil
+	_, err := s.storage.DeleteImage(*(req.Image.Image), true)
+	return nil, err
 }
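The new pull path delegates transfer, digest verification, and policy enforcement to the vendored copy package; the destination is simply the source reference re-rooted onto the containers-storage transport. A condensed standalone sketch of the same flow; the image name and the empty SystemContext are illustrative, and the storage transport must have had a store configured first (as New does below):

```go
package main

import (
	"log"

	ic "github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/storage"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
)

func main() {
	srcRef, err := transports.ParseImageName("docker://busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	// Re-root the same reference onto the storage transport, yielding something
	// like "containers-storage://busybox:latest".
	// NOTE: requires storage.Transport.SetStore(...) to have been called, as server.New does.
	destRef, err := transports.ParseImageName(storage.Transport.Name() + ":" + srcRef.StringWithinTransport())
	if err != nil {
		log.Fatal(err)
	}

	sys := &types.SystemContext{}
	policy, err := signature.DefaultPolicy(sys) // reads /etc/containers/policy.json by default
	if err != nil {
		log.Fatal(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	if err := ic.Image(sys, pc, destRef, srcRef, &ic.Options{}); err != nil {
		log.Fatal(err)
	}
}
```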
@@ -5,6 +5,9 @@ import (
 	"os"
 	"sync"
 
+	imagestorage "github.com/containers/image/storage"
+	imagetypes "github.com/containers/image/types"
+	"github.com/containers/storage/storage"
 	"github.com/kubernetes-incubator/ocid/oci"
 	"github.com/kubernetes-incubator/ocid/utils"
 	"github.com/rajatchopra/ocicni"
@@ -13,15 +16,19 @@ import (
 const (
 	runtimeAPIVersion = "v1alpha1"
 	imageStore        = "/var/lib/ocid/images"
+	storageRun        = "/var/run/ocid/storage"
+	storageGraph      = "/var/lib/ocid/storage"
 )
 
 // Server implements the RuntimeService and ImageService
 type Server struct {
-	runtime    *oci.Runtime
-	sandboxDir string
-	stateLock  sync.Mutex
-	state      *serverState
-	netPlugin  ocicni.CNIPlugin
+	runtime      *oci.Runtime
+	sandboxDir   string
+	stateLock    sync.Mutex
+	state        *serverState
+	netPlugin    ocicni.CNIPlugin
+	imageContext *imagetypes.SystemContext
+	storage      storage.Store
 }
 
 // New creates a new Server with options provided
@@ -44,6 +51,11 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
 	}
 	sandboxes := make(map[string]*sandbox)
 	containers := make(map[string]*oci.Container)
+	store, err := storage.MakeStore(storageRun, storageGraph, "", []string{}, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	imagestorage.Transport.SetStore(store)
 	netPlugin, err := ocicni.InitCNI("")
 	if err != nil {
 		return nil, err
@@ -52,10 +64,17 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
 		runtime:    r,
 		netPlugin:  netPlugin,
 		sandboxDir: sandboxDir,
+		storage:    store,
 		state: &serverState{
 			sandboxes:  sandboxes,
 			containers: containers,
 		},
+		imageContext: &imagetypes.SystemContext{
+			RootForImplicitAbsolutePaths: "",
+			SignaturePolicyPath:          "",
+			DockerCertPath:               "",
+			DockerInsecureSkipTLSVerify:  false,
+		},
 	}, nil
 }
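With SignaturePolicyPath left empty, signature.DefaultPolicy falls back to /etc/containers/policy.json (see builtinDefaultPolicyPath in the vendored code below). A sketch of parsing such a policy from bytes; the permissive "insecureAcceptAnything" content is an example, not a recommendation:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/signature"
)

const policyJSON = `{
    "default": [
        {"type": "insecureAcceptAnything"}
    ]
}`

func main() {
	policy, err := signature.NewPolicyFromBytes([]byte(policyJSON))
	if err != nil {
		log.Fatal(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	_ = pc // ready to pass to copy.Image
	fmt.Printf("parsed policy with %d default requirement(s)\n", len(policy.Default))
}
```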
vendor/github.com/containers/image/copy/copy.go (new file, vendored, 182 lines)
@@ -0,0 +1,182 @@
package copy

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"

	"github.com/containers/image/image"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
)

// supportedDigests lists the supported blob digest types.
var supportedDigests = map[string]func() hash.Hash{
	"sha256": sha256.New,
}

type digestingReader struct {
	source           io.Reader
	digest           hash.Hash
	expectedDigest   []byte
	validationFailed bool
}

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// and set validationFailed to true if the source stream does not match expectedDigestString.
func newDigestingReader(source io.Reader, expectedDigestString string) (*digestingReader, error) {
	fields := strings.SplitN(expectedDigestString, ":", 2)
	if len(fields) != 2 {
		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
	}
	fn, ok := supportedDigests[fields[0]]
	if !ok {
		return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0])
	}
	digest := fn()
	expectedDigest, err := hex.DecodeString(fields[1])
	if err != nil {
		return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
	}
	if len(expectedDigest) != digest.Size() {
		return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size())
	}
	return &digestingReader{
		source:           source,
		digest:           digest,
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
		}
	}
	if err == io.EOF {
		actualDigest := d.digest.Sum(nil)
		if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 {
			d.validationFailed = true
			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest))
		}
	}
	return n, err
}
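digestingReader is unexported, so here is a standalone sketch of the same verify-while-reading pattern it implements (not the vendored code itself): reading to EOF either confirms the sha256 digest or surfaces a mismatch as a read error.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
)

// verifyingReader mirrors digestingReader: it hashes everything it reads and
// compares against the expected digest at EOF.
type verifyingReader struct {
	src      io.Reader
	h        hash.Hash
	expected []byte
}

func (v *verifyingReader) Read(p []byte) (int, error) {
	n, err := v.src.Read(p)
	if n > 0 {
		v.h.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF {
		if subtle.ConstantTimeCompare(v.h.Sum(nil), v.expected) != 1 {
			return 0, fmt.Errorf("digest mismatch")
		}
	}
	return n, err
}

func main() {
	payload := []byte("hello blob")
	sum := sha256.Sum256(payload)
	r := &verifyingReader{src: bytes.NewReader(payload), h: sha256.New(), expected: sum[:]}
	if _, err := ioutil.ReadAll(r); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("verified sha256:" + hex.EncodeToString(sum[:]))
}
```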
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
	RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
	SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest().
}

// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility.
func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error {
	dest, err := destRef.NewImageDestination(ctx)
	if err != nil {
		return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err)
	}
	defer dest.Close()

	rawSource, err := srcRef.NewImageSource(ctx, dest.SupportedManifestMIMETypes())
	if err != nil {
		return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err)
	}
	src := image.FromSource(rawSource)
	defer src.Close()

	// Please keep this policy check BEFORE reading any other information about the image.
	if allowed, err := policyContext.IsRunningImageAllowed(src); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return fmt.Errorf("Source image rejected: %v", err)
	}

	manifest, _, err := src.Manifest()
	if err != nil {
		return fmt.Errorf("Error reading manifest: %v", err)
	}

	var sigs [][]byte
	if options != nil && options.RemoveSignatures {
		sigs = [][]byte{}
	} else {
		s, err := src.Signatures()
		if err != nil {
			return fmt.Errorf("Error reading signatures: %v", err)
		}
		sigs = s
	}
	if len(sigs) != 0 {
		if err := dest.SupportsSignatures(); err != nil {
			return fmt.Errorf("Can not copy signatures: %v", err)
		}
	}

	blobDigests, err := src.BlobDigests()
	if err != nil {
		return fmt.Errorf("Error parsing manifest: %v", err)
	}
	for _, digest := range blobDigests {
		stream, blobSize, err := rawSource.GetBlob(digest)
		if err != nil {
			return fmt.Errorf("Error reading blob %s: %v", digest, err)
		}
		defer stream.Close()

		// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
		// use a separate validation failure indicator.
		// Note that we don't use a stronger "validationSucceeded" indicator, because
		// dest.PutBlob may detect that the layer already exists, in which case we don't
		// read stream to the end, and validation does not happen.
		digestingReader, err := newDigestingReader(stream, digest)
		if err != nil {
			return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
		}
		if _, _, err := dest.PutBlob(digestingReader, digest, blobSize); err != nil {
			return fmt.Errorf("Error writing blob: %v", err)
		}
		if digestingReader.validationFailed { // Coverage: This should never happen.
			return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
		}
	}

	if options != nil && options.SignBy != "" {
		mech, err := signature.NewGPGSigningMechanism()
		if err != nil {
			return fmt.Errorf("Error initializing GPG: %v", err)
		}
		dockerReference := dest.Reference().DockerReference()
		if dockerReference == nil {
			return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
		}

		newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
		if err != nil {
			return fmt.Errorf("Error creating signature: %v", err)
		}
		sigs = append(sigs, newSig)
	}

	if err := dest.PutManifest(manifest); err != nil {
		return fmt.Errorf("Error writing manifest: %v", err)
	}

	if err := dest.PutSignatures(sigs); err != nil {
		return fmt.Errorf("Error writing signatures: %v", err)
	}

	if err := dest.Commit(); err != nil {
		return fmt.Errorf("Error committing the finished image: %v", err)
	}

	return nil
}
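Options is the only copy-time knob surfaced so far. A small sketch of a signing copy; the key ID is a hypothetical placeholder, and the policy context and references are taken as parameters:

```go
package main

import (
	copypkg "github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

// signingCopy copies srcRef to destRef and asks for a new signature to be
// attached; the key ID below is a hypothetical placeholder.
func signingCopy(pc *signature.PolicyContext, destRef, srcRef types.ImageReference) error {
	return copypkg.Image(nil, pc, destRef, srcRef, &copypkg.Options{
		RemoveSignatures: false,              // keep any signatures already on the source
		SignBy:           "0123456789ABCDEF", // hypothetical GPG key ID for the new signature
	})
}
```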
vendor/github.com/containers/image/signature/docker.go (new file, vendored, 60 lines)
@@ -0,0 +1,60 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"fmt"

	"github.com/containers/image/manifest"
)

// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
	manifestDigest, err := manifest.Digest(m)
	if err != nil {
		return nil, err
	}
	sig := privateSignature{
		Signature{
			DockerManifestDigest: manifestDigest,
			DockerReference:      dockerReference,
		},
	}
	return sig.sign(mech, keyIdentity)
}

// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
		validateKeyIdentity: func(keyIdentity string) error {
			if keyIdentity != expectedKeyIdentity {
				return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
			}
			return nil
		},
		validateSignedDockerReference: func(signedDockerReference string) error {
			if signedDockerReference != expectedDockerReference {
				return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
					signedDockerReference, expectedDockerReference)}
			}
			return nil
		},
		validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error {
			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
			if err != nil {
				return err
			}
			if !matches {
				return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
			}
			return nil
		},
	})
	if err != nil {
		return nil, err
	}
	return sig, nil
}
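A sketch of the sign/verify round trip these helpers enable; it assumes a GPG secret key with the (hypothetical) fingerprint below is available in the default keyring:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	manifestBlob := []byte(`{"schemaVersion": 2}`) // stand-in manifest content
	const keyID = "0123456789ABCDEF"               // hypothetical key fingerprint
	const ref = "docker.io/library/busybox:latest"

	sig, err := signature.SignDockerManifest(manifestBlob, ref, mech, keyID)
	if err != nil {
		log.Fatal(err)
	}
	verified, err := signature.VerifyDockerManifestSignature(sig, manifestBlob, ref, mech, keyID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed digest:", verified.DockerManifestDigest)
}
```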
vendor/github.com/containers/image/signature/json.go (new file, vendored, 107 lines)
@@ -0,0 +1,107 @@
package signature

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// jsonFormatError is returned when JSON does not match expected format.
type jsonFormatError string

func (err jsonFormatError) Error() string {
	return string(err)
}

// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct.
func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
	if len(m) != len(expectedKeys) {
		return jsonFormatError("Unexpected keys in a JSON object")
	}

	for _, k := range expectedKeys {
		if _, ok := m[k]; !ok {
			return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
		}
	}
	// Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
	return nil
}

// mapField returns a member fieldName of m, if it is a JSON map, or an error.
func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
	untyped, ok := m[fieldName]
	if !ok {
		return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
	}
	v, ok := untyped.(map[string]interface{})
	if !ok {
		return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
	}
	return v, nil
}

// stringField returns a member fieldName of m, if it is a string, or an error.
func stringField(m map[string]interface{}, fieldName string) (string, error) {
	untyped, ok := m[fieldName]
	if !ok {
		return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
	}
	v, ok := untyped.(string)
	if !ok {
		return "", jsonFormatError(fmt.Sprintf("Field %s is not a string", fieldName))
	}
	return v, nil
}

// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
	seenKeys := map[string]struct{}{}

	dec := json.NewDecoder(bytes.NewReader(data))
	t, err := dec.Token()
	if err != nil {
		return jsonFormatError(err.Error())
	}
	if t != json.Delim('{') {
		return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
	}
	for {
		t, err := dec.Token()
		if err != nil {
			return jsonFormatError(err.Error())
		}
		if t == json.Delim('}') {
			break
		}

		key, ok := t.(string)
		if !ok {
			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
			return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
		}
		if _, ok := seenKeys[key]; ok {
			return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
		}
		seenKeys[key] = struct{}{}

		valuePtr := fieldResolver(key)
		if valuePtr == nil {
			return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
		}
		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
		if err := dec.Decode(valuePtr); err != nil {
			return jsonFormatError(err.Error())
		}
	}
	if _, err := dec.Token(); err != io.EOF {
		return jsonFormatError("Unexpected data after JSON object")
	}
	return nil
}
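paranoidUnmarshalJSONObject is unexported, so the following standalone sketch shows the same strict field-resolver decoding pattern, which rejects unknown and duplicated keys that plain json.Unmarshal would silently accept:

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

// strictDecode mirrors the vendored pattern: every key must be resolved to a
// destination pointer, duplicates and trailing data are errors.
func strictDecode(data []byte, fieldResolver func(string) interface{}) error {
	seen := map[string]struct{}{}
	dec := json.NewDecoder(bytes.NewReader(data))
	if t, err := dec.Token(); err != nil || t != json.Delim('{') {
		return errors.New("JSON object expected")
	}
	for {
		t, err := dec.Token()
		if err != nil {
			return err
		}
		if t == json.Delim('}') {
			break
		}
		key := t.(string) // object keys are always string tokens here
		if _, dup := seen[key]; dup {
			return fmt.Errorf("duplicate key %q", key)
		}
		seen[key] = struct{}{}
		ptr := fieldResolver(key)
		if ptr == nil {
			return fmt.Errorf("unknown key %q", key)
		}
		if err := dec.Decode(ptr); err != nil {
			return err
		}
	}
	if _, err := dec.Token(); err != io.EOF {
		return errors.New("trailing data after JSON object")
	}
	return nil
}

func main() {
	var typ string
	var count int
	err := strictDecode([]byte(`{"type": "reject", "count": 3}`), func(key string) interface{} {
		switch key {
		case "type":
			return &typ
		case "count":
			return &count
		default:
			return nil // any other key is an error
		}
	})
	fmt.Println(typ, count, err) // reject 3 <nil>
}
```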
vendor/github.com/containers/image/signature/mechanism.go (new file, vendored, 121 lines)
@@ -0,0 +1,121 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"bytes"
	"fmt"

	"github.com/mtrmac/gpgme"
)

// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
	// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
	// The blob is assumed to have an appropriate format (the caller is expected to know which one).
	// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
	ImportKeysFromBytes(blob []byte) ([]string, error)
	// Sign creates a (non-detached) signature of input using keyIdentity.
	Sign(input []byte, keyIdentity string) ([]byte, error)
	// Verify parses unverifiedSignature and returns the content and the signer's identity.
	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
}

// A GPG/OpenPGP signing mechanism.
type gpgSigningMechanism struct {
	ctx *gpgme.Context
}

// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
func NewGPGSigningMechanism() (SigningMechanism, error) {
	return newGPGSigningMechanismInDirectory("")
}

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
	ctx, err := gpgme.New()
	if err != nil {
		return nil, err
	}
	if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
		return nil, err
	}
	if optionalDir != "" {
		err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
		if err != nil {
			return nil, err
		}
	}
	ctx.SetArmor(false)
	ctx.SetTextMode(false)
	return gpgSigningMechanism{ctx: ctx}, nil
}

// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes.
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
	inputData, err := gpgme.NewDataBytes(blob)
	if err != nil {
		return nil, err
	}
	res, err := m.ctx.Import(inputData)
	if err != nil {
		return nil, err
	}
	keyIdentities := []string{}
	for _, i := range res.Imports {
		if i.Result == nil {
			keyIdentities = append(keyIdentities, i.Fingerprint)
		}
	}
	return keyIdentities, nil
}

// Sign implements SigningMechanism.Sign.
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
	key, err := m.ctx.GetKey(keyIdentity, true)
	if err != nil {
		return nil, err
	}
	inputData, err := gpgme.NewDataBytes(input)
	if err != nil {
		return nil, err
	}
	var sigBuffer bytes.Buffer
	sigData, err := gpgme.NewDataWriter(&sigBuffer)
	if err != nil {
		return nil, err
	}
	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
		return nil, err
	}
	return sigBuffer.Bytes(), nil
}

// Verify implements SigningMechanism.Verify.
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
	signedBuffer := bytes.Buffer{}
	signedData, err := gpgme.NewDataWriter(&signedBuffer)
	if err != nil {
		return nil, "", err
	}
	unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
	if err != nil {
		return nil, "", err
	}
	_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
	if err != nil {
		return nil, "", err
	}
	if len(sigs) != 1 {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
	}
	sig := sigs[0]
	// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves.
	if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
		// FIXME: Better error reporting eventually
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
	}
	return signedBuffer.Bytes(), sig.Fingerprint, nil
}
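Using the mechanism directly rather than through the docker.go helpers; both file paths below are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	keyBlob, err := ioutil.ReadFile("/path/to/pubkey.gpg") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	ids, err := mech.ImportKeysFromBytes(keyBlob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("imported key fingerprints:", ids)

	sigBlob, err := ioutil.ReadFile("/path/to/signature") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	contents, signer, err := mech.Verify(sigBlob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signed by %s: %d bytes of content\n", signer, len(contents))
}
```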
vendor/github.com/containers/image/signature/policy_config.go (new file, vendored, 697 lines)
@@ -0,0 +1,697 @@
// policy_config.go handles creation of policy objects, either by parsing JSON
// or by programs building them programmatically.

// The New* constructors are intended to be a stable API. FIXME: after an independent review.

// Do not invoke the internals of the JSON marshaling/unmarshaling directly.

// We can't just blindly call json.Unmarshal because that would silently ignore
// typos, and that would just not do for security policy.

// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
// But at least it is not worse than blind json.Unmarshal()…

package signature

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"

	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/docker/docker/reference"
)

// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
// You can override this at build time with
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath

// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"

// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
type InvalidPolicyFormatError string

func (err InvalidPolicyFormatError) Error() string {
	return string(err)
}

// FIXME: NewDefaultPolicy, from default file (or environment if trusted?)

// DefaultPolicy returns the default policy of the system.
// Most applications should be using this method to get the policy configured
// by the system administrator.
// ctx should usually be nil, can be set to override the default.
// NOTE: When this function returns an error, report it to the user and abort.
// DO NOT hard-code fallback policies in your application.
func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) {
	return NewPolicyFromFile(defaultPolicyPath(ctx))
}

// defaultPolicyPath returns a path to the default policy of the system.
func defaultPolicyPath(ctx *types.SystemContext) string {
	if ctx != nil {
		if ctx.SignaturePolicyPath != "" {
			return ctx.SignaturePolicyPath
		}
		if ctx.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
		}
	}
	return systemDefaultPolicyPath
}

// NewPolicyFromFile returns a policy configured in the specified file.
func NewPolicyFromFile(fileName string) (*Policy, error) {
	contents, err := ioutil.ReadFile(fileName)
	if err != nil {
		return nil, err
	}
	return NewPolicyFromBytes(contents)
}

// NewPolicyFromBytes returns a policy parsed from the specified blob.
// Use this function instead of calling json.Unmarshal directly.
func NewPolicyFromBytes(data []byte) (*Policy, error) {
	p := Policy{}
	if err := json.Unmarshal(data, &p); err != nil {
		return nil, InvalidPolicyFormatError(err.Error())
	}
	return &p, nil
}

// Compile-time check that Policy implements json.Unmarshaler.
var _ json.Unmarshaler = (*Policy)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (p *Policy) UnmarshalJSON(data []byte) error {
	*p = Policy{}
	transports := policyTransportsMap{}
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "default":
			return &p.Default
		case "transports":
			return &transports
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if p.Default == nil {
		return InvalidPolicyFormatError("Default policy is missing")
	}
	p.Transports = map[string]PolicyTransportScopes(transports)
	return nil
}

// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
type policyTransportsMap map[string]PolicyTransportScopes

// Compile-time check that policyTransportsMap implements json.Unmarshaler.
var _ json.Unmarshaler = (*policyTransportsMap)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
	// So, use a temporary map of pointers-to-slices and convert.
	tmpMap := map[string]*PolicyTransportScopes{}
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		transport, ok := transports.KnownTransports[key]
		if !ok {
			return nil
		}
		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
		if _, ok := tmpMap[key]; ok {
			return nil
		}
		ptsWithTransport := policyTransportScopesWithTransport{
			transport: transport,
			dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
		}
		tmpMap[key] = ptsWithTransport.dest
		return &ptsWithTransport
	}); err != nil {
		return err
	}
	for key, ptr := range tmpMap {
		(*m)[key] = *ptr
	}
	return nil
}

// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
// we want to only use policyTransportScopesWithTransport
var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
	return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
}

// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
// while validating using a specific ImageTransport.
type policyTransportScopesWithTransport struct {
	transport types.ImageTransport
	dest      *PolicyTransportScopes
}

// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
	// So, use a temporary map of pointers-to-slices and convert.
	tmpMap := map[string]*PolicyRequirements{}
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
		if _, ok := tmpMap[key]; ok {
			return nil
		}
		if key != "" {
			if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
				return nil
			}
		}
		ptr := &PolicyRequirements{} // This allocates a new instance on each call.
		tmpMap[key] = ptr
		return ptr
	}); err != nil {
		return err
	}
	for key, ptr := range tmpMap {
		(*m.dest)[key] = *ptr
	}
	return nil
}

// Compile-time check that PolicyRequirements implements json.Unmarshaler.
var _ json.Unmarshaler = (*PolicyRequirements)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
	reqJSONs := []json.RawMessage{}
	if err := json.Unmarshal(data, &reqJSONs); err != nil {
		return err
	}
	if len(reqJSONs) == 0 {
		return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
	}
	res := make([]PolicyRequirement, len(reqJSONs))
	for i, reqJSON := range reqJSONs {
		req, err := newPolicyRequirementFromJSON(reqJSON)
		if err != nil {
			return err
		}
		res[i] = req
	}
	*m = res
	return nil
}

// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
	var typeField prCommon
	if err := json.Unmarshal(data, &typeField); err != nil {
		return nil, err
	}
	var res PolicyRequirement
	switch typeField.Type {
	case prTypeInsecureAcceptAnything:
		res = &prInsecureAcceptAnything{}
	case prTypeReject:
		res = &prReject{}
	case prTypeSignedBy:
		res = &prSignedBy{}
	case prTypeSignedBaseLayer:
		res = &prSignedBaseLayer{}
	default:
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
	}
	if err := json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}

// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type.
func newPRInsecureAcceptAnything() *prInsecureAcceptAnything {
	return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}}
}

// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
func NewPRInsecureAcceptAnything() PolicyRequirement {
	return newPRInsecureAcceptAnything()
}

// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler.
var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
	*pr = prInsecureAcceptAnything{}
	var tmp prInsecureAcceptAnything
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prTypeInsecureAcceptAnything {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*pr = *newPRInsecureAcceptAnything()
	return nil
}

// newPRReject is NewPRReject, except it returns the private type.
func newPRReject() *prReject {
	return &prReject{prCommon{Type: prTypeReject}}
}

// NewPRReject returns a new "reject" PolicyRequirement.
func NewPRReject() PolicyRequirement {
	return newPRReject()
}

// Compile-time check that prReject implements json.Unmarshaler.
var _ json.Unmarshaler = (*prReject)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prReject) UnmarshalJSON(data []byte) error {
	*pr = prReject{}
	var tmp prReject
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prTypeReject {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*pr = *newPRReject()
	return nil
}

// newPRSignedBy returns a new prSignedBy if parameters are valid.
func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
	if !keyType.IsValid() {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
	}
	if len(keyPath) > 0 && len(keyData) > 0 {
		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
	}
	if signedIdentity == nil {
		return nil, InvalidPolicyFormatError("signedIdentity not specified")
	}
	return &prSignedBy{
		prCommon:       prCommon{Type: prTypeSignedBy},
		KeyType:        keyType,
		KeyPath:        keyPath,
		KeyData:        keyData,
		SignedIdentity: signedIdentity,
	}, nil
}

// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
}

// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath.
func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
}

// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
	return newPRSignedBy(keyType, "", keyData, signedIdentity)
}

// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData.
func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
}

// Compile-time check that prSignedBy implements json.Unmarshaler.
var _ json.Unmarshaler = (*prSignedBy)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
	*pr = prSignedBy{}
	var tmp prSignedBy
	var gotKeyPath, gotKeyData = false, false
	var signedIdentity json.RawMessage
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "keyType":
			return &tmp.KeyType
		case "keyPath":
			gotKeyPath = true
			return &tmp.KeyPath
		case "keyData":
			gotKeyData = true
			return &tmp.KeyData
		case "signedIdentity":
			return &signedIdentity
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prTypeSignedBy {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	if signedIdentity == nil {
		tmp.SignedIdentity = NewPRMMatchExact()
	} else {
		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
		if err != nil {
			return err
		}
		tmp.SignedIdentity = si
	}

	var res *prSignedBy
	var err error
	switch {
	case gotKeyPath && gotKeyData:
		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
	case gotKeyPath && !gotKeyData:
		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
	case !gotKeyPath && gotKeyData:
		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
	case !gotKeyPath && !gotKeyData:
		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
	default: // Coverage: This should never happen
		return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
	}
	if err != nil {
		return err
	}
	*pr = *res

	return nil
}

// IsValid returns true iff kt is a recognized value.
func (kt sbKeyType) IsValid() bool {
	switch kt {
	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
		return true
	default:
		return false
	}
}

// Compile-time check that sbKeyType implements json.Unmarshaler.
var _ json.Unmarshaler = (*sbKeyType)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
	*kt = sbKeyType("")
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	if !sbKeyType(s).IsValid() {
		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
	}
	*kt = sbKeyType(s)
	return nil
}
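Putting the strict parsers together, a "signedBy" requirement as it might appear in a policy file; the key path and the docker.io/library scope are illustrative values, and the scope is assumed to pass the docker transport's scope validation:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/signature"
)

const policyJSON = `{
    "default": [{"type": "reject"}],
    "transports": {
        "docker": {
            "docker.io/library": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/etc/pki/containers/pubkey.gpg",
                    "signedIdentity": {"type": "matchRepository"}
                }
            ]
        }
    }
}`

func main() {
	policy, err := signature.NewPolicyFromBytes([]byte(policyJSON))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scopes for docker transport:", len(policy.Transports["docker"]))
}
```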
// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
|
||||
func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
|
||||
if baseLayerIdentity == nil {
|
||||
return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
|
||||
}
|
||||
return &prSignedBaseLayer{
|
||||
prCommon: prCommon{Type: prTypeSignedBaseLayer},
|
||||
BaseLayerIdentity: baseLayerIdentity,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
|
||||
func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
|
||||
return newPRSignedBaseLayer(baseLayerIdentity)
|
||||
}
|
||||
|
||||
// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
||||
*pr = prSignedBaseLayer{}
|
||||
var tmp prSignedBaseLayer
|
||||
var baseLayerIdentity json.RawMessage
|
||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||
switch key {
|
||||
case "type":
|
||||
return &tmp.Type
|
||||
case "baseLayerIdentity":
|
||||
return &baseLayerIdentity
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tmp.Type != prTypeSignedBaseLayer {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
}
|
||||
if baseLayerIdentity == nil {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("baseLayerIdentity not specified"))
|
||||
}
|
||||
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := newPRSignedBaseLayer(bli)
|
||||
if err != nil {
|
||||
// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
|
||||
return err
|
||||
}
|
||||
*pr = *res
|
||||
return nil
|
||||
}
|
||||
|
||||
// newPolicyRequirementFromJSON parses JSON data into a PolicyReferenceMatch implementation.
|
||||
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
|
||||
var typeField prmCommon
|
||||
if err := json.Unmarshal(data, &typeField); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res PolicyReferenceMatch
|
||||
switch typeField.Type {
|
||||
case prmTypeMatchExact:
|
||||
res = &prmMatchExact{}
|
||||
case prmTypeMatchRepository:
|
||||
res = &prmMatchRepository{}
|
||||
case prmTypeExactReference:
|
||||
res = &prmExactReference{}
|
||||
case prmTypeExactRepository:
|
||||
res = &prmExactRepository{}
|
||||
default:
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
|
||||
}
|
||||
if err := json.Unmarshal(data, &res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// newPRMMatchExact is NewPRMMatchExact, except it resturns the private type.
|
||||
func newPRMMatchExact() *prmMatchExact {
|
||||
return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
|
||||
}
|
||||
|
||||
// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
|
||||
func NewPRMMatchExact() PolicyReferenceMatch {
|
||||
return newPRMMatchExact()
|
||||
}
|
||||
|
||||
// Compile-time check that prmMatchExact implements json.Unmarshaler.
|
||||
var _ json.Unmarshaler = (*prmMatchExact)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
|
||||
*prm = prmMatchExact{}
|
||||
var tmp prmMatchExact
|
||||
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prmTypeMatchExact {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*prm = *newPRMMatchExact()
	return nil
}

// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
func newPRMMatchRepository() *prmMatchRepository {
	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
}

// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
func NewPRMMatchRepository() PolicyReferenceMatch {
	return newPRMMatchRepository()
}

// Compile-time check that prmMatchRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchRepository)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
	*prm = prmMatchRepository{}
	var tmp prmMatchRepository
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prmTypeMatchRepository {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*prm = *newPRMMatchRepository()
	return nil
}

// newPRMExactReference is NewPRMExactReference, except it returns the private type.
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
	ref, err := reference.ParseNamed(dockerReference)
	if err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
	}
	if reference.IsNameOnly(ref) {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
	}
	return &prmExactReference{
		prmCommon:       prmCommon{Type: prmTypeExactReference},
		DockerReference: dockerReference,
	}, nil
}

// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
	return newPRMExactReference(dockerReference)
}

// Compile-time check that prmExactReference implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactReference)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
	*prm = prmExactReference{}
	var tmp prmExactReference
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "dockerReference":
			return &tmp.DockerReference
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prmTypeExactReference {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}

	res, err := newPRMExactReference(tmp.DockerReference)
	if err != nil {
		return err
	}
	*prm = *res
	return nil
}

// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
	if _, err := reference.ParseNamed(dockerRepository); err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
	}
	return &prmExactRepository{
		prmCommon:        prmCommon{Type: prmTypeExactRepository},
		DockerRepository: dockerRepository,
	}, nil
}

// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
	return newPRMExactRepository(dockerRepository)
}

// Compile-time check that prmExactRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactRepository)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
	*prm = prmExactRepository{}
	var tmp prmExactRepository
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "dockerRepository":
			return &tmp.DockerRepository
		default:
			return nil
		}
	}); err != nil {
		return err
	}

	if tmp.Type != prmTypeExactRepository {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}

	res, err := newPRMExactRepository(tmp.DockerRepository)
	if err != nil {
		return err
	}
	*prm = *res
	return nil
}
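The exported constructors and the UnmarshalJSON implementations above are intentionally symmetric: a rule built in code serializes to exactly the JSON shape the paranoid unmarshalers accept back. A minimal sketch from outside the package (the busybox reference is an arbitrary example, not something from this commit):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/containers/image/signature"
	)

	func main() {
		// NewPRMExactReference rejects name-only references, so a tag or digest is required.
		prm, err := signature.NewPRMExactReference("docker.io/library/busybox:latest")
		if err != nil {
			panic(err)
		}
		// The JSON tags on the private struct produce the same keys UnmarshalJSON expects.
		data, err := json.Marshal(prm)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(data)) // {"type":"exactReference","dockerReference":"docker.io/library/busybox:latest"}
	}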
287
vendor/github.com/containers/image/signature/policy_eval.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
// This defines the top-level policy evaluation API.
// To the extent possible, the interface of the functions provided
// here is intended to be completely unambiguous, and stable for users
// to rely on.

package signature

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/types"
)

// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
type PolicyRequirementError string

func (err PolicyRequirementError) Error() string {
	return string(err)
}

// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
type signatureAcceptanceResult string

const (
	sarAccepted signatureAcceptanceResult = "sarAccepted"
	sarRejected signatureAcceptanceResult = "sarRejected"
	sarUnknown  signatureAcceptanceResult = "sarUnknown"
)

// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.
type PolicyRequirement interface {
	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
	// costly initialization like creating temporary GPG home directories and reading files.
	// Setup() (someState, error)
	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.

	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
	// - sarAccepted if the signature has been verified against the appropriate public key
	//   (where "appropriate public key" may depend on the contents of the signature);
	//   in that case a parsed Signature should be returned.
	// - sarRejected if the signature has not been verified;
	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
	//   succeeded but the result was rejection.
	// - sarUnknown if this PolicyRequirement does not deal with signatures.
	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
	//   Returning sarUnknown and a non-nil error value is invalid.
	// WARNING: This makes the signature contents acceptable for further processing,
	// but it does not necessarily mean that the contents of the signature are
	// consistent with local policy.
	// For example:
	// - Do not use a true value to determine whether to run
	//   a container based on this image; use IsRunningImageAllowed instead.
	// - Just because a signature is accepted does not automatically mean the contents of the
	//   signature are authorized to run code as root, or to affect system or cluster configuration.
	isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error)

	// isRunningImageAllowed returns true if the requirement allows running an image.
	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
	// succeeded but the result was rejection.
	// WARNING: This validates signatures and the manifest, but does not download or validate the
	// layers. Users must validate that the layers match their expected digests.
	isRunningImageAllowed(image types.Image) (bool, error)
}

// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
type PolicyReferenceMatch interface {
	// matchesDockerReference decides whether a specific image identity is accepted for an image
	// (or, usually, for the image's Reference().DockerReference()). Note that
	// image.Reference().DockerReference() may be nil.
	matchesDockerReference(image types.Image, signatureDockerReference string) bool
}

// PolicyContext encapsulates a policy and possible cached state
// for speeding up its evaluation.
type PolicyContext struct {
	Policy *Policy
	state  policyContextState // Internal consistency checking
}

// policyContextState is used internally to verify the users are not misusing a PolicyContext.
type policyContextState string

const (
	pcInvalid      policyContextState = ""
	pcInitializing policyContextState = "Initializing"
	pcReady        policyContextState = "Ready"
	pcInUse        policyContextState = "InUse"
	pcDestroying   policyContextState = "Destroying"
	pcDestroyed    policyContextState = "Destroyed"
)

// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
	if pc.state != expected {
		return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
	}
	pc.state = new
	return nil
}

// NewPolicyContext sets up and initializes a context for the specified policy.
// The policy must not be modified while the context exists. FIXME: make a deep copy?
// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
	pc := &PolicyContext{Policy: policy, state: pcInitializing}
	// FIXME: initialize
	if err := pc.changeState(pcInitializing, pcReady); err != nil {
		// Huh?! This should never fail, we didn't give the pointer to anybody.
		// Just give up and leave unclean state around.
		return nil, err
	}
	return pc, nil
}

// Destroy should be called when the user of the context is done with it.
func (pc *PolicyContext) Destroy() error {
	if err := pc.changeState(pcReady, pcDestroying); err != nil {
		return err
	}
	// FIXME: destroy
	return pc.changeState(pcDestroying, pcDestroyed)
}

// policyIdentityLogName returns a string description of the image identity for policy purposes.
// ONLY use this for log messages, not for any decisions!
func policyIdentityLogName(ref types.ImageReference) string {
	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
}

// requirementsForImageRef selects the appropriate requirements for ref.
func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
	// Do we have a PolicyTransportScopes for this transport?
	transportName := ref.Transport().Name()
	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
		// Look for a full match.
		identity := ref.PolicyConfigurationIdentity()
		if req, ok := transportScopes[identity]; ok {
			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
			return req
		}

		// Look for a match of the possible parent namespaces.
		for _, name := range ref.PolicyConfigurationNamespaces() {
			if req, ok := transportScopes[name]; ok {
				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
				return req
			}
		}

		// Look for a default match for the transport.
		if req, ok := transportScopes[""]; ok {
			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
			return req
		}
	}

	logrus.Debugf(" Using default policy section")
	return pc.Policy.Default
}

// GetSignaturesWithAcceptedAuthor returns those signatures from an image
// for which the policy accepts the author (and which have been successfully
// verified).
// NOTE: This may legitimately return an empty list and no error, if the image
// has no signatures or only invalid signatures.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use the existence of an accepted signature to determine whether to run
//   a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
//   signature are authorized to run code as root, or to affect system or cluster configuration.
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.Image) (sigs []*Signature, finalErr error) {
	if err := pc.changeState(pcReady, pcInUse); err != nil {
		return nil, err
	}
	defer func() {
		if err := pc.changeState(pcInUse, pcReady); err != nil {
			sigs = nil
			finalErr = err
		}
	}()

	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
	reqs := pc.requirementsForImageRef(image.Reference())

	// FIXME: rename Signatures to UnverifiedSignatures
	unverifiedSignatures, err := image.Signatures()
	if err != nil {
		return nil, err
	}

	res := make([]*Signature, 0, len(unverifiedSignatures))
	for sigNumber, sig := range unverifiedSignatures {
		var acceptedSig *Signature // non-nil if accepted
		rejected := false
		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
		logrus.Debugf("Evaluating signature %d:", sigNumber)
	interpretingReqs:
		for reqNumber, req := range reqs {
			// FIXME: Log the requirement itself? For now, we use just the number.
			// FIXME: supply state
			switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
			case sarAccepted:
				if as == nil { // Coverage: this should never happen
					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
					rejected = true
					break interpretingReqs
				}
				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
				if acceptedSig == nil {
					acceptedSig = as
				} else if *as != *acceptedSig { // Coverage: this should never happen
					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
					rejected = true
					acceptedSig = nil
					break interpretingReqs
				}
			case sarRejected:
				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
				rejected = true
				break interpretingReqs
			case sarUnknown:
				if err != nil { // Coverage: this should never happen
					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
					rejected = true
					break interpretingReqs
				}
				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
			default: // Coverage: this should never happen
				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
				rejected = true
				break interpretingReqs
			}
		}
		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
		if acceptedSig != nil && !rejected {
			logrus.Debugf(" Overall: OK, signature accepted")
			res = append(res, acceptedSig)
		} else {
			logrus.Debugf(" Overall: Signature not accepted")
		}
	}
	return res, nil
}

// IsRunningImageAllowed returns true iff the policy allows running the image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
func (pc *PolicyContext) IsRunningImageAllowed(image types.Image) (res bool, finalErr error) {
	if err := pc.changeState(pcReady, pcInUse); err != nil {
		return false, err
	}
	defer func() {
		if err := pc.changeState(pcInUse, pcReady); err != nil {
			res = false
			finalErr = err
		}
	}()

	logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
	reqs := pc.requirementsForImageRef(image.Reference())

	if len(reqs) == 0 {
		return false, PolicyRequirementError("List of verification policy requirements must not be empty")
	}

	for reqNumber, req := range reqs {
		// FIXME: supply state
		allowed, err := req.isRunningImageAllowed(image)
		if !allowed {
			logrus.Debugf("Requirement %d: denied, done", reqNumber)
			return false, err
		}
		logrus.Debugf(" Requirement %d: allowed", reqNumber)
	}
	// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
	logrus.Debugf("Overall: allowed")
	return true, nil
}
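The state machine above (Ready → InUse → Ready while evaluating, then Destroying → Destroyed) implies a fixed calling pattern. A minimal lifecycle sketch, assuming the NewPRInsecureAcceptAnything constructor from policy_config.go and a types.Image value img obtained elsewhere (both are assumptions here, not shown in this hunk):

	// Accept anything by default; a real policy would be stricter.
	pol := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	}
	pc, err := signature.NewPolicyContext(pol)
	if err != nil {
		return err
	}
	defer pc.Destroy() // walks Ready -> Destroying -> Destroyed exactly once

	allowed, err := pc.IsRunningImageAllowed(img) // img is a hypothetical types.Image
	if !allowed {
		return err // err is documented to be non-nil whenever allowed is false
	}

Because each call moves the context to InUse and back, a PolicyContext is not meant to be shared between concurrent evaluations; a second caller would fail the changeState check.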
18
vendor/github.com/containers/image/signature/policy_eval_baselayer.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Policy evaluation for prSignedBaseLayer.

package signature

import (
	"github.com/Sirupsen/logrus"
	"github.com/containers/image/types"
)

func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
	return sarUnknown, nil, nil
}

func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.Image) (bool, error) {
	// FIXME? Reject this at policy parsing time already?
	logrus.Errorf("signedBaseLayer not implemented yet!")
	return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
}
137
vendor/github.com/containers/image/signature/policy_eval_signedby.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
// Policy evaluation for prSignedBy.

package signature

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

func (pr *prSignedBy) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
	switch pr.KeyType {
	case SBKeyTypeGPGKeys:
	case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
		// FIXME? Reject this at policy parsing time already?
		return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
	default:
		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
		return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
	}

	if pr.KeyPath != "" && pr.KeyData != nil {
		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
	}
	// FIXME: move this to per-context initialization
	var data []byte
	if pr.KeyData != nil {
		data = pr.KeyData
	} else {
		d, err := ioutil.ReadFile(pr.KeyPath)
		if err != nil {
			return sarRejected, nil, err
		}
		data = d
	}

	// FIXME: move this to per-context initialization
	dir, err := ioutil.TempDir("", "skopeo-signedBy-")
	if err != nil {
		return sarRejected, nil, err
	}
	defer os.RemoveAll(dir)
	mech, err := newGPGSigningMechanismInDirectory(dir)
	if err != nil {
		return sarRejected, nil, err
	}

	trustedIdentities, err := mech.ImportKeysFromBytes(data)
	if err != nil {
		return sarRejected, nil, err
	}
	if len(trustedIdentities) == 0 {
		return sarRejected, nil, PolicyRequirementError("No public keys imported")
	}

	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
		validateKeyIdentity: func(keyIdentity string) error {
			for _, trustedIdentity := range trustedIdentities {
				if keyIdentity == trustedIdentity {
					return nil
				}
			}
			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
			// not be reachable.
			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
		},
		validateSignedDockerReference: func(ref string) error {
			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
			}
			return nil
		},
		validateSignedDockerManifestDigest: func(digest string) error {
			m, _, err := image.Manifest()
			if err != nil {
				return err
			}
			digestMatches, err := manifest.MatchesDigest(m, digest)
			if err != nil {
				return err
			}
			if !digestMatches {
				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
			}
			return nil
		},
	})
	if err != nil {
		return sarRejected, nil, err
	}

	return sarAccepted, signature, nil
}

func (pr *prSignedBy) isRunningImageAllowed(image types.Image) (bool, error) {
	sigs, err := image.Signatures()
	if err != nil {
		return false, err
	}
	var rejections []error
	for _, s := range sigs {
		var reason error
		switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
		case sarAccepted:
			// One accepted signature is enough.
			return true, nil
		case sarRejected:
			reason = err
		case sarUnknown:
			// Huh?! This should not happen at all; treat it as any other invalid value.
			fallthrough
		default:
			reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
		}
		rejections = append(rejections, reason)
	}
	var summary error
	switch len(rejections) {
	case 0:
		summary = PolicyRequirementError("A signature was required, but no signature exists")
	case 1:
		summary = rejections[0]
	default:
		var msgs []string
		for _, e := range rejections {
			msgs = append(msgs, e.Error())
		}
		summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
			strings.Join(msgs, "; ")))
	}
	return false, summary
}
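The isRunningImageAllowed loop above short-circuits on the first accepted signature and otherwise folds every rejection reason into one summary error. That aggregation pattern, isolated into a self-contained sketch (all names here are illustrative, not part of the library):

	package main

	import (
		"errors"
		"fmt"
		"strings"
	)

	// summarize mirrors the switch at the end of isRunningImageAllowed:
	// no rejections means no signatures existed, one rejection is returned
	// verbatim, and several are joined into a single message.
	func summarize(rejections []error) error {
		switch len(rejections) {
		case 0:
			return errors.New("a signature was required, but no signature exists")
		case 1:
			return rejections[0]
		default:
			msgs := make([]string, 0, len(rejections))
			for _, e := range rejections {
				msgs = append(msgs, e.Error())
			}
			return fmt.Errorf("none of the signatures were accepted, reasons: %s", strings.Join(msgs, "; "))
		}
	}

	func main() {
		fmt.Println(summarize([]error{errors.New("untrusted key"), errors.New("digest mismatch")}))
	}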
28
vendor/github.com/containers/image/signature/policy_eval_simple.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
// Policy evaluation for the various simple PolicyRequirement types.

package signature

import (
	"fmt"

	"github.com/containers/image/transports"
	"github.com/containers/image/types"
)

func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
	// prInsecureAcceptAnything semantics: Every image is allowed to run,
	// but this does not consider the signature as verified.
	return sarUnknown, nil, nil
}

func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.Image) (bool, error) {
	return true, nil
}

func (pr *prReject) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
	return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}

func (pr *prReject) isRunningImageAllowed(image types.Image) (bool, error) {
	return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}
78
vendor/github.com/containers/image/signature/policy_reference_match.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
// PolicyReferenceMatch implementations.

package signature

import (
	"fmt"

	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/docker/docker/reference"
)

// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
func parseImageAndDockerReference(image types.Image, s2 string) (reference.Named, reference.Named, error) {
	r1 := image.Reference().DockerReference()
	if r1 == nil {
		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
			transports.ImageName(image.Reference())))
	}
	r2, err := reference.ParseNamed(s2)
	if err != nil {
		return nil, nil, err
	}
	return r1, r2, nil
}

func (prm *prmMatchExact) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
	if err != nil {
		return false
	}
	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
		return false
	}
	return signature.String() == intended.String()
}

func (prm *prmMatchRepository) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
	if err != nil {
		return false
	}
	return signature.Name() == intended.Name()
}

// parseDockerReferences converts two reference strings into parsed entities, failing on any error
func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
	r1, err := reference.ParseNamed(s1)
	if err != nil {
		return nil, nil, err
	}
	r2, err := reference.ParseNamed(s2)
	if err != nil {
		return nil, nil, err
	}
	return r1, r2, nil
}

func (prm *prmExactReference) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
	if err != nil {
		return false
	}
	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
		return false
	}
	return signature.String() == intended.String()
}

func (prm *prmExactRepository) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
	if err != nil {
		return false
	}
	return signature.Name() == intended.Name()
}
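The practical difference between the exact and repository matchers is String() versus Name() on a parsed reference. A small illustration against the vendored docker/docker reference package (the image names are arbitrary examples):

	package main

	import (
		"fmt"

		"github.com/docker/docker/reference"
	)

	func main() {
		a, _ := reference.ParseNamed("docker.io/library/busybox:latest")
		b, _ := reference.ParseNamed("docker.io/library/busybox:1.25")

		fmt.Println(a.String() == b.String()) // false: an exact match sees the differing tags
		fmt.Println(a.Name() == b.Name())     // true: a repository match ignores them
	}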
143
vendor/github.com/containers/image/signature/policy_types.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

// This defines types used to represent a signature verification policy in memory.
// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
// built using the constructor functions provided in policy_config.go.

package signature

// Policy defines requirements for considering a signature valid.
type Policy struct {
	// Default applies to any image which does not have a matching policy in Transports.
	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
	// if the image matches none of the scopes.
	Default    PolicyRequirements               `json:"default"`
	Transports map[string]PolicyTransportScopes `json:"transports"`
}

// PolicyTransportScopes defines policies for images for a specific transport,
// for various scopes, the map keys.
// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
// there is one scope precisely matching to a single image, and namespace scopes as prefixes
// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
// The empty scope, if it exists, is considered a parent namespace of all other scopes.
// The most specific scope wins; duplication is prohibited (hard failure).
type PolicyTransportScopes map[string]PolicyRequirements

// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
// Must not be empty, frequently will only contain a single element.
type PolicyRequirements []PolicyRequirement

// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.

// prCommon is the common type field in a JSON encoding of PolicyRequirement.
type prCommon struct {
	Type prTypeIdentifier `json:"type"`
}

// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
type prTypeIdentifier string

const (
	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
	prTypeReject                 prTypeIdentifier = "reject"
	prTypeSignedBy               prTypeIdentifier = "signedBy"
	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
)

// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
// every image is allowed to run.
// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
// FIXME? Better name?
type prInsecureAcceptAnything struct {
	prCommon
}

// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
type prReject struct {
	prCommon
}

// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
type prSignedBy struct {
	prCommon

	// KeyType specifies what kind of key reference KeyPath/KeyData is.
	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”
	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
	KeyType sbKeyType `json:"keyType"`

	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
	KeyPath string `json:"keyPath,omitempty"`
	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
	KeyData []byte `json:"keyData,omitempty"`

	// SignedIdentity specifies what image identity the signature must be claiming about the image.
	// Defaults to "match-exact" if not specified.
	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}

// sbKeyType are the allowed values for prSignedBy.KeyType
type sbKeyType string

const (
	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
	// FIXME: PEM, DER?
	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
	// FIXME: PEM, DER?
	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
)

// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
type prSignedBaseLayer struct {
	prCommon
	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}

// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.

// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
type prmCommon struct {
	Type prmTypeIdentifier `json:"type"`
}

// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
type prmTypeIdentifier string

const (
	prmTypeMatchExact      prmTypeIdentifier = "matchExact"
	prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
	prmTypeExactReference  prmTypeIdentifier = "exactReference"
	prmTypeExactRepository prmTypeIdentifier = "exactRepository"
)

// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
type prmMatchExact struct {
	prmCommon
}

// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
type prmMatchRepository struct {
	prmCommon
}

// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
type prmExactReference struct {
	prmCommon
	DockerReference string `json:"dockerReference"`
}

// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
type prmExactRepository struct {
	prmCommon
	DockerRepository string `json:"dockerRepository"`
}
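Putting the types together, a policy document is a default requirement list plus per-transport scope maps. A sketch of what a configuration could look like, assuming the NewPolicyFromBytes parser from policy_config.go (the key path and repository names are illustrative):

	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "docker.io/library": [{
	                "type": "signedBy",
	                "keyType": "GPGKeys",
	                "keyPath": "/etc/pki/containers/example-key.gpg",
	                "signedIdentity": {"type": "matchRepository"}
	            }]
	        }
	    }
	}`)
	pol, err := signature.NewPolicyFromBytes(policyJSON)

For a docker-transport image under docker.io/library the signedBy scope applies; anything else falls through to the reject default, matching the lookup order in requirementsForImageRef.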
191
vendor/github.com/containers/image/signature/signature.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.

package signature

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/containers/image/version"
)

const (
	signatureType = "atomic container signature"
)

// InvalidSignatureError is returned when parsing an invalid signature.
type InvalidSignatureError struct {
	msg string
}

func (err InvalidSignatureError) Error() string {
	return err.msg
}

// Signature is a parsed content of a signature.
type Signature struct {
	DockerManifestDigest string // FIXME: more precise type?
	DockerReference      string // FIXME: more precise type?
}

// Wrap signature to add to it some methods which we don't want to make public.
type privateSignature struct {
	Signature
}

// Compile-time check that privateSignature implements json.Marshaler
var _ json.Marshaler = (*privateSignature)(nil)

// MarshalJSON implements the json.Marshaler interface.
func (s privateSignature) MarshalJSON() ([]byte, error) {
	return s.marshalJSONWithVariables(time.Now().UTC().Unix(), "atomic "+version.Version)
}

// Implementation of MarshalJSON, with caller-chosen values of the variable items to help testing.
func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
	if s.DockerManifestDigest == "" || s.DockerReference == "" {
		return nil, errors.New("Unexpected empty signature content")
	}
	critical := map[string]interface{}{
		"type":     signatureType,
		"image":    map[string]string{"docker-manifest-digest": s.DockerManifestDigest},
		"identity": map[string]string{"docker-reference": s.DockerReference},
	}
	optional := map[string]interface{}{
		"creator":   creatorID,
		"timestamp": timestamp,
	}
	signature := map[string]interface{}{
		"critical": critical,
		"optional": optional,
	}
	return json.Marshal(signature)
}

// Compile-time check that privateSignature implements json.Unmarshaler
var _ json.Unmarshaler = (*privateSignature)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface
func (s *privateSignature) UnmarshalJSON(data []byte) error {
	err := s.strictUnmarshalJSON(data)
	if err != nil {
		if _, ok := err.(jsonFormatError); ok {
			err = InvalidSignatureError{msg: err.Error()}
		}
	}
	return err
}

// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
	var untyped interface{}
	if err := json.Unmarshal(data, &untyped); err != nil {
		return err
	}
	o, ok := untyped.(map[string]interface{})
	if !ok {
		return InvalidSignatureError{msg: "Invalid signature format"}
	}
	if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
		return err
	}

	c, err := mapField(o, "critical")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
		return err
	}

	optional, err := mapField(o, "optional")
	if err != nil {
		return err
	}
	_ = optional // We don't use anything from here for now.

	t, err := stringField(c, "type")
	if err != nil {
		return err
	}
	if t != signatureType {
		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
	}

	image, err := mapField(c, "image")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
		return err
	}
	digest, err := stringField(image, "docker-manifest-digest")
	if err != nil {
		return err
	}
	s.DockerManifestDigest = digest

	identity, err := mapField(c, "identity")
	if err != nil {
		return err
	}
	if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
		return err
	}
	reference, err := stringField(identity, "docker-reference")
	if err != nil {
		return err
	}
	s.DockerReference = reference

	return nil
}

// sign formats the signature and returns a blob signed using mech and keyIdentity
func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
	json, err := json.Marshal(s)
	if err != nil {
		return nil, err
	}

	return mech.Sign(json, keyIdentity)
}

// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
// because all of the functions have the same type, so there is a risk of exchanging the functions;
// named members of this struct are more explicit.
type signatureAcceptanceRules struct {
	validateKeyIdentity                func(string) error
	validateSignedDockerReference      func(string) error
	validateSignedDockerManifestDigest func(string) error
}

// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
// match expected values, both as specified by rules, and returns it.
func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
	if err != nil {
		return nil, err
	}
	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
		return nil, err
	}

	var unmatchedSignature privateSignature
	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
		return nil, InvalidSignatureError{msg: err.Error()}
	}
	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.DockerManifestDigest); err != nil {
		return nil, err
	}
	if err := rules.validateSignedDockerReference(unmatchedSignature.DockerReference); err != nil {
		return nil, err
	}
	signature := unmatchedSignature.Signature // Policy OK.
	return &signature, nil
}
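For reference, marshalJSONWithVariables produces the "atomic container signature" wire format sketched below (the digest, reference, and creator values are made up for illustration):

	const exampleSignedPayload = `{
	  "critical": {
	    "identity": {"docker-reference": "docker.io/library/busybox:latest"},
	    "image": {"docker-manifest-digest": "sha256:0123abcd..."},
	    "type": "atomic container signature"
	  },
	  "optional": {"creator": "atomic 0.1", "timestamp": 1474000000}
	}`

strictUnmarshalJSON insists on exactly these keys at each level, so an unknown or missing field fails parsing instead of being silently ignored.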
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
package context

import (
	"sync"

	"github.com/docker/distribution/uuid"
	"golang.org/x/net/context"
)

// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
	context.Context
}

// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
	Context
	id   string    // id of context, logged as "instance.id"
	once sync.Once // once protects generation of the id
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		ic.once.Do(func() {
			// We want to lazy initialize the UUID such that we don't
			// call a random generator from the package initialization
			// code. For various reasons random could not be available
			// https://github.com/docker/distribution/issues/782
			ic.id = uuid.Generate().String()
		})
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}

// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
	context.Context
	m map[string]interface{}
}

// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
	mo := make(map[string]interface{}, len(m)) // make our own copy.
	for k, v := range m {
		mo[k] = v
	}

	return stringMapContext{
		Context: ctx,
		m:       mo,
	}
}

func (smc stringMapContext) Value(key interface{}) interface{} {
	if ks, ok := key.(string); ok {
		if v, ok := smc.m[ks]; ok {
			return v
		}
	}

	return smc.Context.Value(key)
}
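A short usage sketch of the lookup chain these contexts form (the "version" key is an arbitrary example):

	import (
		"fmt"

		dcontext "github.com/docker/distribution/context"
	)

	func demo() {
		ctx := dcontext.Background() // lazily carries a process-unique "instance.id"
		ctx2 := dcontext.WithValues(ctx, map[string]interface{}{"version": "v2.0.0"})
		fmt.Println(ctx2.Value("version"))     // served from the copied map
		fmt.Println(ctx2.Value("instance.id")) // falls through to the instance context
	}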
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// 	ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// 	ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// 	GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// 	ctx := context.Context(context.Background(), "version", version)
// 	GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// 	INFO[0000] this log message has a version field        version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// 	GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// 	INFO[0000] this log message has a version field        http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// 	ctx = WithRequest(ctx, req)
// 	GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context
364
vendor/github.com/docker/distribution/context/http.go
generated
vendored
Normal file
@@ -0,0 +1,364 @@
package context

import (
	"errors"
	"net"
	"net/http"
	"strings"
	"sync"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/distribution/uuid"
	"github.com/gorilla/mux"
)

// Common errors used with this package.
var (
	ErrNoRequestContext        = errors.New("no http request in context")
	ErrNoResponseWriterContext = errors.New("no http response in context")
)

func parseIP(ipStr string) net.IP {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		log.Warnf("invalid remote IP address: %q", ipStr)
	}
	return ip
}

// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
		proxies := strings.Split(prior, ",")
		if len(proxies) > 0 {
			remoteAddr := strings.Trim(proxies[0], " ")
			if parseIP(remoteAddr) != nil {
				return remoteAddr
			}
		}
	}
	// X-Real-Ip is less supported, but worth checking in the
	// absence of X-Forwarded-For
	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
		if parseIP(realIP) != nil {
			return realIP
		}
	}

	return r.RemoteAddr
}

// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
	addr := RemoteAddr(r)

	// Try parsing it as "IP:port"
	if ip, _, err := net.SplitHostPort(addr); err == nil {
		return ip
	}

	return addr
}

// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
	if ctx.Value("http.request") != nil {
		// NOTE(stevvooe): This needs to be considered a programming error. It
		// is unlikely that we'd want to have more than one request in
		// context.
		panic("only one request per context")
	}

	return &httpRequestContext{
		Context:   ctx,
		startedAt: time.Now(),
		id:        uuid.Generate().String(),
		r:         r,
	}
}

// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
		return r, nil
	}
	return nil, ErrNoRequestContext
}

// GetRequestID attempts to resolve the current request id, if possible. An
// empty string is returned if it is not available on the context.
func GetRequestID(ctx Context) string {
	return GetStringValue(ctx, "http.request.id")
}

// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
	irw := instrumentedResponseWriter{
		ResponseWriter: w,
		Context:        ctx,
	}

	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		irwCN := &instrumentedResponseWriterCN{
			instrumentedResponseWriter: irw,
			CloseNotifier:              closeNotifier,
		}

		return irwCN, irwCN
	}

	return &irw, &irw
}

// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
	v := ctx.Value("http.response")

	rw, ok := v.(http.ResponseWriter)
	if !ok || rw == nil {
		return nil, ErrNoResponseWriterContext
	}

	return rw, nil
}

// getVarsFromRequest lets us change the request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars

// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
	return &muxVarsContext{
		Context: ctx,
		vars:    getVarsFromRequest(r),
	}
}

// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
	return GetLogger(ctx,
		"http.request.id",
		"http.request.method",
		"http.request.host",
		"http.request.uri",
		"http.request.referer",
		"http.request.useragent",
		"http.request.remoteaddr",
		"http.request.contenttype")
}

// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
	l := getLogrusLogger(ctx,
		"http.response.written",
		"http.response.status",
		"http.response.contenttype")

	duration := Since(ctx, "http.request.startedat")

	if duration > 0 {
		l = l.WithField("http.response.duration", duration.String())
	}

	return l
}

// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
	Context

	startedAt time.Time
	id        string
	r         *http.Request
}

// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "request". For other components, access them as
// "request.<component>". For example, r.RequestURI
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.request" {
			return ctx.r
		}

		if !strings.HasPrefix(keyStr, "http.request.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		switch parts[2] {
		case "uri":
			return ctx.r.RequestURI
		case "remoteaddr":
			return RemoteAddr(ctx.r)
		case "method":
			return ctx.r.Method
		case "host":
			return ctx.r.Host
		case "referer":
			referer := ctx.r.Referer()
			if referer != "" {
				return referer
			}
		case "useragent":
			return ctx.r.UserAgent()
		case "id":
			return ctx.id
		case "startedat":
			return ctx.startedAt
		case "contenttype":
			ct := ctx.r.Header.Get("Content-Type")
			if ct != "" {
				return ct
			}
		}
	}

fallback:
	return ctx.Context.Value(key)
}

type muxVarsContext struct {
	Context
	vars map[string]string
}

func (ctx *muxVarsContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "vars" {
			return ctx.vars
		}

		if strings.HasPrefix(keyStr, "vars.") {
			keyStr = strings.TrimPrefix(keyStr, "vars.")
		}

		if v, ok := ctx.vars[keyStr]; ok {
			return v
		}
	}

	return ctx.Context.Value(key)
}

// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
	instrumentedResponseWriter
	http.CloseNotifier
}

// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
	http.ResponseWriter
	Context

	mu      sync.Mutex
	status  int
	written int64
}

func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
	n, err = irw.ResponseWriter.Write(p)

	irw.mu.Lock()
	irw.written += int64(n)

	// Guess the likely status if not set.
	if irw.status == 0 {
		irw.status = http.StatusOK
	}

	irw.mu.Unlock()

	return
}

func (irw *instrumentedResponseWriter) WriteHeader(status int) {
	irw.ResponseWriter.WriteHeader(status)

	irw.mu.Lock()
	irw.status = status
	irw.mu.Unlock()
}

func (irw *instrumentedResponseWriter) Flush() {
	if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
		flusher.Flush()
	}
}

func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}

		if !strings.HasPrefix(keyStr, "http.response.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		irw.mu.Lock()
		defer irw.mu.Unlock()

		switch parts[2] {
		case "written":
			return irw.written
		case "status":
			return irw.status
		case "contenttype":
			contentType := irw.Header().Get("Content-Type")
			if contentType != "" {
				return contentType
			}
		}
	}

fallback:
	return irw.Context.Value(key)
}

func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
|
||||
if keyStr == "http.response" {
|
||||
return irw
|
||||
}
|
||||
}
|
||||
|
||||
return irw.instrumentedResponseWriter.Value(key)
|
||||
}
|
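Taken together, these helpers let a server wrap each request once and log request/response fields anywhere downstream. A minimal sketch of that wiring, assuming the WithRequest and WithResponseWriter constructors from the earlier part of this file (the handler body and import alias are illustrative):

	package main

	import (
		"net/http"

		ctxu "github.com/docker/distribution/context"
	)

	func main() {
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			// Record request fields and start time, then instrument the writer.
			ctx := ctxu.WithRequest(ctxu.Background(), r)
			ctx, w = ctxu.WithResponseWriter(ctx, w)

			w.WriteHeader(http.StatusNoContent)

			// Safe only after the response is written (see GetResponseLogger above).
			ctxu.GetResponseLogger(ctx).Infof("served %s", ctxu.GetStringValue(ctx, "http.request.uri"))
		})
		http.ListenAndServe(":8080", nil)
	}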
116 vendor/github.com/docker/distribution/context/logger.go generated vendored Normal file

@@ -0,0 +1,116 @@
package context

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"runtime"
)

// Logger provides a leveled-logging interface.
type Logger interface {
	// standard logger methods
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})

	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	// Leveled methods, from logrus
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}

// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
	return WithValue(ctx, "logger", logger)
}

// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}

// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
	// must convert from interface{} -> interface{} to string -> interface{} for logrus.
	lfields := make(logrus.Fields, len(fields))
	for key, value := range fields {
		lfields[fmt.Sprint(key)] = value
	}

	return getLogrusLogger(ctx, keys...).WithFields(lfields)
}

// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// its recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...)
}

// GetLogrusLogger returns the logrus logger for the context. If one more keys
// are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
	var logger *logrus.Entry

	// Get a logger, if it is present.
	loggerInterface := ctx.Value("logger")
	if loggerInterface != nil {
		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
			logger = lgr
		}
	}

	if logger == nil {
		fields := logrus.Fields{}

		// Fill in the instance id, if we have it.
		instanceID := ctx.Value("instance.id")
		if instanceID != nil {
			fields["instance.id"] = instanceID
		}

		fields["go.version"] = runtime.Version()
		// If no logger is found, just return the standard logger.
		logger = logrus.StandardLogger().WithFields(fields)
	}

	fields := logrus.Fields{}
	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			fields[fmt.Sprint(key)] = v
		}
	}

	return logger.WithFields(fields)
}
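A short usage sketch, assuming the package's WithValue constructor and an import alias ctxu (the key names here are illustrative, not defined by the package): push a logger carrying a stable field once, and every later GetLogger call inherits it.

	ctx := ctxu.Background()
	ctx = ctxu.WithValue(ctx, "worker.id", "w1")

	// Attach a logger carrying worker.id; downstream GetLogger calls reuse it.
	ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "worker.id"))

	ctxu.GetLogger(ctx).Infof("picked up job")                  // includes worker.id
	ctxu.GetLoggerWithField(ctx, "job.id", 7).Debugf("starting") // one-off extra field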
104 vendor/github.com/docker/distribution/context/trace.go generated vendored Normal file

@@ -0,0 +1,104 @@
package context

import (
	"runtime"
	"time"

	"github.com/docker/distribution/uuid"
)

// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
//	func timedOperation(ctx Context) {
//		ctx, done := WithTrace(ctx)
//		defer done("this will be the log message")
//		// ... function body ...
//	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
//	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}

// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
	Context
	id     string
	parent string
	start  time.Time
	fnname string
	file   string
	line   int
}

func (ts *traced) Value(key interface{}) interface{} {
	switch key {
	case "trace.start":
		return ts.start
	case "trace.duration":
		return time.Since(ts.start)
	case "trace.id":
		return ts.id
	case "trace.parent.id":
		if ts.parent == "" {
			return nil // must return nil to signal no parent.
		}

		return ts.parent
	case "trace.func":
		return ts.fnname
	case "trace.file":
		return ts.file
	case "trace.line":
		return ts.line
	}

	return ts.Context.Value(key)
}
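Because each span seeds its parent from the current "trace.id", nesting WithTrace calls is all it takes to link spans. A hedged sketch (function names illustrative, ctxu is an import alias):

	func outer(ctx ctxu.Context) {
		ctx, done := ctxu.WithTrace(ctx) // allocates this span's trace.id
		defer done("outer complete")
		inner(ctx)
	}

	func inner(ctx ctxu.Context) {
		ctx, done := ctxu.WithTrace(ctx) // trace.parent.id now points at outer's span
		defer done("inner complete")
		// ... work ...
	}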
24 vendor/github.com/docker/distribution/context/util.go generated vendored Normal file

@@ -0,0 +1,24 @@
package context

import (
	"time"
)

// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
	if startedAt, ok := ctx.Value(key).(time.Time); ok {
		return time.Since(startedAt)
	}
	return 0
}

// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
	if valuev, ok := ctx.Value(key).(string); ok {
		value = valuev
	}
	return value
}
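Since is the helper GetResponseLogger leans on for "http.request.startedat"; it works for any context key holding a time.Time. A minimal sketch of direct use:

	elapsed := ctxu.Since(ctx, "http.request.startedat")
	if elapsed > 500*time.Millisecond {
		ctxu.GetLogger(ctx).Warnf("slow request: %v", elapsed)
	}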
16 vendor/github.com/docker/distribution/context/version.go generated vendored Normal file

@@ -0,0 +1,16 @@
package context

// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
	ctx = WithValue(ctx, "version", version)
	// push a new logger onto the stack
	return WithLogger(ctx, GetLogger(ctx, "version"))
}

// GetVersion returns the application version from the context. An empty
// string may returned if the version was not set on the context.
func GetVersion(ctx Context) string {
	return GetStringValue(ctx, "version")
}
126 vendor/github.com/docker/distribution/uuid/uuid.go generated vendored Normal file

@@ -0,0 +1,126 @@
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
// can be generated.
//
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
package uuid

import (
	"crypto/rand"
	"fmt"
	"io"
	"os"
	"syscall"
	"time"
)

const (
	// Bits is the number of bits in a UUID
	Bits = 128

	// Size is the number of bytes in a UUID
	Size = Bits / 8

	format = "%08x-%04x-%04x-%04x-%012x"
)

var (
	// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
	ErrUUIDInvalid = fmt.Errorf("invalid uuid")

	// Loggerf can be used to override the default logging destination. Such
	// log messages in this library should be logged at warning or higher.
	Loggerf = func(format string, args ...interface{}) {}
)

// UUID represents a UUID value. UUIDs can be compared and set to other values
// and accessed by byte.
type UUID [Size]byte

// Generate creates a new, version 4 uuid.
func Generate() (u UUID) {
	const (
		// ensures we backoff for less than 450ms total. Use the following to
		// select new value, in units of 10ms:
		// 	n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
		maxretries = 9
		backoff    = time.Millisecond * 10
	)

	var (
		totalBackoff time.Duration
		count        int
		retries      int
	)

	for {
		// This should never block but the read may fail. Because of this,
		// we just try to read the random number generator until we get
		// something. This is a very rare condition but may happen.
		b := time.Duration(retries) * backoff
		time.Sleep(b)
		totalBackoff += b

		n, err := io.ReadFull(rand.Reader, u[count:])
		if err != nil {
			if retryOnError(err) && retries < maxretries {
				count += n
				retries++
				Loggerf("error generating version 4 uuid, retrying: %v", err)
				continue
			}

			// Any other errors represent a system problem. What did someone
			// do to /dev/urandom?
			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
		}

		break
	}

	u[6] = (u[6] & 0x0f) | 0x40 // set version byte
	u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}

	return u
}

// Parse attempts to extract a uuid from the string or returns an error.
func Parse(s string) (u UUID, err error) {
	if len(s) != 36 {
		return UUID{}, ErrUUIDInvalid
	}

	// create stack addresses for each section of the uuid.
	p := make([][]byte, 5)

	if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
		return u, err
	}

	copy(u[0:4], p[0])
	copy(u[4:6], p[1])
	copy(u[6:8], p[2])
	copy(u[8:10], p[3])
	copy(u[10:16], p[4])

	return
}

func (u UUID) String() string {
	return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}

// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
	switch err := err.(type) {
	case *os.PathError:
		return retryOnError(err.Err) // unpack the target error
	case syscall.Errno:
		if err == syscall.EPERM {
			// EPERM represents an entropy pool exhaustion, a condition under
			// which we backoff and retry.
			return true
		}
	}

	return false
}
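A quick round-trip over the package's whole surface; since UUID is a plain [Size]byte array, values compare with ==:

	u := uuid.Generate()
	s := u.String() // 36-character "8-4-4-4-12" form

	parsed, err := uuid.Parse(s)
	if err != nil || parsed != u {
		panic("uuid round trip failed")
	}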
59 vendor/github.com/docker/docker/daemon/graphdriver/counter.go generated vendored

@@ -2,31 +2,66 @@ package graphdriver

import "sync"

type minfo struct {
	check bool
	count int
}

// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
	counts map[string]int
	mu     sync.Mutex
	counts  map[string]*minfo
	mu      sync.Mutex
	checker Checker
}

// NewRefCounter returns a new RefCounter
func NewRefCounter() *RefCounter {
	return &RefCounter{counts: make(map[string]int)}
func NewRefCounter(c Checker) *RefCounter {
	return &RefCounter{
		checker: c,
		counts:  make(map[string]*minfo),
	}
}

// Increment increaes the ref count for the given id and returns the current count
func (c *RefCounter) Increment(id string) int {
func (c *RefCounter) Increment(path string) int {
	c.mu.Lock()
	c.counts[id]++
	count := c.counts[id]
	m := c.counts[path]
	if m == nil {
		m = &minfo{}
		c.counts[path] = m
	}
	// if we are checking this path for the first time check to make sure
	// if it was already mounted on the system and make sure we have a correct ref
	// count if it is mounted as it is in use.
	if !m.check {
		m.check = true
		if c.checker.IsMounted(path) {
			m.count++
		}
	}
	m.count++
	c.mu.Unlock()
	return count
	return m.count
}

// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(id string) int {
func (c *RefCounter) Decrement(path string) int {
	c.mu.Lock()
	c.counts[id]--
	count := c.counts[id]
	m := c.counts[path]
	if m == nil {
		m = &minfo{}
		c.counts[path] = m
	}
	// if we are checking this path for the first time check to make sure
	// if it was already mounted on the system and make sure we have a correct ref
	// count if it is mounted as it is in use.
	if !m.check {
		m.check = true
		if c.checker.IsMounted(path) {
			m.count++
		}
	}
	m.count--
	c.mu.Unlock()
	return count
	return m.count
}
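The new Checker plumbing means the first touch of a path consults the mount table, so a mount that pre-dates the counter is still counted as in use. A hedged sketch with a stub checker (real callers pass the fs or default checkers from driver_linux.go below):

	type stubChecker struct{ mounted bool }

	func (s stubChecker) IsMounted(path string) bool { return s.mounted }

	func example() {
		rc := graphdriver.NewRefCounter(stubChecker{mounted: true})

		// The first Increment finds the path already mounted, so the count
		// starts at 1 before this caller's own reference is added: result is 2.
		n := rc.Increment("/var/lib/example/mnt")
		_ = n // n == 2

		_ = rc.Decrement("/var/lib/example/mnt") // back to 1
	}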
13 vendor/github.com/docker/docker/daemon/graphdriver/driver.go generated vendored

@@ -46,9 +46,12 @@ type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDM
type ProtoDriver interface {
	// String returns a string representation of this driver.
	String() string
	// CreateReadWrite creates a new, empty filesystem layer that is ready
	// to be used as the storage for a container.
	CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
	// Create creates a new, empty, filesystem layer with the
	// specified id and parent and mountLabel. Parent and mountLabel may be "".
	Create(id, parent, mountLabel string) error
	Create(id, parent, mountLabel string, storageOpt map[string]string) error
	// Remove attempts to remove the filesystem layer with this id.
	Remove(id string) error
	// Get returns the mountpoint for the layered filesystem referred

@@ -110,11 +113,17 @@ type FileGetCloser interface {
	Close() error
}

// Checker makes checks on specified filesystems.
type Checker interface {
	// IsMounted returns true if the provided path is mounted for the specific checker
	IsMounted(path string) bool
}

func init() {
	drivers = make(map[string]InitFunc)
}

// Register registers a InitFunc for the driver.
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
	if _, exists := drivers[name]; exists {
		return fmt.Errorf("Name already registered %s", name)
34 vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go generated vendored

@@ -5,6 +5,8 @@ package graphdriver
import (
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/mount"
)

const (

@@ -14,6 +16,8 @@ const (
	FsMagicBtrfs = FsMagic(0x9123683E)
	// FsMagicCramfs filesystem id for Cramfs
	FsMagicCramfs = FsMagic(0x28cd3d45)
	// FsMagicEcryptfs filesystem id for eCryptfs
	FsMagicEcryptfs = FsMagic(0xf15f)
	// FsMagicExtfs filesystem id for Extfs
	FsMagicExtfs = FsMagic(0x0000EF53)
	// FsMagicF2fs filesystem id for F2fs

@@ -89,6 +93,36 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
	return FsMagic(buf.Type), nil
}

// NewFsChecker returns a checker configured for the provied FsMagic
func NewFsChecker(t FsMagic) Checker {
	return &fsChecker{
		t: t,
	}
}

type fsChecker struct {
	t FsMagic
}

func (c *fsChecker) IsMounted(path string) bool {
	m, _ := Mounted(c.t, path)
	return m
}

// NewDefaultChecker returns a check that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
	return &defaultChecker{}
}

type defaultChecker struct {
}

func (c *defaultChecker) IsMounted(path string) bool {
	m, _ := mount.Mounted(path)
	return m
}

// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
	var buf syscall.Statfs_t
65 vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go generated vendored Normal file

@@ -0,0 +1,65 @@
// +build solaris,cgo

package graphdriver

/*
#include <sys/statvfs.h>
#include <stdlib.h>

static inline struct statvfs *getstatfs(char *s) {
	struct statvfs *buf;
	int err;
	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
	err = statvfs(s, buf);
	return buf;
}
*/
import "C"
import (
	"path/filepath"
	"unsafe"

	log "github.com/Sirupsen/logrus"
)

const (
	// FsMagicZfs filesystem id for Zfs
	FsMagicZfs = FsMagic(0x2fc12fc1)
)

var (
	// Slice of drivers that should be used in an order
	priority = []string{
		"zfs",
	}

	// FsNames maps filesystem id to name of the filesystem.
	FsNames = map[FsMagic]string{
		FsMagicZfs: "zfs",
	}
)

// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
	return 0, nil
}

// Mounted checks if the given path is mounted as the fs type
//Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {

	cs := C.CString(filepath.Dir(mountPath))
	buf := C.getstatfs(cs)

	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
		(buf.f_basetype[3] != 0) {
		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
		C.free(unsafe.Pointer(buf))
		return false, ErrPrerequisites
	}

	C.free(unsafe.Pointer(buf))
	C.free(unsafe.Pointer(cs))
	return true, nil
}
2 vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go generated vendored

@@ -1,4 +1,4 @@
// +build !linux,!windows,!freebsd
// +build !linux,!windows,!freebsd,!solaris

package graphdriver
2 vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go generated vendored

@@ -4,8 +4,6 @@ var (
	// Slice of drivers that should be used in order
	priority = []string{
		"windowsfilter",
		"windowsdiff",
		"vfs",
	}
)
2 vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go generated vendored

@@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
		GIDMaps: gdw.gidMaps}
	start := time.Now().UTC()
	logrus.Debugf("Start untar layer")
	logrus.Debug("Start untar layer")
	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
		return
	}
2 vendor/github.com/docker/docker/daemon/graphdriver/plugin.go generated vendored

@@ -23,7 +23,7 @@ func lookupPlugin(name, home string, opts []string) (Driver, error) {
	if err != nil {
		return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
	}
	return newPluginDriver(name, home, opts, pl.Client)
	return newPluginDriver(name, home, opts, pl.Client())
}

func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
18 vendor/github.com/docker/docker/daemon/graphdriver/proxy.go generated vendored

@@ -54,7 +54,23 @@ func (d *graphDriverProxy) String() string {
	return d.name
}

func (d *graphDriverProxy) Create(id, parent, mountLabel string) error {
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
	args := &graphDriverRequest{
		ID:         id,
		Parent:     parent,
		MountLabel: mountLabel,
	}
	var ret graphDriverResponse
	if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
		return err
	}
	if ret.Err != "" {
		return errors.New(ret.Err)
	}
	return nil
}

func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
	args := &graphDriverRequest{
		ID:     id,
		Parent: parent,
15 vendor/github.com/docker/docker/image/fs.go generated vendored

@@ -9,6 +9,7 @@ import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/pkg/ioutils"
)

// IDWalkFunc is function called by StoreBackend.Walk

@@ -118,12 +119,7 @@ func (s *fs) Set(data []byte) (ID, error) {
	}

	id := ID(digest.FromBytes(data))
	filePath := s.contentFile(id)
	tempFilePath := s.contentFile(id) + ".tmp"
	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
		return "", err
	}
	if err := os.Rename(tempFilePath, filePath); err != nil {
	if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil {
		return "", err
	}

@@ -156,12 +152,7 @@ func (s *fs) SetMetadata(id ID, key string, data []byte) error {
	if err := os.MkdirAll(baseDir, 0700); err != nil {
		return err
	}
	filePath := filepath.Join(s.metadataDir(id), key)
	tempFilePath := filePath + ".tmp"
	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
		return err
	}
	return os.Rename(tempFilePath, filePath)
	return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600)
}

// GetMetadata returns metadata for a given ID.
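Both hunks replace the hand-rolled temp-file-plus-rename sequence with ioutils.AtomicWriteFile, which packages the same idea. A simplified sketch of the underlying pattern (not the helper's actual implementation):

	func atomicWrite(filename string, data []byte, perm os.FileMode) error {
		tmp := filename + ".tmp" // illustrative temp name
		if err := ioutil.WriteFile(tmp, data, perm); err != nil {
			return err
		}
		// rename(2) is atomic within a filesystem, so readers never see a
		// partially written file.
		return os.Rename(tmp, filename)
	}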
8 vendor/github.com/docker/docker/image/image.go generated vendored

@@ -48,9 +48,11 @@ type V1Image struct {
// Image stores the image configuration
type Image struct {
	V1Image
	Parent  ID        `json:"parent,omitempty"`
	RootFS  *RootFS   `json:"rootfs,omitempty"`
	History []History `json:"history,omitempty"`
	Parent     ID        `json:"parent,omitempty"`
	RootFS     *RootFS   `json:"rootfs,omitempty"`
	History    []History `json:"history,omitempty"`
	OSVersion  string    `json:"os.version,omitempty"`
	OSFeatures []string  `json:"os.features,omitempty"`

	// rawJSON caches the immutable JSON associated with this image.
	rawJSON []byte
8 vendor/github.com/docker/docker/image/rootfs.go generated vendored

@@ -2,6 +2,14 @@ package image
import "github.com/docker/docker/layer"

// TypeLayers is used for RootFS.Type for filesystems organized into layers.
const TypeLayers = "layers"

// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
	return &RootFS{Type: TypeLayers}
}

// Append appends a new diffID to rootfs
func (r *RootFS) Append(id layer.DiffID) {
	r.DiffIDs = append(r.DiffIDs, id)
5 vendor/github.com/docker/docker/image/rootfs_unix.go generated vendored

@@ -16,8 +16,3 @@ type RootFS struct {
func (r *RootFS) ChainID() layer.ChainID {
	return layer.CreateChainID(r.DiffIDs)
}

// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
	return &RootFS{Type: "layers"}
}
21 vendor/github.com/docker/docker/image/rootfs_windows.go generated vendored

@@ -10,6 +10,9 @@ import (
	"github.com/docker/docker/layer"
)

// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer.
const TypeLayersWithBase = "layers+base"

// RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.

@@ -21,17 +24,25 @@ type RootFS struct {
// BaseLayerID returns the 64 byte hex ID for the baselayer name.
func (r *RootFS) BaseLayerID() string {
	if r.Type != TypeLayersWithBase {
		panic("tried to get base layer ID without a base layer")
	}
	baseID := sha512.Sum384([]byte(r.BaseLayer))
	return fmt.Sprintf("%x", baseID[:32])
}

// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
	baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
	ids := r.DiffIDs
	if r.Type == TypeLayersWithBase {
		// Add an extra ID for the base.
		baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID())))
		ids = append([]layer.DiffID{baseDiffID}, ids...)
	}
	return layer.CreateChainID(ids)
}

// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
	return &RootFS{Type: "layers+base"}
// NewRootFSWithBaseLayer returns a RootFS struct with a base layer
func NewRootFSWithBaseLayer(baseLayer string) *RootFS {
	return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer}
}
18 vendor/github.com/docker/docker/image/v1/imagev1.go generated vendored

@@ -3,6 +3,7 @@ package v1
import (
	"encoding/json"
	"fmt"
	"reflect"
	"regexp"
	"strings"

@@ -10,7 +11,7 @@ import (
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/version"
	"github.com/docker/engine-api/types/versions"
)

var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

@@ -18,7 +19,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// noFallbackMinVersion is the minimum version for which v1compatibility
// information will not be marshaled through the Image struct to remove
// blank fields.
var noFallbackMinVersion = version.Version("1.8.3")
var noFallbackMinVersion = "1.8.3"

// HistoryFromConfig creates a History struct from v1 configuration JSON
func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {

@@ -76,7 +77,7 @@ func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []im
		return nil, err
	}

	useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion)
	useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)

	if useFallback {
		var v1Image image.V1Image

@@ -118,8 +119,15 @@ func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway
	}

	// Delete fields that didn't exist in old manifest
	delete(configAsMap, "rootfs")
	delete(configAsMap, "history")
	imageType := reflect.TypeOf(img).Elem()
	for i := 0; i < imageType.NumField(); i++ {
		f := imageType.Field(i)
		jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
		// Parent is handled specially below.
		if jsonName != "" && jsonName != "parent" {
			delete(configAsMap, jsonName)
		}
	}
	configAsMap["id"] = rawJSON(v1ID)
	if parentV1ID != "" {
		configAsMap["parent"] = rawJSON(parentV1ID)
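The replacement loop derives the JSON keys to delete from the Image struct's own tags instead of a hard-coded list, so newly added fields stay covered automatically. The pattern in isolation (a self-contained sketch, not the vendored code):

	// jsonKeys returns the JSON object keys declared by a struct's tags.
	func jsonKeys(v interface{}) []string {
		t := reflect.TypeOf(v).Elem()
		var keys []string
		for i := 0; i < t.NumField(); i++ {
			if name := strings.Split(t.Field(i).Tag.Get("json"), ",")[0]; name != "" {
				keys = append(keys, name)
			}
		}
		return keys
	}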
28 vendor/github.com/docker/docker/layer/filestore.go generated vendored

@@ -2,6 +2,7 @@ package layer
import (
	"compress/gzip"
	"encoding/json"
	"errors"
	"fmt"
	"io"

@@ -13,6 +14,7 @@ import (
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/pkg/ioutils"
)

@@ -98,6 +100,14 @@ func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
	return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644)
}

func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
	jsonRef, err := json.Marshal(ref)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(fm.root, "descriptor.json"), jsonRef, 0644)
}

func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
	f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {

@@ -191,6 +201,24 @@ func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
	return content, nil
}

func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
	if err != nil {
		if os.IsNotExist(err) {
			// only return empty descriptor to represent what is stored
			return distribution.Descriptor{}, nil
		}
		return distribution.Descriptor{}, err
	}

	var ref distribution.Descriptor
	err = json.Unmarshal(content, &ref)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return ref, err
}

func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
	fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
	if err != nil {
14 vendor/github.com/docker/docker/layer/layer.go generated vendored

@@ -14,6 +14,7 @@ import (
	"io"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/pkg/archive"
)

@@ -50,7 +51,7 @@ var (
	// greater than the 125 max.
	ErrMaxDepthExceeded = errors.New("max depth exceeded")

	// ErrNotSupported is used when the action is not supppoted
	// ErrNotSupported is used when the action is not supported
	// on the current platform
	ErrNotSupported = errors.New("not support on this platform")
)

@@ -171,10 +172,9 @@ type Store interface {
	Get(ChainID) (Layer, error)
	Release(Layer) ([]Metadata, error)

	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error)
	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
	GetRWLayer(id string) (RWLayer, error)
	GetMountID(id string) (string, error)
	ReinitRWLayer(l RWLayer) error
	ReleaseRWLayer(RWLayer) ([]Metadata, error)

	Cleanup() error

@@ -182,6 +182,12 @@ type Store interface {
	DriverName() string
}

// DescribableStore represents a layer store capable of storing
// descriptors for layers.
type DescribableStore interface {
	RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
}

// MetadataTransaction represents functions for setting layer metadata
// with a single transaction.
type MetadataTransaction interface {

@@ -189,6 +195,7 @@ type MetadataTransaction interface {
	SetParent(parent ChainID) error
	SetDiffID(DiffID) error
	SetCacheID(string) error
	SetDescriptor(distribution.Descriptor) error
	TarSplitWriter(compressInput bool) (io.WriteCloser, error)

	Commit(ChainID) error

@@ -208,6 +215,7 @@ type MetadataStore interface {
	GetParent(ChainID) (ChainID, error)
	GetDiffID(ChainID) (DiffID, error)
	GetCacheID(ChainID) (string, error)
	GetDescriptor(ChainID) (distribution.Descriptor, error)
	TarSplitReader(ChainID) (io.ReadCloser, error)

	SetMountID(string, string) error
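DescribableStore is an optional interface: callers that hold a foreign-layer descriptor probe for it rather than widening Store itself. A hedged call-site sketch (the fallback assumes the store's plain Register method):

	func register(store layer.Store, ts io.Reader, parent layer.ChainID, desc distribution.Descriptor) (layer.Layer, error) {
		if ds, ok := store.(layer.DescribableStore); ok {
			return ds.RegisterWithDescriptor(ts, parent, desc)
		}
		return store.Register(ts, parent) // fall back to the plain path
	}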
43 vendor/github.com/docker/docker/layer/layer_store.go generated vendored

@@ -8,6 +8,7 @@ import (
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"

@@ -128,6 +129,11 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
	}

	descriptor, err := ls.store.GetDescriptor(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
	}

	cl = &roLayer{
		chainID: layer,
		diffID:  diff,

@@ -135,6 +141,7 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
		cacheID:    cacheID,
		layerStore: ls,
		references: map[Layer]struct{}{},
		descriptor: descriptor,
	}

	if parent != "" {

@@ -228,6 +235,10 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
}

func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
	return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
}

func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of creates sources but may not be an error returned
	// to the caller (already exists).

@@ -261,9 +272,10 @@ func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		descriptor:     descriptor,
	}

	if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil {
	if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil {
		return nil, err
	}

@@ -417,7 +429,7 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
	return ls.releaseLayer(layer)
}

func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) {
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	m, ok := ls.mounts[name]

@@ -454,14 +466,14 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel stri
	}

	if initFunc != nil {
		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc)
		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
		if err != nil {
			return nil, err
		}
		m.initID = pid
	}

	if err = ls.driver.Create(m.mountID, pid, ""); err != nil {
	if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil {
		return nil, err
	}

@@ -495,25 +507,6 @@ func (ls *layerStore) GetMountID(id string) (string, error) {
	return mount.mountID, nil
}

// ReinitRWLayer reinitializes a given mount to the layerstore, specifically
// initializing the usage count. It should strictly only be used in the
// daemon's restore path to restore state of live containers.
func (ls *layerStore) ReinitRWLayer(l RWLayer) error {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()

	m, ok := ls.mounts[l.Name()]
	if !ok {
		return ErrMountDoesNotExist
	}

	if err := m.incActivityCount(l); err != nil {
		return err
	}

	return nil
}

func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()

@@ -583,14 +576,14 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
	return nil
}

func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
	// Use "<graph-id>-init" to maintain compatibility with graph drivers
	// which are expecting this layer with this special name. If all
	// graph drivers can be updated to not rely on knowing about this layer
	// then the initID should be randomly generated.
	initID := fmt.Sprintf("%s-init", graphID)

	if err := ls.driver.Create(initID, parent, mountLabel); err != nil {
	if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil {
		return "", err
	}
	p, err := ls.driver.Get(initID, "")
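A hedged sketch of the widened CreateRWLayer call; the variable names are illustrative and the storage-opt keys are driver-specific (for example a size quota on drivers that support one):

	rw, err := store.CreateRWLayer(
		containerID,   // mount name
		parentChainID, // read-only parent chain
		"",            // mount label
		nil,           // no init function
		map[string]string{"size": "20G"}, // illustrative driver option
	)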
11 vendor/github.com/docker/docker/layer/layer_store_windows.go generated vendored Normal file

@@ -0,0 +1,11 @@
package layer

import (
	"io"

	"github.com/docker/distribution"
)

func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
	return ls.registerWithDescriptor(ts, parent, descriptor)
}
2 vendor/github.com/docker/docker/layer/layer_unix.go generated vendored

@@ -1,4 +1,4 @@
// +build linux freebsd darwin openbsd
// +build linux freebsd darwin openbsd solaris

package layer
91 vendor/github.com/docker/docker/layer/mounted_layer.go generated vendored

@@ -2,7 +2,6 @@ package layer
import (
	"io"
	"sync"

	"github.com/docker/docker/pkg/archive"
)

@@ -50,14 +49,6 @@ func (ml *mountedLayer) Parent() Layer {
	return nil
}

func (ml *mountedLayer) Mount(mountLabel string) (string, error) {
	return ml.layerStore.driver.Get(ml.mountID, mountLabel)
}

func (ml *mountedLayer) Unmount() error {
	return ml.layerStore.driver.Put(ml.mountID)
}

func (ml *mountedLayer) Size() (int64, error) {
	return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
}

@@ -83,106 +74,30 @@ func (ml *mountedLayer) hasReferences() bool {
	return len(ml.references) > 0
}

func (ml *mountedLayer) incActivityCount(ref RWLayer) error {
	rl, ok := ml.references[ref]
	if !ok {
		return ErrLayerNotRetained
	}

	if err := rl.acquire(); err != nil {
		return err
	}
	return nil
}

func (ml *mountedLayer) deleteReference(ref RWLayer) error {
	rl, ok := ml.references[ref]
	if !ok {
	if _, ok := ml.references[ref]; !ok {
		return ErrLayerNotRetained
	}

	if err := rl.release(); err != nil {
		return err
	}
	delete(ml.references, ref)

	return nil
}

func (ml *mountedLayer) retakeReference(r RWLayer) {
	if ref, ok := r.(*referencedRWLayer); ok {
		ref.activityCount = 0
		ml.references[ref] = ref
	}
}

type referencedRWLayer struct {
	*mountedLayer

	activityL     sync.Mutex
	activityCount int
}

func (rl *referencedRWLayer) acquire() error {
	rl.activityL.Lock()
	defer rl.activityL.Unlock()

	rl.activityCount++

	return nil
}

func (rl *referencedRWLayer) release() error {
	rl.activityL.Lock()
	defer rl.activityL.Unlock()

	if rl.activityCount > 0 {
		return ErrActiveMount
	}

	rl.activityCount = -1

	return nil
}

func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
	rl.activityL.Lock()
	defer rl.activityL.Unlock()

	if rl.activityCount == -1 {
		return "", ErrLayerNotRetained
	}

	if rl.activityCount > 0 {
		rl.activityCount++
		return rl.path, nil
	}

	m, err := rl.mountedLayer.Mount(mountLabel)
	if err == nil {
		rl.activityCount++
		rl.path = m
	}
	return m, err
	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
}

// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
	rl.activityL.Lock()
	defer rl.activityL.Unlock()

	if rl.activityCount == 0 {
		return ErrNotMounted
	}
	if rl.activityCount == -1 {
		return ErrLayerNotRetained
	}

	rl.activityCount--
	if rl.activityCount > 0 {
		return nil
	}

	return rl.mountedLayer.Unmount()
	return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
}
8 vendor/github.com/docker/docker/layer/ro_layer.go generated vendored

@@ -4,6 +4,7 @@ import (
	"fmt"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

@@ -14,6 +15,7 @@ type roLayer struct {
	cacheID    string
	size       int64
	layerStore *layerStore
	descriptor distribution.Descriptor

	referenceCount int
	references     map[Layer]struct{}

@@ -118,6 +120,12 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error {
	if err := tx.SetCacheID(layer.cacheID); err != nil {
		return err
	}
	// Do not store empty descriptors
	if layer.descriptor.Digest != "" {
		if err := tx.SetDescriptor(layer.descriptor); err != nil {
			return err
		}
	}
	if layer.parent != nil {
		if err := tx.SetParent(layer.parent.chainID); err != nil {
			return err
9 vendor/github.com/docker/docker/layer/ro_layer_windows.go generated vendored Normal file

@@ -0,0 +1,9 @@
package layer

import "github.com/docker/distribution"

var _ distribution.Describable = &roLayer{}

func (rl *roLayer) Descriptor() distribution.Descriptor {
	return rl.descriptor
}
84
vendor/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
84
vendor/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
|
@ -33,6 +33,8 @@ type (
|
|||
Reader io.Reader
|
||||
// Compression is the state represents if compressed or not.
|
||||
Compression int
|
||||
// WhiteoutFormat is the format of whiteouts unpacked
|
||||
WhiteoutFormat int
|
||||
// TarChownOptions wraps the chown options UID and GID.
|
||||
TarChownOptions struct {
|
||||
UID, GID int
|
||||
|
@ -47,6 +49,10 @@ type (
|
|||
GIDMaps []idtools.IDMap
|
||||
ChownOpts *TarChownOptions
|
||||
IncludeSourceDir bool
|
||||
// WhiteoutFormat is the expected on disk format for whiteout files.
|
||||
// This format will be converted to the standard format on pack
|
||||
// and from the standard format on unpack.
|
||||
WhiteoutFormat WhiteoutFormat
|
||||
// When unpacking, specifies whether overwriting a directory with a
|
||||
// non-directory is allowed and vice versa.
|
||||
NoOverwriteDirNonDir bool
|
||||
|
@ -93,6 +99,14 @@ const (
|
|||
Xz
|
||||
)
|
||||
|
||||
const (
|
||||
// AUFSWhiteoutFormat is the default format for whiteouts
|
||||
AUFSWhiteoutFormat WhiteoutFormat = iota
|
||||
// OverlayWhiteoutFormat formats whiteout according to the overlay
// standard.
OverlayWhiteoutFormat
)

// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
@@ -130,7 +144,7 @@ func DetectCompression(source []byte) Compression {
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
-			logrus.Debugf("Len too short")
+			logrus.Debug("Len too short")
			continue
		}
		if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -146,7 +160,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	return cmdStream(exec.Command(args[0], args[1:]...), archive)
}

-// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
+// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
@@ -192,8 +206,8 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	}
}

-// CompressStream compresses the dest with specified compression algorithm.
-func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+// CompressStream compresseses the dest with specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
@@ -228,6 +242,11 @@ func (compression *Compression) Extension() string {
	return ""
}

+type tarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) error
+	ConvertRead(*tar.Header, string) (bool, error)
+}
+
type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer
@@ -236,6 +255,12 @@ type tarAppender struct {
	SeenFiles map[uint64]string
	UIDMaps   []idtools.IDMap
	GIDMaps   []idtools.IDMap
+
+	// For packing and unpacking whiteout files in the
+	// non standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter tarWhiteoutConverter
}

// canonicalTarName provides a platform-independent and consistent posix-style
@@ -253,6 +278,7 @@ func canonicalTarName(name string, isDir bool) (string, error) {
	return name, nil
}

// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
@@ -323,11 +349,17 @@ func (ta *tarAppender) addTarFile(path, name string) error {
		hdr.Gid = xGID
	}

+	if ta.WhiteoutConverter != nil {
+		if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
+			return err
+		}
+	}
+
	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

-	if hdr.Typeflag == tar.TypeReg {
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		file, err := os.Open(path)
		if err != nil {
			return err
@@ -408,7 +440,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
	}

	case tar.TypeXGlobalHeader:
-		logrus.Debugf("PAX Global Extended Headers found and ignored")
+		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
@@ -425,10 +457,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
		}
	}

+	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+			if err == syscall.ENOTSUP {
+				// We ignore errors here because not all graphdrivers support
+				// xattrs *cough* old versions of AUFS *cough*. However only
+				// ENOTSUP should be emitted in that case, otherwise we still
+				// bail.
+				errors = append(errors, err.Error())
+				continue
+			}
			return err
		}
	}

+	if len(errors) > 0 {
+		logrus.WithFields(logrus.Fields{
+			"errors": errors,
+		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+	}
+
	// There is no LChmod, so ignore mode for symlink. Also, this
@@ -492,11 +540,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)

	go func() {
		ta := &tarAppender{
-			TarWriter: tar.NewWriter(compressWriter),
-			Buffer:    pools.BufioWriter32KPool.Get(nil),
-			SeenFiles: make(map[uint64]string),
-			UIDMaps:   options.UIDMaps,
-			GIDMaps:   options.GIDMaps,
+			TarWriter:         tar.NewWriter(compressWriter),
+			Buffer:            pools.BufioWriter32KPool.Get(nil),
+			SeenFiles:         make(map[uint64]string),
+			UIDMaps:           options.UIDMaps,
+			GIDMaps:           options.GIDMaps,
+			WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
		}

		defer func() {
@@ -634,7 +683,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)

			if err := ta.addTarFile(filePath, relFilePath); err != nil {
				logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
-				// if pipe is broken, stop writting tar stream to it
+				// if pipe is broken, stop writing tar stream to it
				if err == io.ErrClosedPipe {
					return err
				}
@@ -658,6 +707,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
	if err != nil {
		return err
	}
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)

	// Iterate through the files in the archive.
loop:
@@ -757,6 +807,16 @@ loop:
			hdr.Gid = xGID
		}

+		if whiteoutConverter != nil {
+			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+			if err != nil {
+				return err
+			}
+			if !writeFile {
+				continue
+			}
+		}
+
		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}
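As a usage sketch (not part of the vendored diff): the new WhiteoutConverter plumbing is driven through TarOptions, so a caller that wants overlay whiteouts converted to the AUFS tar encoding while archiving a layer can do something like the following; the layer path is hypothetical.

package main

import (
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Tar a layer directory, converting overlay whiteouts (0:0 char devices,
	// trusted.overlay.opaque xattrs) into the AUFS .wh.* tar representation.
	rc, err := archive.TarWithOptions("/var/lib/mylayer", &archive.TarOptions{
		Compression:    archive.Uncompressed,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(ioutil.Discard, rc) // consume the tar stream
}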
91 vendor/github.com/docker/docker/pkg/archive/archive_linux.go generated vendored Normal file
@@ -0,0 +1,91 @@
package archive

import (
	"archive/tar"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	if format == OverlayWhiteoutFormat {
		return overlayWhiteoutConverter{}
	}
	return nil
}

type overlayWhiteoutConverter struct{}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return err
		}
		if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			*hdr = tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return nil
}

func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
			return false, err
		}
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
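A Linux-only, package-internal sketch of ConvertRead's contract during extraction (the paths and test name are illustrative, not from this commit; mknod needs privilege, so the test skips without it):

package archive

import (
	"archive/tar"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

func TestOverlayConvertReadWhiteout(t *testing.T) {
	root, _ := ioutil.TempDir("", "wh")
	defer os.RemoveAll(root)
	os.Mkdir(filepath.Join(root, "dir"), 0755)

	p := filepath.Join(root, "dir", ".wh.deleted-file")
	hdr := &tar.Header{Name: "dir/.wh.deleted-file", Typeflag: tar.TypeReg}

	writeFile, err := overlayWhiteoutConverter{}.ConvertRead(hdr, p)
	if err != nil {
		t.Skip("mknod requires privilege:", err)
	}
	if writeFile {
		t.Fatal("whiteout entry should not be written as a regular file")
	}
	// a 0:0 char device should now mark the deletion for overlayfs
	if _, err := os.Stat(filepath.Join(root, "dir", "deleted-file")); err != nil {
		t.Fatal(err)
	}
}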
7 vendor/github.com/docker/docker/pkg/archive/archive_other.go generated vendored Normal file
@@ -0,0 +1,7 @@
// +build !linux

package archive

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
46 vendor/github.com/docker/docker/pkg/archive/changes.go generated vendored
@@ -81,6 +81,33 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if strings.HasPrefix(f, WhiteoutPrefix) {
+		originalFile := f[len(WhiteoutPrefix):]
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
@@ -105,21 +132,24 @@ func Changes(layers []string, rw string) ([]Change, error) {
			return nil
		}

-		// Skip AUFS metadata
-		if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
-			return err
+		if sc != nil {
+			if skip, err := sc(path); skip {
+				return err
+			}
		}

		change := Change{
			Path: path,
		}

+		deletedFile, err := dc(rw, path, f)
+		if err != nil {
+			return err
+		}
+
		// Find out what kind of modification happened
-		file := filepath.Base(path)
-		// If there is a whiteout, then the file was removed
-		if strings.HasPrefix(file, WhiteoutPrefix) {
-			originalFile := file[len(WhiteoutPrefix):]
-			change.Path = filepath.Join(filepath.Dir(path), originalFile)
+		if deletedFile != "" {
+			change.Path = deletedFile
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
27 vendor/github.com/docker/docker/pkg/archive/changes_linux.go generated vendored
@@ -283,3 +283,30 @@ func clen(n []byte) int {
	}
	return len(n)
}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	if fi.Mode()&os.ModeCharDevice != 0 {
+		s := fi.Sys().(*syscall.Stat_t)
+		if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
+			return path, nil
+		}
+	}
+	if fi.Mode()&os.ModeDir != 0 {
+		opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+		if err != nil {
+			return "", err
+		}
+		if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
+			return path, nil
+		}
+	}
+
+	return "", nil
+
+}
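A usage sketch of the resulting split, with hypothetical layer paths: Changes keeps the AUFS behavior, while OverlayChanges plugs in the overlay deletion detector added above.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	layers := []string{"/var/lib/layers/base"} // parent layer(s)

	// AUFS-style layer: deletions appear as .wh.* whiteout files
	aufsChanges, err := archive.Changes(layers, "/var/lib/layers/top-aufs")
	if err != nil {
		panic(err)
	}

	// overlay layer: deletions appear as 0:0 char devices / opaque-dir xattrs
	ovlChanges, err := archive.OverlayChanges(layers, "/var/lib/layers/top-overlay")
	if err != nil {
		panic(err)
	}

	for _, c := range append(aufsChanges, ovlChanges...) {
		fmt.Println(c.String()) // e.g. "D /etc/removed.conf"
	}
}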
2 vendor/github.com/docker/docker/pkg/archive/copy.go generated vendored
@@ -103,7 +103,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err erro
		return
	}

-	// Separate the source path between it's directory and
+	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
10 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go generated vendored
@@ -11,19 +11,11 @@ import (
	"io/ioutil"
	"os"
	"runtime"
-	"syscall"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

-func chroot(path string) error {
-	if err := syscall.Chroot(path); err != nil {
-		return err
-	}
-	return syscall.Chdir("/")
-}
-
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and rexec.
@@ -88,7 +80,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
		// pending on write pipe forever
		io.Copy(ioutil.Discard, decompressedArchive)

-		return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
+		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
	}
	return nil
}
103 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go generated vendored Normal file
@@ -0,0 +1,103 @@
package chrootarchive

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/mount"
)

// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
	}

	if err := mount.MakeRPrivate(path); err != nil {
		return err
	}

	// setup oldRoot for pivot_root
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return fmt.Errorf("Error setting up pivot dir: %v", err)
	}

	var mounted bool
	defer func() {
		if mounted {
			// make sure pivotDir is not mounted before we try to remove it
			if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
				if err == nil {
					err = errCleanup
				}
				return
			}
		}

		errCleanup := os.Remove(pivotDir)
		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
		// because we already cleaned it up on failed pivot_root
		if errCleanup != nil && !os.IsNotExist(errCleanup) {
			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
			if err == nil {
				err = errCleanup
			}
		}

		if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil {
			if err == nil {
				err = fmt.Errorf("error unmounting root: %v", errCleanup)
			}
			return
		}
	}()

	if err := syscall.PivotRoot(path, pivotDir); err != nil {
		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
		if err := os.Remove(pivotDir); err != nil {
			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
		}
		return realChroot(path)
	}
	mounted = true

	// This is the new path for where the old root (prior to the pivot) has been moved to
	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))

	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root: %v", err)
	}

	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
	if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
		return fmt.Errorf("Error making old root private after pivot: %v", err)
	}

	// Now unmount the old root so it's no longer visible from the new root
	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
	}
	mounted = false

	return nil
}

func realChroot(path string) error {
	if err := syscall.Chroot(path); err != nil {
		return fmt.Errorf("Error after fallback to chroot: %v", err)
	}
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root after chroot: %v", err)
	}
	return nil
}
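The point of the pivot_root sequence is that extraction runs in a re-exec'd helper whose old rootfs has been unmounted, so hostile tar entries cannot follow symlinks back out to the host. A minimal caller sketch (not from this diff), assuming the package's exported Untar helper and a hypothetical layer tarball:

package main

import (
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// must run first: the "docker-untar" re-exec entry point registered by
	// chrootarchive takes over when this binary is re-invoked
	if reexec.Init() {
		return
	}

	f, err := os.Open("layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Untar chroots (pivot_root on Linux) into dest before extracting
	if err := chrootarchive.Untar(f, "/var/lib/dest", nil); err != nil {
		panic(err)
	}
}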
12 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go generated vendored Normal file
@@ -0,0 +1,12 @@
// +build !windows,!linux

package chrootarchive

import "syscall"

func chroot(path string) error {
	if err := syscall.Chroot(path); err != nil {
		return err
	}
	return syscall.Chdir("/")
}
8 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go generated vendored
@@ -13,12 +13,12 @@ import (
	"github.com/Sirupsen/logrus"
)

-// exclusion return true if the specified pattern is an exclusion
+// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
	return pattern[0] == '!'
}

-// empty return true if the specified pattern is empty
+// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
	return pattern == ""
}
@@ -31,7 +31,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
	// Loop over exclusion patterns and:
	// 1. Clean them up.
	// 2. Indicate whether we are dealing with any exception rules.
-	// 3. Error if we see a single exclusion marker on it's own (!).
+	// 3. Error if we see a single exclusion marker on its own (!).
	cleanedPatterns := []string{}
	patternDirs := [][]string{}
	exceptions := false
@@ -217,7 +217,7 @@ func regexpMatch(pattern, path string) (bool, error) {
}

// CopyFile copies from src to dst until either EOF is reached
-// on src or an error occurs. It verifies src exists and remove
+// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
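The helpers touched above back .dockerignore-style matching; a small sketch (not from this diff) of the exclusion/exception semantics via the package's exported Matches:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"docs/*", "!docs/README.md"}

	for _, p := range []string{"docs/a.txt", "docs/README.md"} {
		excluded, err := fileutils.Matches(p, patterns)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s excluded=%v\n", p, excluded)
	}
	// docs/a.txt excluded=true, docs/README.md excluded=false:
	// the trailing !-pattern re-includes the README
}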
7 vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go generated vendored Normal file
@@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds Returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
	return -1
}
20 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go generated vendored
@@ -8,6 +8,7 @@ import (
	"sort"
	"strconv"
	"strings"
+	"sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
@@ -16,6 +17,7 @@ import (
// useradd -r -s /bin/false <username>

var (
+	once        sync.Once
	userCommand string

	cmdTemplates = map[string]string{
@@ -31,15 +33,6 @@ var (
	userMod = "usermod"
)

-func init() {
-	// set up which commands are used for adding users/groups dependent on distro
-	if _, err := resolveBinary("adduser"); err == nil {
-		userCommand = "adduser"
-	} else if _, err := resolveBinary("useradd"); err == nil {
-		userCommand = "useradd"
-	}
-}
-
func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
@@ -94,7 +87,14 @@ func AddNamespaceRangesUser(name string) (int, int, error) {
}

func addUser(userName string) error {
+	once.Do(func() {
+		// set up which commands are used for adding users/groups dependent on distro
+		if _, err := resolveBinary("adduser"); err == nil {
+			userCommand = "adduser"
+		} else if _, err := resolveBinary("useradd"); err == nil {
+			userCommand = "useradd"
+		}
+	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
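The init-to-once.Do move above defers the PATH lookups until a user is actually added, instead of paying for them in package init of every importer. The same pattern as a standalone sketch:

package main

import (
	"fmt"
	"os/exec"
	"sync"
)

var (
	once        sync.Once
	userCommand string
)

// lookupUserCommand resolves the binary lazily, exactly once,
// on the first call rather than at program start-up.
func lookupUserCommand() string {
	once.Do(func() {
		if _, err := exec.LookPath("adduser"); err == nil {
			userCommand = "adduser"
		} else if _, err := exec.LookPath("useradd"); err == nil {
			userCommand = "useradd"
		}
	})
	return userCommand
}

func main() {
	fmt.Println("user command:", lookupUserCommand())
}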
51 vendor/github.com/docker/docker/pkg/ioutils/buffer.go generated vendored Normal file
@@ -0,0 +1,51 @@
package ioutils

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n

	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
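fixedBuffer never grows: Write reports errBufferFull once the slice is at capacity (and io.ErrShortWrite otherwise), which is how BytesPipe knows to chain a new segment. A package-internal sketch (illustrative, not part of the vendored code) of that contract:

package ioutils

import "testing"

func TestFixedBufferContract(t *testing.T) {
	b := &fixedBuffer{buf: make([]byte, 0, 4)}

	// only 4 of 6 bytes fit; the buffer signals it is full
	n, err := b.Write([]byte("abcdef"))
	if n != 4 || err != errBufferFull {
		t.Fatalf("want 4 bytes + errBufferFull, got %d, %v", n, err)
	}

	out := make([]byte, 4)
	n, _ = b.Read(out)
	if n != 4 || string(out) != "abcd" {
		t.Fatalf("unexpected read: %q", out[:n])
	}
	// drained: Len is 0 but Cap stays 4, so a pool can reuse it after Reset
	if b.Len() != 0 || b.Cap() != 4 {
		t.Fatal("buffer should be empty but keep its capacity")
	}
}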
130 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go generated vendored
@@ -9,12 +9,20 @@ import (
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

-// ErrClosed is returned when Write is called on a closed BytesPipe.
-var ErrClosed = errors.New("write to closed BytesPipe")
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)

// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
@@ -23,22 +31,17 @@ var ErrClosed = errors.New("write to closed BytesPipe")
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond
-	buf      [][]byte // slice of byte-slices of buffered data
-	lastRead int      // index in the first slice to a read point
-	bufLen   int      // length of data buffered over the slices
-	closeErr error    // error to return from next Read. set to nil if not closed.
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
-func NewBytesPipe(buf []byte) *BytesPipe {
-	if cap(buf) == 0 {
-		buf = make([]byte, 0, 64)
-	}
-	bp := &BytesPipe{
-		buf: [][]byte{buf[:0]},
-	}
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}
@@ -47,23 +50,31 @@ func NewBytesPipe(buf []byte) *BytesPipe {
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()
-	defer bp.mu.Unlock()
	written := 0
loop0:
	for {
		if bp.closeErr != nil {
+			bp.mu.Unlock()
			return written, ErrClosed
		}
-		// write data to the last buffer
-		b := bp.buf[len(bp.buf)-1]
-		// copy data to the current empty allocated area
-		n := copy(b[len(b):cap(b)], p)
-		// increment buffered data length
-		bp.bufLen += n
-		// include written data in last buffer
-		bp.buf[len(bp.buf)-1] = b[:len(b)+n]
+
+		if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(64))
+		}
+		// get the last buffer
+		b := bp.buf[len(bp.buf)-1]
+
+		n, err := b.Write(p)
+		written += n
+		bp.bufLen += n
+
+		// errBufferFull is an error we expect to get if the buffer is full
+		if err != nil && err != errBufferFull {
+			bp.wait.Broadcast()
+			bp.mu.Unlock()
+			return written, err
+		}

		// if there was enough room to write all then break
		if len(p) == n {
@@ -73,7 +84,7 @@ loop0:
		// more data: write to the next slice
		p = p[n:]

-		// block if too much data is still in the buffer
+		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
			if bp.closeErr != nil {
@@ -81,15 +92,15 @@ loop0:
			}
		}

-		// allocate slice that has twice the size of the last unless maximum reached
-		nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
+		// add new byte slice to the buffers slice and continue writing
+		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
-		// add new byte slice to the buffers slice and continue writing
-		bp.buf = append(bp.buf, make([]byte, 0, nextCap))
+		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
+	bp.mu.Unlock()
	return written, nil
}

@@ -111,46 +122,65 @@ func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

-func (bp *BytesPipe) len() int {
-	return bp.bufLen - bp.lastRead
-}
-
// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
-	defer bp.mu.Unlock()
-	if bp.len() == 0 {
+	if bp.bufLen == 0 {
		if bp.closeErr != nil {
+			bp.mu.Unlock()
			return 0, bp.closeErr
		}
		bp.wait.Wait()
-		if bp.len() == 0 && bp.closeErr != nil {
-			return 0, bp.closeErr
+		if bp.bufLen == 0 && bp.closeErr != nil {
+			err := bp.closeErr
+			bp.mu.Unlock()
+			return 0, err
		}
	}
-	for {
-		read := copy(p, bp.buf[0][bp.lastRead:])
+
+	for bp.bufLen > 0 {
+		b := bp.buf[0]
+		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
-		bp.lastRead += read
-		if bp.len() == 0 {
-			// we have read everything. reset to the beginning.
-			bp.lastRead = 0
-			bp.bufLen -= len(bp.buf[0])
-			bp.buf[0] = bp.buf[0][:0]
-			break
+		bp.bufLen -= read
+
+		if b.Len() == 0 {
+			// it's empty so return it to the pool and move to the next one
+			returnBuffer(b)
+			bp.buf[0] = nil
+			bp.buf = bp.buf[1:]
		}
-		// break if everything was read
+
		if len(p) == read {
			break
		}
-		// more buffered data and more asked. read from next slice.
+
		p = p[read:]
-		bp.lastRead = 0
-		bp.bufLen -= len(bp.buf[0])
-		bp.buf[0] = nil    // throw away old slice
-		bp.buf = bp.buf[1:] // switch to next
	}
+
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

+func returnBuffer(b *fixedBuffer) {
+	b.Reset()
+	bufPoolsLock.Lock()
+	pool := bufPools[b.Cap()]
+	bufPoolsLock.Unlock()
+	if pool != nil {
+		pool.Put(b)
+	}
+}
+
+func getBuffer(size int) *fixedBuffer {
+	bufPoolsLock.Lock()
+	pool, ok := bufPools[size]
+	if !ok {
+		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+		bufPools[size] = pool
+	}
+	bufPoolsLock.Unlock()
+	return pool.Get().(*fixedBuffer)
+}
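NewBytesPipe no longer takes a seed slice; segments now come from size-keyed sync.Pools and are recycled on read. The producer/consumer behavior is otherwise unchanged, as in this sketch (assuming, per CloseWithError above, that Close surfaces as io.EOF to readers once the pipe drains):

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe() // note: no seed slice anymore

	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close() // readers drain buffered data, then see io.EOF
	}()

	buf := make([]byte, 32)
	for {
		n, err := bp.Read(buf)
		fmt.Print(string(buf[:n]))
		if err == io.EOF {
			break
		}
	}
	fmt.Println()
}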
82 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go generated vendored Normal file
@@ -0,0 +1,82 @@
package ioutils

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}

	abspath, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	return &atomicFileWriter{
		f:    f,
		fn:   abspath,
		perm: perm,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
		f.(*atomicFileWriter).writeErr = err
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type atomicFileWriter struct {
	f        *os.File
	fn       string
	writeErr error
	perm     os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil || w.writeErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	return nil
}
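A usage sketch: every write funnels through a same-directory temp file plus fsync, chmod, and rename, so readers never observe a partially written file; the filename here is illustrative.

package main

import (
	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	data := []byte(`{"id":"abc123"}`)

	// writes .tmp-config.json alongside the target, fsyncs and chmods it,
	// then renames over config.json only if every write succeeded
	if err := ioutils.AtomicWriteFile("config.json", data, 0600); err != nil {
		panic(err)
	}
}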
2 vendor/github.com/docker/docker/pkg/ioutils/readers.go generated vendored
@@ -55,7 +55,7 @@ func HashData(src io.Reader) (string, error) {
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

-// OnEOFReader wraps a io.ReadCloser and a function
+// OnEOFReader wraps an io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {
	Rc io.ReadCloser
6 vendor/github.com/docker/docker/pkg/ioutils/scheduler.go generated vendored
@@ -1,6 +0,0 @@
// +build !gccgo

package ioutils

func callSchedulerIfNecessary() {
}
13 vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go generated vendored
@@ -1,13 +0,0 @@
// +build gccgo

package ioutils

import (
	"runtime"
)

func callSchedulerIfNecessary() {
	//allow or force Go scheduler to switch context, without explicitly
	//forcing this will make it hang when using gccgo implementation
	runtime.Gosched()
}
40 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go generated vendored
@@ -1,40 +0,0 @@
package jsonlog

import (
	"encoding/json"
	"fmt"
	"time"
)

// JSONLog represents a log message, typically a single entry from a given log stream.
// JSONLogs can be easily serialized to and from JSON and support custom formatting.
type JSONLog struct {
	// Log is the log message
	Log string `json:"log,omitempty"`
	// Stream is the log source
	Stream string `json:"stream,omitempty"`
	// Created is the created timestamp of log
	Created time.Time `json:"time"`
}

// Format returns the log formatted according to format
// If format is nil, returns the log message
// If format is json, returns the log marshaled in json format
// By default, returns the log with the log time formatted according to format.
func (jl *JSONLog) Format(format string) (string, error) {
	if format == "" {
		return jl.Log, nil
	}
	if format == "json" {
		m, err := json.Marshal(jl)
		return string(m), err
	}
	return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil
}

// Reset resets the log to nil.
func (jl *JSONLog) Reset() {
	jl.Log = ""
	jl.Stream = ""
	jl.Created = time.Time{}
}
178 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go generated vendored
@@ -1,178 +0,0 @@
// This code was initially generated by ffjson <https://github.com/pquerna/ffjson>
// This code was generated via the following steps:
// $ go get -u github.com/pquerna/ffjson
// $ make BIND_DIR=. shell
// $ ffjson pkg/jsonlog/jsonlog.go
// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
//
// It has been modified to improve the performance of time marshalling to JSON
// and to clean it up.
// Should this code need to be regenerated when the JSONLog struct is changed,
// the relevant changes which have been made are:
// import (
//	"bytes"
//-
//	"unicode/utf8"
// )
//
// func (mj *JSONLog) MarshalJSON() ([]byte, error) {
//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) {
//	}
//	return buf.Bytes(), nil
// }
//+
// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
//-	var err error
//-	var obj []byte
//-	var first bool = true
//-	_ = obj
//-	_ = err
//-	_ = first
//+	var (
//+		err       error
//+		timestamp string
//+		first     bool = true
//+	)
//	buf.WriteString(`{`)
//	if len(mj.Log) != 0 {
//		if first == true {
//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
//			buf.WriteString(`,`)
//		}
//		buf.WriteString(`"time":`)
//-		obj, err = mj.Created.MarshalJSON()
//+		timestamp, err = FastTimeMarshalJSON(mj.Created)
//		if err != nil {
//			return err
//		}
//-		buf.Write(obj)
//+		buf.WriteString(timestamp)
//		buf.WriteString(`}`)
//		return nil
// }
// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
//	if len(mj.Log) != 0 {
// -		if first == true {
// -			first = false
// -		} else {
// -			buf.WriteString(`,`)
// -		}
// +		first = false
//		buf.WriteString(`"log":`)
//		ffjsonWriteJSONString(buf, mj.Log)
//	}

package jsonlog

import (
	"bytes"
	"unicode/utf8"
)

// MarshalJSON marshals the JSONLog.
func (mj *JSONLog) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.Grow(1024)
	if err := mj.MarshalJSONBuf(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer.
func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
	var (
		err       error
		timestamp string
		first     = true
	)
	buf.WriteString(`{`)
	if len(mj.Log) != 0 {
		first = false
		buf.WriteString(`"log":`)
		ffjsonWriteJSONString(buf, mj.Log)
	}
	if len(mj.Stream) != 0 {
		if first {
			first = false
		} else {
			buf.WriteString(`,`)
		}
		buf.WriteString(`"stream":`)
		ffjsonWriteJSONString(buf, mj.Stream)
	}
	if !first {
		buf.WriteString(`,`)
	}
	buf.WriteString(`"time":`)
	timestamp, err = FastTimeMarshalJSON(mj.Created)
	if err != nil {
		return err
	}
	buf.WriteString(timestamp)
	buf.WriteString(`}`)
	return nil
}

func ffjsonWriteJSONString(buf *bytes.Buffer, s string) {
	const hex = "0123456789abcdef"

	buf.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				buf.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			default:
				buf.WriteString(`\u00`)
				buf.WriteByte(hex[b>>4])
				buf.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				buf.WriteString(s[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}

		if c == '\u2028' || c == '\u2029' {
			if start < i {
				buf.WriteString(s[start:i])
			}
			buf.WriteString(`\u202`)
			buf.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		buf.WriteString(s[start:])
	}
	buf.WriteByte('"')
}
122 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go generated vendored
@@ -1,122 +0,0 @@
package jsonlog

import (
	"bytes"
	"encoding/json"
	"unicode/utf8"
)

// JSONLogs is based on JSONLog.
// It allows marshalling JSONLog from Log as []byte
// and an already marshalled Created timestamp.
type JSONLogs struct {
	Log     []byte `json:"log,omitempty"`
	Stream  string `json:"stream,omitempty"`
	Created string `json:"time"`

	// json-encoded bytes
	RawAttrs json.RawMessage `json:"attrs,omitempty"`
}

// MarshalJSONBuf is based on the same method from JSONLog
// It has been modified to take into account the necessary changes.
func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
	var first = true

	buf.WriteString(`{`)
	if len(mj.Log) != 0 {
		first = false
		buf.WriteString(`"log":`)
		ffjsonWriteJSONBytesAsString(buf, mj.Log)
	}
	if len(mj.Stream) != 0 {
		if first == true {
			first = false
		} else {
			buf.WriteString(`,`)
		}
		buf.WriteString(`"stream":`)
		ffjsonWriteJSONString(buf, mj.Stream)
	}
	if len(mj.RawAttrs) > 0 {
		if first {
			first = false
		} else {
			buf.WriteString(`,`)
		}
		buf.WriteString(`"attrs":`)
		buf.Write(mj.RawAttrs)
	}
	if !first {
		buf.WriteString(`,`)
	}
	buf.WriteString(`"time":`)
	buf.WriteString(mj.Created)
	buf.WriteString(`}`)
	return nil
}

// This is based on ffjsonWriteJSONBytesAsString. It has been changed
// to accept a string passed as a slice of bytes.
func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
	const hex = "0123456789abcdef"

	buf.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				buf.Write(s[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			default:
				buf.WriteString(`\u00`)
				buf.WriteByte(hex[b>>4])
				buf.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				buf.Write(s[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}

		if c == '\u2028' || c == '\u2029' {
			if start < i {
				buf.Write(s[start:i])
			}
			buf.WriteString(`\u202`)
			buf.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		buf.Write(s[start:])
	}
	buf.WriteByte('"')
}
27 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go generated vendored
@@ -1,27 +0,0 @@
// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON.
package jsonlog

import (
	"errors"
	"time"
)

const (
	// RFC3339NanoFixed is our own version of RFC339Nano because we want one
	// that pads the nano seconds part with zeros to ensure
	// the timestamps are aligned in the logs.
	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
	// JSONFormat is the format used by FastMarshalJSON
	JSONFormat = `"` + time.RFC3339Nano + `"`
)

// FastTimeMarshalJSON avoids one of the extra allocations that
// time.MarshalJSON is making.
func FastTimeMarshalJSON(t time.Time) (string, error) {
	if y := t.Year(); y < 0 || y >= 10000 {
		// RFC 3339 is clear that years are 4 digits exactly.
		// See golang.org/issue/4556#c15 for more discussion.
		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
	}
	return t.Format(JSONFormat), nil
}
221 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go generated vendored
@@ -1,221 +0,0 @@
package jsonmessage

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/go-units"
)

// JSONError wraps a concrete Code and Message, `Code` is
// is a integer error code, `Message` is the error message.
type JSONError struct {
	Code    int    `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
}

func (e *JSONError) Error() string {
	return e.Message
}

// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
	terminalFd uintptr
	Current    int64 `json:"current,omitempty"`
	Total      int64 `json:"total,omitempty"`
	Start      int64 `json:"start,omitempty"`
}

func (p *JSONProgress) String() string {
	var (
		width       = 200
		pbBox       string
		numbersBox  string
		timeLeftBox string
	)

	ws, err := term.GetWinsize(p.terminalFd)
	if err == nil {
		width = int(ws.Width)
	}

	if p.Current <= 0 && p.Total <= 0 {
		return ""
	}
	current := units.HumanSize(float64(p.Current))
	if p.Total <= 0 {
		return fmt.Sprintf("%8v", current)
	}
	total := units.HumanSize(float64(p.Total))
	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
	if percentage > 50 {
		percentage = 50
	}
	if width > 110 {
		// this number can't be negative gh#7136
		numSpaces := 0
		if 50-percentage > 0 {
			numSpaces = 50 - percentage
		}
		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
	}

	numbersBox = fmt.Sprintf("%8v/%v", current, total)

	if p.Current > p.Total {
		// remove total display if the reported current is wonky.
		numbersBox = fmt.Sprintf("%8v", current)
	}

	if p.Current > 0 && p.Start > 0 && percentage < 50 {
		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
		perEntry := fromStart / time.Duration(p.Current)
		left := time.Duration(p.Total-p.Current) * perEntry
		left = (left / time.Second) * time.Second

		if width > 50 {
			timeLeftBox = " " + left.String()
		}
	}
	return pbBox + numbersBox + timeLeftBox
}

// JSONMessage defines a message struct. It describes
// the created time, where it from, status, ID of the
// message. It's used for docker events.
type JSONMessage struct {
	Stream          string        `json:"stream,omitempty"`
	Status          string        `json:"status,omitempty"`
	Progress        *JSONProgress `json:"progressDetail,omitempty"`
	ProgressMessage string        `json:"progress,omitempty"` //deprecated
	ID              string        `json:"id,omitempty"`
	From            string        `json:"from,omitempty"`
	Time            int64         `json:"time,omitempty"`
	TimeNano        int64         `json:"timeNano,omitempty"`
	Error           *JSONError    `json:"errorDetail,omitempty"`
	ErrorMessage    string        `json:"error,omitempty"` //deprecated
	// Aux contains out-of-band data, such as digests for push signing.
	Aux *json.RawMessage `json:"aux,omitempty"`
}

// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
// is a terminal. If this is the case, it will erase the entire current line
// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
	if jm.Error != nil {
		if jm.Error.Code == 401 {
			return fmt.Errorf("Authentication is required.")
		}
		return jm.Error
	}
	var endl string
	if isTerminal && jm.Stream == "" && jm.Progress != nil {
		// <ESC>[2K = erase entire current line
		fmt.Fprintf(out, "%c[2K\r", 27)
		endl = "\r"
	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
		return nil
	}
	if jm.TimeNano != 0 {
		fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed))
	} else if jm.Time != 0 {
		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed))
	}
	if jm.ID != "" {
		fmt.Fprintf(out, "%s: ", jm.ID)
	}
	if jm.From != "" {
		fmt.Fprintf(out, "(from %s) ", jm.From)
	}
	if jm.Progress != nil && isTerminal {
		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
	} else if jm.ProgressMessage != "" { //deprecated
		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
	} else if jm.Stream != "" {
		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
	} else {
		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
	}
	return nil
}

// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
	var (
		dec = json.NewDecoder(in)
		ids = make(map[string]int)
	)
	for {
		diff := 0
		var jm JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}

		if jm.Aux != nil {
			if auxCallback != nil {
				auxCallback(jm.Aux)
			}
			continue
		}

		if jm.Progress != nil {
			jm.Progress.terminalFd = terminalFd
		}
		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
			line, ok := ids[jm.ID]
			if !ok {
				// NOTE: This approach of using len(id) to
				// figure out the number of lines of history
				// only works as long as we clear the history
				// when we output something that's not
				// accounted for in the map, such as a line
				// with no ID.
				line = len(ids)
				ids[jm.ID] = line
				if isTerminal {
					fmt.Fprintf(out, "\n")
				}
			} else {
				diff = len(ids) - line
			}
			if isTerminal {
				// NOTE: this appears to be necessary even if
				// diff == 0.
				// <ESC>[{diff}A = move cursor up diff rows
				fmt.Fprintf(out, "%c[%dA", 27, diff)
			}
		} else {
			// When outputting something that isn't progress
			// output, clear the history of previous lines. We
			// don't want progress entries from some previous
			// operation to be updated (for example, pull -a
			// with multiple tags).
			ids = make(map[string]int)
		}
		err := jm.Display(out, isTerminal)
		if jm.ID != "" && isTerminal {
			// NOTE: this appears to be necessary even if
			// diff == 0.
			// <ESC>[{diff}B = move cursor down diff rows
			fmt.Fprintf(out, "%c[%dB", 27, diff)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
155
vendor/github.com/docker/docker/pkg/mount/flags.go
generated
vendored
155
vendor/github.com/docker/docker/pkg/mount/flags.go
generated
vendored
|
@ -5,6 +5,112 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
var flags = map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"defaults": {false, 0},
|
||||
"ro": {false, RDONLY},
|
||||
"rw": {true, RDONLY},
|
||||
"suid": {true, NOSUID},
|
||||
"nosuid": {false, NOSUID},
|
||||
"dev": {true, NODEV},
|
||||
"nodev": {false, NODEV},
|
||||
"exec": {true, NOEXEC},
|
||||
"noexec": {false, NOEXEC},
|
||||
"sync": {false, SYNCHRONOUS},
|
||||
"async": {true, SYNCHRONOUS},
|
||||
"dirsync": {false, DIRSYNC},
|
||||
"remount": {false, REMOUNT},
|
||||
"mand": {false, MANDLOCK},
|
||||
"nomand": {true, MANDLOCK},
|
||||
"atime": {true, NOATIME},
|
||||
"noatime": {false, NOATIME},
|
||||
"diratime": {true, NODIRATIME},
|
||||
"nodiratime": {false, NODIRATIME},
|
||||
"bind": {false, BIND},
|
||||
"rbind": {false, RBIND},
|
||||
"unbindable": {false, UNBINDABLE},
|
||||
"runbindable": {false, RUNBINDABLE},
|
||||
"private": {false, PRIVATE},
|
||||
"rprivate": {false, RPRIVATE},
|
||||
"shared": {false, SHARED},
|
||||
"rshared": {false, RSHARED},
|
||||
"slave": {false, SLAVE},
|
||||
"rslave": {false, RSLAVE},
|
||||
"relatime": {false, RELATIME},
|
||||
"norelatime": {true, RELATIME},
|
||||
"strictatime": {false, STRICTATIME},
|
||||
"nostrictatime": {true, STRICTATIME},
|
||||
}
|
||||
|
||||
var validFlags = map[string]bool{
|
||||
"": true,
|
||||
"size": true,
|
||||
"mode": true,
|
||||
"uid": true,
|
||||
"gid": true,
|
||||
"nr_inodes": true,
|
||||
"nr_blocks": true,
|
||||
"mpol": true,
|
||||
}
|
||||
|
||||
var propagationFlags = map[string]bool{
|
||||
"bind": true,
|
||||
"rbind": true,
|
||||
"unbindable": true,
|
||||
"runbindable": true,
|
||||
"private": true,
|
||||
"rprivate": true,
|
||||
"shared": true,
|
||||
"rshared": true,
|
||||
"slave": true,
|
||||
"rslave": true,
|
||||
}
|
||||
|
||||
// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
|
||||
func MergeTmpfsOptions(options []string) ([]string, error) {
|
||||
// We use collisions maps to remove duplicates.
|
||||
// For flag, the key is the flag value (the key for propagation flag is -1)
|
||||
// For data=value, the key is the data
|
||||
flagCollisions := map[int]bool{}
|
||||
dataCollisions := map[string]bool{}
|
||||
|
||||
var newOptions []string
|
||||
// We process in reverse order
|
||||
for i := len(options) - 1; i >= 0; i-- {
|
||||
option := options[i]
|
||||
if option == "defaults" {
|
||||
continue
|
||||
}
|
||||
if f, ok := flags[option]; ok && f.flag != 0 {
|
||||
// There is only one propagation mode
|
||||
key := f.flag
|
||||
if propagationFlags[option] {
|
||||
key = -1
|
||||
}
|
||||
// Check to see if there is collision for flag
|
||||
if !flagCollisions[key] {
|
||||
// We prepend the option and add to collision map
|
||||
newOptions = append([]string{option}, newOptions...)
|
||||
flagCollisions[key] = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
opt := strings.SplitN(option, "=", 2)
|
||||
if len(opt) != 2 || !validFlags[opt[0]] {
|
||||
return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
|
||||
}
|
||||
if !dataCollisions[opt[0]] {
|
||||
// We prepend the option and add to collision map
|
||||
newOptions = append([]string{option}, newOptions...)
|
||||
dataCollisions[opt[0]] = true
|
||||
}
|
||||
}
|
||||
|
||||
return newOptions, nil
|
||||
}
|
||||
|
||||
// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {

@@ -13,45 +119,6 @@ func parseOptions(options string) (int, string) {
 		data []string
 	)

-	flags := map[string]struct {
-		clear bool
-		flag  int
-	}{
-		"defaults":      {false, 0},
-		"ro":            {false, RDONLY},
-		"rw":            {true, RDONLY},
-		"suid":          {true, NOSUID},
-		"nosuid":        {false, NOSUID},
-		"dev":           {true, NODEV},
-		"nodev":         {false, NODEV},
-		"exec":          {true, NOEXEC},
-		"noexec":        {false, NOEXEC},
-		"sync":          {false, SYNCHRONOUS},
-		"async":         {true, SYNCHRONOUS},
-		"dirsync":       {false, DIRSYNC},
-		"remount":       {false, REMOUNT},
-		"mand":          {false, MANDLOCK},
-		"nomand":        {true, MANDLOCK},
-		"atime":         {true, NOATIME},
-		"noatime":       {false, NOATIME},
-		"diratime":      {true, NODIRATIME},
-		"nodiratime":    {false, NODIRATIME},
-		"bind":          {false, BIND},
-		"rbind":         {false, RBIND},
-		"unbindable":    {false, UNBINDABLE},
-		"runbindable":   {false, RUNBINDABLE},
-		"private":       {false, PRIVATE},
-		"rprivate":      {false, RPRIVATE},
-		"shared":        {false, SHARED},
-		"rshared":       {false, RSHARED},
-		"slave":         {false, SLAVE},
-		"rslave":        {false, RSLAVE},
-		"relatime":      {false, RELATIME},
-		"norelatime":    {true, RELATIME},
-		"strictatime":   {false, STRICTATIME},
-		"nostrictatime": {true, STRICTATIME},
-	}

 	for _, o := range strings.Split(options, ",") {
 		// If the option does not exist in the flags table or the flag
 		// is not supported on the platform,

@@ -72,16 +139,6 @@ func parseOptions(options string) (int, string) {
 // ParseTmpfsOptions parse fstab type mount options into flags and data
 func ParseTmpfsOptions(options string) (int, string, error) {
 	flags, data := parseOptions(options)
-	validFlags := map[string]bool{
-		"":          true,
-		"size":      true,
-		"mode":      true,
-		"uid":       true,
-		"gid":       true,
-		"nr_inodes": true,
-		"nr_blocks": true,
-		"mpol":      true,
-	}
 	for _, o := range strings.Split(data, ",") {
 		opt := strings.SplitN(o, "=", 2)
 		if !validFlags[opt[0]] {
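For reference, a small sketch of ParseTmpfsOptions as it reads after this change, assuming the same vendored package as above:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// On Linux, flags comes back with the NOEXEC and NOSUID bits set and
	// data carries the device-specific remainder.
	flags, data, err := mount.ParseTmpfsOptions("noexec,nosuid,size=64m")
	if err != nil {
		panic(err)
	}
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}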
vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go (2 changes; generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux,!freebsd freebsd,!cgo
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
 
 package mount
vendor/github.com/docker/docker/pkg/mount/mount.go (4 changes; generated, vendored)

@@ -9,8 +9,8 @@ func GetMounts() ([]*Info, error) {
 	return parseMountTable()
 }
 
-// Mounted looks at /proc/self/mountinfo to determine of the specified
-// mountpoint has been mounted
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
 func Mounted(mountpoint string) (bool, error) {
 	entries, err := parseMountTable()
 	if err != nil {
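Not in the diff: a usage sketch of Mounted, assuming the vendored import path; the mountpoint below is only an example.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	mounted, err := mount.Mounted("/sys/fs/cgroup")
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted:", mounted)
}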
vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go (new file, 33 lines; generated, vendored)

@@ -0,0 +1,33 @@
// +build solaris,cgo

package mount

import (
	"golang.org/x/sys/unix"
	"unsafe"
)

// #include <stdlib.h>
// #include <stdio.h>
// #include <sys/mount.h>
// int Mount(const char *spec, const char *dir, int mflag,
// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
// }
import "C"

func mount(device, target, mType string, flag uintptr, data string) error {
	spec := C.CString(device)
	dir := C.CString(target)
	fstype := C.CString(mType)
	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
	C.free(unsafe.Pointer(spec))
	C.free(unsafe.Pointer(dir))
	C.free(unsafe.Pointer(fstype))
	return err
}

func unmount(target string, flag int) error {
	err := unix.Unmount(target, flag)
	return err
}
vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go (2 changes; generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux,!freebsd freebsd,!cgo
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
 
 package mount
vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go (new file, 37 lines; generated, vendored)

@@ -0,0 +1,37 @@
// +build solaris,cgo

package mount

/*
#include <stdio.h>
#include <sys/mnttab.h>
*/
import "C"

import (
	"fmt"
)

func parseMountTable() ([]*Info, error) {
	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
	if mnttab == nil {
		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
	}

	var out []*Info
	var mp C.struct_mnttab

	ret := C.getmntent(mnttab, &mp)
	for ret == 0 {
		var mountinfo Info
		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
		mountinfo.Source = C.GoString(mp.mnt_special)
		mountinfo.Fstype = C.GoString(mp.mnt_fstype)
		mountinfo.Opts = C.GoString(mp.mnt_mntopts)
		out = append(out, &mountinfo)
		ret = C.getmntent(mnttab, &mp)
	}

	C.fclose(mnttab)
	return out, nil
}
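Not in the diff: parseMountTable feeds the exported GetMounts, so on Solaris (and elsewhere) the mount table can be walked like this; a sketch assuming the vendored package.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	infos, err := mount.GetMounts()
	if err != nil {
		panic(err)
	}
	for _, m := range infos {
		fmt.Printf("%s on %s type %s (%s)\n", m.Source, m.Mountpoint, m.Fstype, m.Opts)
	}
}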
vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go (2 changes; generated, vendored)

@@ -1,4 +1,4 @@
-// +build !windows,!linux,!freebsd freebsd,!cgo
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
 
 package mount
vendor/github.com/docker/docker/pkg/plugins/client.go (14 changes; generated, vendored)

@@ -20,14 +20,16 @@ const (
 )
 
 // NewClient creates a new plugin client (http).
-func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) {
+func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
 	tr := &http.Transport{}
 
-	c, err := tlsconfig.Client(tlsConfig)
-	if err != nil {
-		return nil, err
+	if tlsConfig != nil {
+		c, err := tlsconfig.Client(*tlsConfig)
+		if err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig = c
 	}
-	tr.TLSClientConfig = c
 
 	u, err := url.Parse(addr)
 	if err != nil {

@@ -130,7 +132,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
 			return nil, err
 		}
 		retries++
-		logrus.Warnf("Unable to connect to plugin: %s:%s, retrying in %v", req.URL.Host, req.URL.Path, timeOff)
+		logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff)
 		time.Sleep(timeOff)
 		continue
 	}
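Not in the diff: with tlsConfig now a pointer, callers without TLS can pass nil and get a plain HTTP transport. A sketch, with a made-up socket path:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// A nil *tlsconfig.Options selects a plain HTTP transport instead of
	// erroring out inside tlsconfig.Client.
	client, err := plugins.NewClient("unix:///run/docker/plugins/example.sock", nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", client)
}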
vendor/github.com/docker/docker/pkg/plugins/discovery.go (8 changes; generated, vendored)

@@ -64,7 +64,7 @@ func (l *localRegistry) Plugin(name string) (*Plugin, error) {
 
 	for _, p := range socketpaths {
 		if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 {
-			return newLocalPlugin(name, "unix://"+p), nil
+			return NewLocalPlugin(name, "unix://"+p), nil
 		}
 	}
 
@@ -101,7 +101,7 @@ func readPluginInfo(name, path string) (*Plugin, error) {
 		return nil, fmt.Errorf("Unknown protocol")
 	}
 
-	return newLocalPlugin(name, addr), nil
+	return NewLocalPlugin(name, addr), nil
 }
 
 func readPluginJSONInfo(name, path string) (*Plugin, error) {

@@ -115,8 +115,8 @@ func readPluginJSONInfo(name, path string) (*Plugin, error) {
 	if err := json.NewDecoder(f).Decode(&p); err != nil {
 		return nil, err
 	}
-	p.Name = name
-	if len(p.TLSConfig.CAFile) == 0 {
+	p.name = name
+	if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 {
 		p.TLSConfig.InsecureSkipVerify = true
 	}
 	p.activateWait = sync.NewCond(&sync.Mutex{})
vendor/github.com/docker/docker/pkg/plugins/plugins.go (39 changes; generated, vendored)

@@ -55,29 +55,46 @@
 // Plugin is the definition of a docker plugin.
 type Plugin struct {
 	// Name of the plugin
-	Name string `json:"-"`
+	name string
 	// Address of the plugin
 	Addr string
 	// TLS configuration of the plugin
-	TLSConfig tlsconfig.Options
+	TLSConfig *tlsconfig.Options
 	// Client attached to the plugin
-	Client *Client `json:"-"`
+	client *Client
 	// Manifest of the plugin (see above)
 	Manifest *Manifest `json:"-"`
 
 	// error produced by activation
 	activateErr error
-	// specifies if the activation sequence is completed (not if it is sucessful or not)
+	// specifies if the activation sequence is completed (not if it is successful or not)
 	activated bool
 	// wait for activation to finish
 	activateWait *sync.Cond
 }
 
-func newLocalPlugin(name, addr string) *Plugin {
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+	return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+	return p.client
+}
+
+// IsLegacy returns true for legacy plugins and false otherwise.
+func (p *Plugin) IsLegacy() bool {
+	return true
+}
+
+// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin {
 	return &Plugin{
-		Name:      name,
-		Addr:      addr,
-		TLSConfig: tlsconfig.Options{InsecureSkipVerify: true},
+		name: name,
+		Addr: addr,
+		// TODO: change to nil
+		TLSConfig:    &tlsconfig.Options{InsecureSkipVerify: true},
 		activateWait: sync.NewCond(&sync.Mutex{}),
 	}
 }

@@ -102,10 +119,10 @@ func (p *Plugin) activateWithLock() error {
 	if err != nil {
 		return err
 	}
-	p.Client = c
+	p.client = c
 
 	m := new(Manifest)
-	if err = p.Client.Call("Plugin.Activate", nil, m); err != nil {
+	if err = p.client.Call("Plugin.Activate", nil, m); err != nil {
 		return err
 	}
 

@@ -116,7 +133,7 @@ func (p *Plugin) activateWithLock() error {
 		if !handled {
 			continue
 		}
-		handler(p.Name, p.Client)
+		handler(p.name, p.client)
 	}
 	return nil
 }
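Not in the diff: the name and client fields are unexported now, so external callers go through the new accessors. A sketch, with a hypothetical plugin name and socket:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	p := plugins.NewLocalPlugin("example-volume", "unix:///run/docker/plugins/example.sock")
	fmt.Println(p.Name())     // "example-volume", read through the new accessor
	fmt.Println(p.IsLegacy()) // always true for plugins built this way
	// p.Client() stays nil until the plugin has been activated.
}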
vendor/github.com/docker/docker/pkg/pools/pools.go (8 changes; generated, vendored)

@@ -28,7 +28,7 @@ const buffer32K = 32 * 1024
 
 // BufioReaderPool is a bufio reader that uses sync.Pool.
 type BufioReaderPool struct {
-	pool sync.Pool
+	pool *sync.Pool
 }
 
 func init() {

@@ -39,7 +39,7 @@ func init() {
 // newBufioReaderPoolWithSize is unexported because new pools should be
 // added here to be shared where required.
 func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
-	pool := sync.Pool{
+	pool := &sync.Pool{
 		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
 	}
 	return &BufioReaderPool{pool: pool}

@@ -80,13 +80,13 @@ func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Rea
 
 // BufioWriterPool is a bufio writer that uses sync.Pool.
 type BufioWriterPool struct {
-	pool sync.Pool
+	pool *sync.Pool
 }
 
 // newBufioWriterPoolWithSize is unexported because new pools should be
 // added here to be shared where required.
 func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
-	pool := sync.Pool{
+	pool := &sync.Pool{
 		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
 	}
 	return &BufioWriterPool{pool: pool}
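Not in the diff: a usage sketch of the shared reader pool, assuming the vendored package exposes BufioReader32KPool as before.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// Get hands out a pooled *bufio.Reader reset onto the given source;
	// Put returns it to the underlying *sync.Pool.
	r := pools.BufioReader32KPool.Get(strings.NewReader("hello\n"))
	defer pools.BufioReader32KPool.Put(r)
	line, err := r.ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Print(line)
}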
@@ -1,4 +1,4 @@
-// +build freebsd
+// +build freebsd solaris
 
 package reexec
vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go (2 changes; generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd
+// +build !linux,!windows,!freebsd,!solaris
 
 package reexec
vendor/github.com/docker/docker/pkg/reexec/reexec.go (2 changes; generated, vendored)

@@ -12,7 +12,7 @@ var registeredInitializers = make(map[string]func())
 // Register adds an initialization func under the specified name
 func Register(name string, initializer func()) {
 	if _, exists := registeredInitializers[name]; exists {
-		panic(fmt.Sprintf("reexec func already registred under name %q", name))
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
 	}
 
 	registeredInitializers[name] = initializer
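Not in the diff: a sketch of the Register/Init/Command pattern this package implements, assuming the vendored import path; the helper name is made up.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Re-invoking the current binary with os.Args[0] == "my-helper"
	// runs this initializer instead of main.
	reexec.Register("my-helper", func() {
		fmt.Println("running as helper")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were the re-executed helper
	}
	cmd := reexec.Command("my-helper")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}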
vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go (deleted, 178 lines; generated, vendored)

@@ -1,178 +0,0 @@
package stdcopy

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"

	"github.com/Sirupsen/logrus"
)

// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte

const (
	// Stdin represents standard input stream type.
	Stdin StdType = iota
	// Stdout represents standard output stream type.
	Stdout
	// Stderr represents standard error stream type.
	Stderr

	stdWriterPrefixLen = 8
	stdWriterFdIndex   = 0
	stdWriterSizeIndex = 4

	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)

// stdWriter is a wrapper of io.Writer with extra customized info.
type stdWriter struct {
	io.Writer
	prefix byte
}

// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(buf []byte) (n int, err error) {
	if w == nil || w.Writer == nil {
		return 0, errors.New("Writer not instantiated")
	}
	if buf == nil {
		return 0, nil
	}

	header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
	binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf)))

	line := append(header[:], buf...)

	n, err = w.Writer.Write(line)
	n -= stdWriterPrefixLen

	if n < 0 {
		n = 0
	}
	return
}

// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
	return &stdWriter{
		Writer: w,
		prefix: byte(t),
	}
}

// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
	var (
		buf       = make([]byte, startingBufLen)
		bufLen    = len(buf)
		nr, nw    int
		er, ew    error
		out       io.Writer
		frameSize int
	)

	for {
		// Make sure we have at least a full header
		for nr < stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				if nr < stdWriterPrefixLen {
					logrus.Debugf("Corrupted prefix: %v", buf[:nr])
					return written, nil
				}
				break
			}
			if er != nil {
				logrus.Debugf("Error reading header: %s", er)
				return 0, er
			}
		}

		// Check the first byte to know where to write
		switch StdType(buf[stdWriterFdIndex]) {
		case Stdin:
			fallthrough
		case Stdout:
			// Write on stdout
			out = dstout
		case Stderr:
			// Write on stderr
			out = dsterr
		default:
			logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
			return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
		}

		// Retrieve the size of the frame
		frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
		logrus.Debugf("framesize: %d", frameSize)

		// Check if the buffer is big enough to read the frame.
		// Extend it if necessary.
		if frameSize+stdWriterPrefixLen > bufLen {
			logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
			buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
			bufLen = len(buf)
		}

		// While the amount of bytes read is less than the size of the frame + header, we keep reading
		for nr < frameSize+stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				if nr < frameSize+stdWriterPrefixLen {
					logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
					return written, nil
				}
				break
			}
			if er != nil {
				logrus.Debugf("Error reading frame: %s", er)
				return 0, er
			}
		}

		// Write the retrieved frame (without header)
		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
		if ew != nil {
			logrus.Debugf("Error writing frame: %s", ew)
			return 0, ew
		}
		// If the frame has not been fully written: error
		if nw != frameSize {
			logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
			return 0, io.ErrShortWrite
		}
		written += int64(nw)

		// Move the rest of the buffer to the beginning
		copy(buf, buf[frameSize+stdWriterPrefixLen:])
		// Move the index
		nr -= frameSize + stdWriterPrefixLen
	}
}
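Not in the diff: this file is dropped from the vendor tree here. For reference, a minimal round trip of the framing it implemented (one byte of stream id, three zero bytes, a big-endian 32-bit length, then the payload), assuming the upstream package is still available:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Mux stdout and stderr into one stream, then demux them again.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	var out, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errBuf, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(out.String(), errBuf.String())
}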
vendor/github.com/docker/docker/pkg/stringid/stringid.go (4 changes; generated, vendored)

@@ -24,7 +24,7 @@ func IsShortID(id string) bool {
 // TruncateID returns a shorthand version of a string identifier for convenience.
 // A collision with other shorthands is very unlikely, but possible.
 // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
-// will need to use a langer prefix, or the full-length Id.
+// will need to use a longer prefix, or the full-length Id.
 func TruncateID(id string) string {
 	if i := strings.IndexRune(id, ':'); i >= 0 {
 		id = id[i+1:]

@@ -57,7 +57,7 @@ func generateID(crypto bool) string {
 	}
 }
 
-// GenerateRandomID returns an unique id.
+// GenerateRandomID returns a unique id.
 func GenerateRandomID() string {
 	return generateID(true)
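Not in the diff: a usage sketch of TruncateID; the digest below is an arbitrary illustrative value.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	full := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba"
	// The algorithm prefix is stripped and the first 12 characters kept.
	fmt.Println(stringid.TruncateID(full)) // 4e38e38c8ce0
}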
vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE (deleted, 191 lines; generated, vendored)

@@ -1,191 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2014-2016 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/docker/docker/pkg/symlink/fs.go (deleted, 143 lines; generated, vendored)

@@ -1,143 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.

// This code is a modified version of path/filepath/symlink.go from the Go standard library.

package symlink

import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/system"
)

// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
// absolute path. This function handles paths in a platform-agnostic manner.
func FollowSymlinkInScope(path, root string) (string, error) {
	path, err := filepath.Abs(filepath.FromSlash(path))
	if err != nil {
		return "", err
	}
	root, err = filepath.Abs(filepath.FromSlash(root))
	if err != nil {
		return "", err
	}
	return evalSymlinksInScope(path, root)
}

// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
// a result guaranteed to be contained within the scope `root`, at the time of the call.
// Symlinks in `root` are not evaluated and left as-is.
// Errors encountered while attempting to evaluate symlinks in path will be returned.
// Non-existing paths are valid and do not constitute an error.
// `path` has to contain `root` as a prefix, or else an error will be returned.
// Trying to break out from `root` does not constitute an error.
//
// Example:
//   If /foo/bar -> /outside,
//   FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
//
// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
// are created and not to create, subsequently, additional symlinks that could potentially make a
// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
// no longer be considered safely contained in "/foo".
func evalSymlinksInScope(path, root string) (string, error) {
	root = filepath.Clean(root)
	if path == root {
		return path, nil
	}
	if !strings.HasPrefix(path, root) {
		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
	}
	const maxIter = 255
	originalPath := path
	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
	path = path[len(root):]
	if root == string(filepath.Separator) {
		path = string(filepath.Separator) + path
	}
	if !strings.HasPrefix(path, string(filepath.Separator)) {
		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
	}
	path = filepath.Clean(path)
	// consume path by taking each frontmost path element,
	// expanding it if it's a symlink, and appending it to b
	var b bytes.Buffer
	// b here will always be considered to be the "current absolute path inside
	// root" when we append paths to it, we also append a slash and use
	// filepath.Clean after the loop to trim the trailing slash
	for n := 0; path != ""; n++ {
		if n > maxIter {
			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
		}

		// find next path component, p
		i := strings.IndexRune(path, filepath.Separator)
		var p string
		if i == -1 {
			p, path = path, ""
		} else {
			p, path = path[:i], path[i+1:]
		}

		if p == "" {
			continue
		}

		// this takes a b.String() like "b/../" and a p like "c" and turns it
		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
		// root gets prepended and we Clean again (to remove any trailing slash
		// if the first Clean gave us just "/")
		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
		if cleanP == string(filepath.Separator) {
			// never Lstat "/" itself
			b.Reset()
			continue
		}
		fullP := filepath.Clean(root + cleanP)

		fi, err := os.Lstat(fullP)
		if os.IsNotExist(err) {
			// if p does not exist, accept it
			b.WriteString(p)
			b.WriteRune(filepath.Separator)
			continue
		}
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			b.WriteString(p + string(filepath.Separator))
			continue
		}

		// it's a symlink, put it at the front of path
		dest, err := os.Readlink(fullP)
		if err != nil {
			return "", err
		}
		if system.IsAbs(dest) {
			b.Reset()
		}
		path = dest + string(filepath.Separator) + path
	}

	// see note above on "fullP := ..." for why this is double-cleaned and
	// what's happening here
	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
}

// EvalSymlinks returns the path name after the evaluation of any symbolic
// links.
// If path is relative the result will be relative to the current directory,
// unless one of the components is an absolute symbolic link.
// This version has been updated to support long paths prepended with `\\?\`.
func EvalSymlinks(path string) (string, error) {
	return evalSymlinks(path)
}
vendor/github.com/docker/docker/pkg/symlink/fs_unix.go (deleted, 11 lines; generated, vendored)

@@ -1,11 +0,0 @@
// +build !windows

package symlink

import (
	"path/filepath"
)

func evalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}
vendor/github.com/docker/docker/pkg/symlink/fs_windows.go (deleted, 155 lines; generated, vendored)

@@ -1,155 +0,0 @@
package symlink

import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/pkg/longpath"
)

func toShort(path string) (string, error) {
	p, err := syscall.UTF16FromString(path)
	if err != nil {
		return "", err
	}
	b := p // GetShortPathName says we can reuse buffer
	n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b)))
	if err != nil {
		return "", err
	}
	if n > uint32(len(b)) {
		b = make([]uint16, n)
		if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
			return "", err
		}
	}
	return syscall.UTF16ToString(b), nil
}

func toLong(path string) (string, error) {
	p, err := syscall.UTF16FromString(path)
	if err != nil {
		return "", err
	}
	b := p // GetLongPathName says we can reuse buffer
	n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
	if err != nil {
		return "", err
	}
	if n > uint32(len(b)) {
		b = make([]uint16, n)
		n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
		if err != nil {
			return "", err
		}
	}
	b = b[:n]
	return syscall.UTF16ToString(b), nil
}

func evalSymlinks(path string) (string, error) {
	path, err := walkSymlinks(path)
	if err != nil {
		return "", err
	}

	p, err := toShort(path)
	if err != nil {
		return "", err
	}
	p, err = toLong(p)
	if err != nil {
		return "", err
	}
	// syscall.GetLongPathName does not change the case of the drive letter,
	// but the result of EvalSymlinks must be unique, so we have
	// EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
	// Make drive letter upper case.
	if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
		p = string(p[0]+'A'-'a') + p[1:]
	} else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
		p = p[:3] + string(p[4]+'A'-'a') + p[5:]
	}
	return filepath.Clean(p), nil
}

const utf8RuneSelf = 0x80

func walkSymlinks(path string) (string, error) {
	const maxIter = 255
	originalPath := path
	// consume path by taking each frontmost path element,
	// expanding it if it's a symlink, and appending it to b
	var b bytes.Buffer
	for n := 0; path != ""; n++ {
		if n > maxIter {
			return "", errors.New("EvalSymlinks: too many links in " + originalPath)
		}

		// A path beginning with `\\?\` represents the root, so automatically
		// skip that part and begin processing the next segment.
		if strings.HasPrefix(path, longpath.Prefix) {
			b.WriteString(longpath.Prefix)
			path = path[4:]
			continue
		}

		// find next path component, p
		var i = -1
		for j, c := range path {
			if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
				i = j
				break
			}
		}
		var p string
		if i == -1 {
			p, path = path, ""
		} else {
			p, path = path[:i], path[i+1:]
		}

		if p == "" {
			if b.Len() == 0 {
				// must be absolute path
				b.WriteRune(filepath.Separator)
			}
			continue
		}

		// If this is the first segment after the long path prefix, accept the
		// current segment as a volume root or UNC share and move on to the next.
		if b.String() == longpath.Prefix {
			b.WriteString(p)
			b.WriteRune(filepath.Separator)
			continue
		}

		fi, err := os.Lstat(b.String() + p)
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			b.WriteString(p)
			if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
				b.WriteRune(filepath.Separator)
			}
			continue
		}

		// it's a symlink, put it at the front of path
		dest, err := os.Readlink(b.String() + p)
		if err != nil {
			return "", err
		}
		if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
			b.Reset()
		}
		path = dest + string(filepath.Separator) + path
	}
	return filepath.Clean(b.String()), nil
}
vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go (new file, 128 lines; generated, vendored)

@@ -0,0 +1,128 @@
// +build solaris,cgo

package system

import (
	"fmt"
	"unsafe"
)

// #cgo LDFLAGS: -lkstat
// #include <unistd.h>
// #include <stdlib.h>
// #include <stdio.h>
// #include <kstat.h>
// #include <sys/swap.h>
// #include <sys/param.h>
// struct swaptable *allocSwaptable(int num) {
//	struct swaptable *st;
//	struct swapent *swapent;
//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
//	swapent = st->swt_ent;
//	for (int i = 0; i < num; i++,swapent++) {
//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
//	}
//	st->swt_n = num;
//	return st;
//}
// void freeSwaptable (struct swaptable *st) {
//	struct swapent *swapent = st->swt_ent;
//	for (int i = 0; i < st->swt_n; i++,swapent++) {
//		free(swapent->ste_path);
//	}
//	free(st);
// }
// swapent_t getSwapEnt(swapent_t *ent, int i) {
//	return ent[i];
// }
// int64_t getPpKernel() {
//	int64_t pp_kernel = 0;
//	kstat_ctl_t *ksc;
//	kstat_t *ks;
//	kstat_named_t *knp;
//	kid_t kid;
//
//	if ((ksc = kstat_open()) == NULL) {
//		return -1;
//	}
//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
//		return -1;
//	}
//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
//		return -1;
//	}
//	switch (knp->data_type) {
//	case KSTAT_DATA_UINT64:
//		pp_kernel = knp->value.ui64;
//		break;
//	case KSTAT_DATA_UINT32:
//		pp_kernel = knp->value.ui32;
//		break;
//	}
//	pp_kernel *= sysconf(_SC_PAGESIZE);
//	return (pp_kernel > 0 ? pp_kernel : -1);
// }
import "C"

// Get the system memory info using sysconf, same as prtconf
func getTotalMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_PHYS_PAGES)
	return int64(pagesize * npages)
}

func getFreeMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_AVPHYS_PAGES)
	return int64(pagesize * npages)
}

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {

	ppKernel := C.getPpKernel()
	MemTotal := getTotalMem()
	MemFree := getFreeMem()
	SwapTotal, SwapFree, err := getSysSwap()

	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
		SwapFree < 0 {
		return nil, fmt.Errorf("Error getting system memory info %v\n", err)
	}

	meminfo := &MemInfo{}
	// Total memory is total physical memory less the memory locked by the kernel
	meminfo.MemTotal = MemTotal - int64(ppKernel)
	meminfo.MemFree = MemFree
	meminfo.SwapTotal = SwapTotal
	meminfo.SwapFree = SwapFree

	return meminfo, nil
}

func getSysSwap() (int64, int64, error) {
	var tSwap int64
	var fSwap int64
	var diskblksPerPage int64
	num, err := C.swapctl(C.SC_GETNSWP, nil)
	if err != nil {
		return -1, -1, err
	}
	st := C.allocSwaptable(num)
	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
	if err != nil {
		C.freeSwaptable(st)
		return -1, -1, err
	}

	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
	for i := 0; i < int(num); i++ {
		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
		tSwap += int64(swapent.ste_pages) * diskblksPerPage
		fSwap += int64(swapent.ste_free) * diskblksPerPage
	}
	C.freeSwaptable(st)
	return tSwap, fSwap, nil
}
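Not in the diff: ReadMemInfo has the same shape on every platform, so the new Solaris backend is consumed exactly like the Linux one. A sketch assuming the vendored package:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		panic(err)
	}
	fmt.Printf("mem %d/%d bytes free, swap %d/%d bytes free\n",
		mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
}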
vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go (2 changes; generated, vendored)

@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!solaris
 
 package system
vendor/github.com/docker/docker/pkg/system/path_unix.go (6 changes; generated, vendored)

@@ -6,3 +6,9 @@ package system
 // executables. Each directory is separated from the next by a colon
 // ':' character.
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
vendor/github.com/docker/docker/pkg/system/path_windows.go (30 changes; generated, vendored)

@@ -2,6 +2,36 @@
 
 package system
 
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
 // DefaultPathEnv is deliberately empty on Windows as the default path will be set by
 // the container. Docker has no context of what the default path should be.
 const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. It also translates the path to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:      --> Fail
+// C:\     --> \
+// a       --> a
+// /a      --> \a
+// d:\     --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
vendor/github.com/docker/docker/pkg/system/stat_solaris.go (17 changes; generated, vendored)

@@ -15,3 +15,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
 		rdev: uint64(s.Rdev),
 		mtim: s.Mtim}, nil
 }
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
vendor/github.com/docker/docker/pkg/system/syscall_windows.go (51 changes; generated, vendored)

@@ -1,9 +1,15 @@
 package system
 
 import (
-	"fmt"
 	"syscall"
 	"unsafe"
+
+	"github.com/Sirupsen/logrus"
 )
 
+var (
+	ntuserApiset      = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+)
+
 // OSVersion is a wrapper for Windows version information

@@ -15,19 +21,47 @@ type OSVersion struct {
 	Build uint16
 }
 
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+	OSVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformID        uint32
+	CSDVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	Reserve           byte
+}
+
 // GetOSVersion gets the operating system version on Windows. Note that
 // docker.exe must be manifested to get the correct version information.
-func GetOSVersion() (OSVersion, error) {
+func GetOSVersion() OSVersion {
 	var err error
 	osv := OSVersion{}
 	osv.Version, err = syscall.GetVersion()
 	if err != nil {
-		return osv, fmt.Errorf("Failed to call GetVersion()")
+		// GetVersion never fails.
+		panic(err)
 	}
 	osv.MajorVersion = uint8(osv.Version & 0xFF)
 	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
 	osv.Build = uint16(osv.Version >> 16)
-	return osv, nil
+	return osv
 }
 
+// IsWindowsClient returns true if the SKU is client
+func IsWindowsClient() bool {
+	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+	if r1 == 0 {
+		logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+		return false
+	}
+	const verNTWorkstation = 0x00000001
+	return osviex.ProductType == verNTWorkstation
+}
+
 // Unmount is a platform-specific helper function to call

@@ -58,3 +92,12 @@ func CommandLineToArgv(commandLine string) ([]string, error) {
 
 	return newArgs, nil
 }
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+	// For now, check for ntuser API support on the host. In the future, a host
+	// may support win32k in containers even if the host does not support ntuser
+	// APIs.
+	return ntuserApiset.Load() == nil
+}
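Not in the diff: a sketch of the reworked API, which only builds on Windows since this file is Windows-specific.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// GetOSVersion no longer returns an error: syscall.GetVersion cannot fail.
	osv := system.GetOSVersion()
	fmt.Printf("Windows %d.%d (build %d), client SKU: %v\n",
		osv.MajorVersion, osv.MinorVersion, osv.Build, system.IsWindowsClient())
}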
vendor/github.com/docker/docker/pkg/system/umask.go (2 changes; generated, vendored)

@@ -7,7 +7,7 @@ import (
 )
 
 // Umask sets current process's file mode creation mask to newmask
-// and return oldmask.
+// and returns oldmask.
 func Umask(newmask int) (oldmask int, err error) {
	return syscall.Umask(newmask), nil
 }
vendor/github.com/docker/docker/pkg/term/ascii.go (deleted, 66 lines; generated, vendored)

@@ -1,66 +0,0 @@
package term

import (
	"fmt"
	"strings"
)

// ASCII lists the possible supported ASCII key sequences
var ASCII = []string{
	"ctrl-@",
	"ctrl-a",
	"ctrl-b",
	"ctrl-c",
	"ctrl-d",
	"ctrl-e",
	"ctrl-f",
	"ctrl-g",
	"ctrl-h",
	"ctrl-i",
	"ctrl-j",
	"ctrl-k",
	"ctrl-l",
	"ctrl-m",
	"ctrl-n",
	"ctrl-o",
	"ctrl-p",
	"ctrl-q",
	"ctrl-r",
	"ctrl-s",
	"ctrl-t",
	"ctrl-u",
	"ctrl-v",
	"ctrl-w",
	"ctrl-x",
	"ctrl-y",
	"ctrl-z",
	"ctrl-[",
	"ctrl-\\",
	"ctrl-]",
	"ctrl-^",
	"ctrl-_",
}

// ToBytes converts a string representing a suite of key sequences to the corresponding ASCII codes.
func ToBytes(keys string) ([]byte, error) {
	codes := []byte{}
next:
	for _, key := range strings.Split(keys, ",") {
		if len(key) != 1 {
			for code, ctrl := range ASCII {
				if ctrl == key {
					codes = append(codes, byte(code))
					continue next
				}
			}
			if key == "DEL" {
				codes = append(codes, 127)
			} else {
				return nil, fmt.Errorf("Unknown character: '%s'", key)
			}
		} else {
			codes = append(codes, byte(key[0]))
		}
	}
	return codes, nil
}
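Not in the diff: this file is dropped from the vendor tree here. For reference, how its key-sequence parser behaved, assuming the upstream package: the index of each entry in the ASCII table is its control code, so "ctrl-c" maps to 3 and "ctrl-d" to 4.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	codes, err := term.ToBytes("ctrl-c,ctrl-d")
	if err != nil {
		panic(err)
	}
	fmt.Println(codes) // [3 4]
}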
vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go (deleted, 50 lines; generated, vendored)

@@ -1,50 +0,0 @@
// +build linux,cgo

package term

import (
	"syscall"
	"unsafe"
)

// #include <termios.h>
import "C"

// Termios is the Unix API for terminal I/O.
// It is a passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios

// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
	var oldState State
	if err := tcget(fd, &oldState.termios); err != 0 {
		return nil, err
	}

	newState := oldState.termios

	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
	if err := tcset(fd, &newState); err != 0 {
		return nil, err
	}
	return &oldState, nil
}

func tcget(fd uintptr, p *Termios) syscall.Errno {
	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
	if ret != 0 {
		return err.(syscall.Errno)
	}
	return 0
}

func tcset(fd uintptr, p *Termios) syscall.Errno {
	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
	if ret != 0 {
		return err.(syscall.Errno)
	}
	return 0
}
Some files were not shown because too many files have changed in this diff.