diff --git a/.travis.yml b/.travis.yml index 9776b475..427989e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,12 +13,20 @@ services: before_install: - sudo apt-get -qq update - sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev libapparmor-dev libseccomp-dev + - sudo apt-get -qq install autoconf automake bison e2fslibs-dev libfuse-dev libtool liblzma-dev install: - make install.tools + - git clone https://github.com/ostreedev/ostree ${TRAVIS_BUILD_DIR}/ostree + - pushd ${TRAVIS_BUILD_DIR}/ostree + - ./autogen.sh --prefix=/usr/local + - make all + - sudo make install + - popd before_script: - export PATH=$HOME/gopath/bin:$PATH + - export LD_LIBRARY_PATH=/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} script: - make .gitvalidation diff --git a/Dockerfile b/Dockerfile index e10ba360..4ba5c430 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,17 +5,24 @@ RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/a RUN apt-get update && apt-get install -y \ apparmor \ + autoconf \ + automake \ + bison \ build-essential \ curl \ + e2fslibs-dev \ gawk \ iptables \ pkg-config \ libaio-dev \ libcap-dev \ + libfuse-dev \ + libostree-dev \ libprotobuf-dev \ libprotobuf-c0-dev \ libseccomp2/jessie-backports \ libseccomp-dev/jessie-backports \ + libtool \ protobuf-c-compiler \ protobuf-compiler \ python-minimal \ @@ -25,6 +32,7 @@ RUN apt-get update && apt-get install -y \ libdevmapper1.02.1 \ libdevmapper-dev \ libgpgme11-dev \ + liblzma-dev \ netcat \ --no-install-recommends \ && apt-get clean @@ -78,6 +86,8 @@ WORKDIR /go/src/github.com/kubernetes-incubator/cri-o ADD . 
/go/src/github.com/kubernetes-incubator/cri-o +RUN make .install.ostree + RUN make test/copyimg/copyimg \ && mkdir -p .artifacts/redis-image \ && ./test/copyimg/copyimg --import-from=docker://redis --export-to=dir:.artifacts/redis-image --signature-policy ./test/policy.json diff --git a/Makefile b/Makefile index 25f646dd..a6d4f001 100644 --- a/Makefile +++ b/Makefile @@ -202,6 +202,14 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man go get -u github.com/cpuguy83/go-md2man; \ fi +.install.ostree: .gopathok + if ! pkg-config ostree-1 2> /dev/null ; then \ + git clone https://github.com/ostreedev/ostree $(GOPATH)/src/github.com/ostreedev/ostree ; \ + cd $(GOPATH)/src/github.com/ostreedev/ostree ; \ + ./autogen.sh --prefix=/usr/local; \ + make all install; \ + fi + .PHONY: \ binaries \ clean \ diff --git a/README.md b/README.md index 25fae28f..bb40dbad 100644 --- a/README.md +++ b/README.md @@ -85,6 +85,7 @@ yum install -y \ libgpg-error-devel \ libseccomp-devel \ libselinux-devel \ + ostree-devel \ pkgconfig \ runc ``` @@ -106,7 +107,9 @@ apt install -y \ runc ``` -If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough, or else build your own. +Debian, Ubuntu, and related distributions will also need a copy of the development libraries for `ostree`, either in the form of the `libostree-dev` package from the [flatpak](https://launchpad.net/~alexlarsson/+archive/ubuntu/flatpak) PPA, or built [from source](https://github.com/ostreedev/ostree) (more on that [here](https://ostree.readthedocs.io/en/latest/#building)). + +If using an older release or a long-term support release, be careful to double-check that the version of `runc` is new enough (running `runc --version` should produce `spec: 1.0.0`), or else build your own. 
**Optional** diff --git a/cmd/kpod/containerImageRef.go b/cmd/kpod/containerImageRef.go index 64ce465e..f43a76ad 100644 --- a/cmd/kpod/containerImageRef.go +++ b/cmd/kpod/containerImageRef.go @@ -208,7 +208,7 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType continue } // Start reading the layer. - rc, err := i.store.Diff("", layerID) + rc, err := i.store.Diff("", layerID, nil) if err != nil { return nil, errors.Wrapf(err, "error extracting layer %q", layerID) } diff --git a/contrib/test/crio-integration-playbook.yaml b/contrib/test/crio-integration-playbook.yaml index b8ec108d..762677a3 100644 --- a/contrib/test/crio-integration-playbook.yaml +++ b/contrib/test/crio-integration-playbook.yaml @@ -39,6 +39,7 @@ - container-selinux - btrfs-progs-devel - device-mapper-devel + - ostree-devel - glibc-devel - gpgme-devel - libassuan-devel @@ -65,6 +66,7 @@ - container-selinux - btrfs-progs-devel - device-mapper-devel + - ostree-devel - glibc-devel - gpgme-devel - libassuan-devel diff --git a/vendor.conf b/vendor.conf index 5e8aa373..b29ca9f5 100644 --- a/vendor.conf +++ b/vendor.conf @@ -5,15 +5,16 @@ k8s.io/apimachinery release-1.6 https://github.com/kubernetes/apimachinery k8s.io/apiserver release-1.6 https://github.com/kubernetes/apiserver # github.com/Sirupsen/logrus v0.11.5 -github.com/containers/image cd150088f25d1ac81c6dff3a0bf458725cb8b339 -github.com/containers/storage 2c75d14b978bff468e7d5ec3ff8a003eca443209 +github.com/containers/image c2a797dfe5bb4a9dd7f48332ce40c6223ffba492 +github.com/ostreedev/ostree-go master +github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be github.com/containernetworking/cni v0.4.0 google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go github.com/opencontainers/selinux v1.0.0-rc1 github.com/opencontainers/go-digest v1.0.0-rc0 github.com/opencontainers/runtime-tools 20db5990713e97e64bc2d340531d61f2edf4cccb github.com/opencontainers/runc 
c5ec25487693612aed95673800863e134785f946 -github.com/opencontainers/image-spec v1.0.0-rc6 +github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runtime-spec v1.0.0 github.com/juju/ratelimit acf38b000a03e4ab89e40f20f1e548f4e6ac7f72 github.com/tchap/go-patricia v2.2.6 diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md index ca8afd4c..8e812bb7 100644 --- a/vendor/github.com/containers/image/README.md +++ b/vendor/github.com/containers/image/README.md @@ -51,14 +51,20 @@ Ensure that the dependencies documented [in vendor.conf](https://github.com/cont are also available (using those exact versions or different versions of your choosing). -This library, by default, also depends on the GpgME C library. Either install it: +This library, by default, also depends on the GpgME and libostree C libraries. Either install them: ```sh -Fedora$ dnf install gpgme-devel libassuan-devel +Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel macOS$ brew install gpgme ``` -or use the `containers_image_openpgp` build tag (e.g. using `go build -tags …`) -This will use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation; +or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`) + +### Supported build tags + +- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation; the primary downside is that creating new signatures with the Golang-only implementation is not supported. +- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. 
+ + (Note that explicitly importing `github.com/containers/image/ostree` will still depend on the `libostree` library, this build tag only affects generic users of …`/alltransports`.) ## Contributing diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index fd131765..a8afcc9e 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "reflect" + "runtime" "strings" "time" @@ -157,6 +158,10 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } }() + if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil { + return err + } + if src.IsMultiImage() { return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) } @@ -277,6 +282,22 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return nil } +func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + c, err := src.OCIConfig() + if err != nil { + return errors.Wrapf(err, "Error parsing image configuration") + } + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS) + if runtime.GOOS == "windows" && c.OS == "linux" { + return osErr + } else if runtime.GOOS != "windows" && c.OS == "windows" { + return osErr + } + } + return nil +} + // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. 
func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error { destRef := dest.Reference().DockerReference() diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index c0f5d977..ea46a27e 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -51,6 +51,11 @@ func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { return false } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *dirImageDestination) MustMatchRuntimeOS() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go index 3c20acba..31cb2eac 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go @@ -78,6 +78,11 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe defer resp.Body.Close() } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return true +} + // Close removes resources associated with an initialized ImageDestination, if any. 
func (d *daemonImageDestination) Close() error { if !d.committed { diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index b9897703..13439462 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -308,31 +308,36 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error { if len(c.challenges) == 0 { return nil } - // assume just one... - challenge := c.challenges[0] - switch challenge.Scheme { - case "basic": - req.SetBasicAuth(c.username, c.password) - return nil - case "bearer": - if c.token == nil || time.Now().After(c.tokenExpiration) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return errors.Errorf("missing realm in bearer auth challenge") + schemeNames := make([]string, 0, len(c.challenges)) + for _, challenge := range c.challenges { + schemeNames = append(schemeNames, challenge.Scheme) + switch challenge.Scheme { + case "basic": + req.SetBasicAuth(c.username, c.password) + return nil + case "bearer": + if c.token == nil || time.Now().After(c.tokenExpiration) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return errors.Errorf("missing realm in bearer auth challenge") + } + service, _ := challenge.Parameters["service"] // Will be "" if not present + scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) + token, err := c.getBearerToken(realm, service, scope) + if err != nil { + return err + } + c.token = token + c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) } - service, _ := challenge.Parameters["service"] // Will be "" if not present - scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) - token, err := c.getBearerToken(realm, service, scope) - if err != nil { - return err - } - c.token = token - c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * 
time.Second) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) - return nil } - return errors.Errorf("no handler for %s authentication", challenge.Scheme) + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil } func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 155aa14f..a3a4ca39 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -99,6 +99,11 @@ func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { return true } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *dockerImageDestination) MustMatchRuntimeOS() bool { + return false +} + // sizeCounter is an io.Writer which only counts the total size of its input. type sizeCounter struct{ size int64 } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 6a8ff2fe..d0b78d63 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -81,6 +81,11 @@ func (d *Destination) AcceptsForeignLayerURLs() bool { return false } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
+func (d *Destination) MustMatchRuntimeOS() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. @@ -176,7 +181,7 @@ func (d *Destination) PutManifest(m []byte) error { layerPaths = append(layerPaths, l.Digest.String()) } - items := []manifestItem{{ + items := []ManifestItem{{ Config: man.Config.Digest.String(), RepoTags: []string{d.repoTag}, Layers: layerPaths, diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index 21d33b77..a526f3ef 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -20,7 +20,7 @@ import ( type Source struct { tarPath string // The following data is only available after ensureCachedDataIsPresent() succeeds - tarManifest *manifestItem // nil if not available yet. + tarManifest *ManifestItem // nil if not available yet. configBytes []byte configDigest digest.Digest orderedDiffIDList []diffID @@ -145,23 +145,28 @@ func (s *Source) ensureCachedDataIsPresent() error { return err } + // Check to make sure length is 1 + if len(tarManifest) != 1 { + return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) + } + // Read and parse config. - configBytes, err := s.readTarComponent(tarManifest.Config) + configBytes, err := s.readTarComponent(tarManifest[0].Config) if err != nil { return err } var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs. 
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config) + return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } - knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) + knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) if err != nil { return err } // Success; commit. - s.tarManifest = tarManifest + s.tarManifest = &tarManifest[0] s.configBytes = configBytes s.configDigest = digest.FromBytes(configBytes) s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs @@ -170,23 +175,25 @@ func (s *Source) ensureCachedDataIsPresent() error { } // loadTarManifest loads and decodes the manifest.json. -func (s *Source) loadTarManifest() (*manifestItem, error) { +func (s *Source) loadTarManifest() ([]ManifestItem, error) { // FIXME? Do we need to deal with the legacy format? bytes, err := s.readTarComponent(manifestFileName) if err != nil { return nil, err } - var items []manifestItem + var items []ManifestItem if err := json.Unmarshal(bytes, &items); err != nil { return nil, errors.Wrap(err, "Error decoding tar manifest.json") } - if len(items) != 1 { - return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items)) - } - return &items[0], nil + return items, nil } -func (s *Source) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) { +// LoadTarManifest loads and decodes the manifest.json +func (s *Source) LoadTarManifest() ([]ManifestItem, error) { + return s.loadTarManifest() +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) { // Collect layer data available in manifest and config. 
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go index 85c3f7f7..f16cc8c6 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/docker/tarfile/types.go @@ -13,7 +13,8 @@ const ( // legacyRepositoriesFileName = "repositories" ) -type manifestItem struct { +// ManifestItem is an element of the array stored in the top-level manifest.json file. +type ManifestItem struct { Config string RepoTags []string Layers []string diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go index 47679415..c79adacc 100644 --- a/vendor/github.com/containers/image/image/docker_list.go +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -16,7 +16,7 @@ type platformSpec struct { OSVersion string `json:"os.version,omitempty"` OSFeatures []string `json:"os.features,omitempty"` Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI } // A manifestDescriptor references a platform-specific manifest. 
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index a2a36ea2..9c242cf0 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -183,6 +183,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ } copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) for i, info := range options.LayerInfos { + copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType copy.LayersDescriptors[i].Digest = info.Digest copy.LayersDescriptors[i].Size = info.Size copy.LayersDescriptors[i].URLs = info.URLs @@ -213,15 +214,17 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { return nil, err } - config := descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), + config := descriptorOCI1{ + descriptor: descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), + }, } - layers := make([]descriptor, len(m.LayersDescriptors)) + layers := make([]descriptorOCI1, len(m.LayersDescriptors)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx] + layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]} if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable } else { diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 2575d1e0..048387ec 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,12 +12,18 @@ import ( "github.com/pkg/errors" ) +type descriptorOCI1 struct { + descriptor + Annotations map[string]string `json:"annotations,omitempty"` +} 
+ type manifestOCI1 struct { src types.ImageSource // May be nil if configBlob is not nil configBlob []byte // If set, corresponds to contents of ConfigDescriptor. SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + ConfigDescriptor descriptorOCI1 `json:"config"` + LayersDescriptors []descriptorOCI1 `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` } func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { @@ -29,7 +35,7 @@ func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericMa } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { +func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { return &manifestOCI1{ src: src, configBlob: configBlob, @@ -148,8 +154,9 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. if len(copy.LayersDescriptors) != len(options.LayerInfos) { return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) } - copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) + copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) for i, info := range options.LayerInfos { + copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType copy.LayersDescriptors[i].Digest = info.Digest copy.LayersDescriptors[i].Size = info.Size } @@ -169,7 +176,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. 
- config := m.ConfigDescriptor + config := m.ConfigDescriptor.descriptor // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. @@ -177,7 +184,7 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { layers := make([]descriptor, len(m.LayersDescriptors)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx] + layers[idx] = m.LayersDescriptors[idx].descriptor layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index 0c571531..05367309 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -66,6 +66,11 @@ func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { return false } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 93802f4d..88659212 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -358,6 +358,11 @@ func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { return true } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index 8a247370..c056b83b 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -11,12 +11,15 @@ import ( "path/filepath" "strconv" "strings" + "time" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/go-digest" "github.com/pkg/errors" + + "github.com/ostreedev/ostree-go/pkg/otbuiltin" ) type blobToImport struct { @@ -86,6 +89,11 @@ func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { return false } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
+func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { + return true +} + func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") if err != nil { @@ -153,7 +161,17 @@ func fixFiles(dir string, usermode bool) error { return nil } -func (d *ostreeImageDestination) importBlob(blob *blobToImport) error { +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error { ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") if err := ensureDirectoryExists(destinationPath); err != nil { @@ -181,11 +199,7 @@ func (d *ostreeImageDestination) importBlob(blob *blobToImport) error { return err } } - return exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size), - "--branch", ostreeBranch, - fmt.Sprintf("--tree=dir=%s", destinationPath)).Run() + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } func (d *ostreeImageDestination) importConfig(blob *blobToImport) error { @@ -253,6 +267,16 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { } func (d *ostreeImageDestination) Commit() error { + repo, err := otbuiltin.OpenRepo(d.ref.repo) + if err != nil { + return err + } + + _, err = repo.PrepareTransaction() + if err != nil { + return err + } + for _, layer := range d.schema.LayersDescriptors { 
hash := layer.Digest.Hex() blob := d.blobs[hash] @@ -261,7 +285,7 @@ func (d *ostreeImageDestination) Commit() error { if blob == nil { continue } - err := d.importBlob(blob) + err := d.importBlob(repo, blob) if err != nil { return err } @@ -277,11 +301,11 @@ func (d *ostreeImageDestination) Commit() error { } manifestPath := filepath.Join(d.tmpDirPath, "manifest") - err := exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.manifest=%s", string(d.manifest)), - fmt.Sprintf("--branch=ociimage/%s", d.ref.branchName), - manifestPath).Run() + + metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest))} + err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata) + + _, err = repo.CommitTransaction() return err } diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 7879aeca..8b708bdf 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -174,11 +174,11 @@ func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobI } // Attempt to create the identified layer and import its contents. layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && err != storage.ErrDuplicateID { + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) return errorBlobInfo, err } - if err == storage.ErrDuplicateID { + if errors.Cause(err) == storage.ErrDuplicateID { // We specified an ID, and there's already a layer with // the same ID. Drain the input so that we can look at // its length and digest. 
@@ -291,7 +291,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI // it returns a non-nil error only on an unexpected failure. func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { if blobinfo.Digest == "" { - return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } for _, blob := range s.BlobList { if blob.Digest == blobinfo.Digest { @@ -331,7 +331,7 @@ func (s *storageImageDestination) Commit() error { } img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) if err != nil { - if err != storage.ErrDuplicateID { + if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) return errors.Wrapf(err, "error creating image %q", s.ID) } @@ -340,8 +340,8 @@ func (s *storageImageDestination) Commit() error { return errors.Wrapf(err, "error reading image %q", s.ID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", err) - return errors.Wrapf(err, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) } logrus.Debugf("reusing image ID %q", img.ID) } else { @@ -449,6 +449,11 @@ func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false } +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
+func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -516,7 +521,7 @@ func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, } else { n = layerMeta.CompressedSize } - diff, err := store.Diff("", layer.ID) + diff, err := store.Diff("", layer.ID, nil) if err != nil { return nil, -1, err } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index 66a64792..9aee75be 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -70,7 +70,9 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { // to build this reference object. func (s storageReference) Transport() types.ImageTransport { return &storageTransport{ - store: s.transport.store, + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, } } @@ -83,7 +85,12 @@ func (s storageReference) DockerReference() reference.Named { // disambiguate between images which may be present in multiple stores and // share only their names. 
func (s storageReference) StringWithinTransport() string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" if s.name == nil { return storeSpec + "@" + s.id } @@ -94,7 +101,14 @@ func (s storageReference) StringWithinTransport() string { } func (s storageReference) PolicyConfigurationIdentity() string { - return s.StringWithinTransport() + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.name == nil { + return storeSpec + "@" + s.id + } + if s.id == "" { + return storeSpec + s.reference + } + return storeSpec + s.reference + "@" + s.id } // Also accept policy that's tied to the combination of the graph root and diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 6539e7ea..336dd814 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/go-digest" ddigest "github.com/opencontainers/go-digest" ) @@ -46,10 +47,20 @@ type StoreTransport interface { // ParseStoreReference parses a reference, overriding any store // specification that it may contain. ParseStoreReference(store storage.Store, reference string) (*storageReference, error) + // SetDefaultUIDMap sets the default UID map to use when opening stores. 
+ SetDefaultUIDMap(idmap []idtools.IDMap) + // SetDefaultGIDMap sets the default GID map to use when opening stores. + SetDefaultGIDMap(idmap []idtools.IDMap) + // DefaultUIDMap returns the default UID map used when opening stores. + DefaultUIDMap() []idtools.IDMap + // DefaultGIDMap returns the default GID map used when opening stores. + DefaultGIDMap() []idtools.IDMap } type storageTransport struct { - store storage.Store + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap } func (s *storageTransport) Name() string { @@ -66,6 +77,26 @@ func (s *storageTransport) SetStore(store storage.Store) { s.store = store } +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + // ParseStoreReference takes a name or an ID, tries to figure out which it is // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { @@ -110,7 +141,12 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( // recognize. 
return nil, ErrInvalidReference } - storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "]" + optionsList := "" + options := store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" id := "" if sum.Validate() == nil { id = sum.Hex() @@ -127,14 +163,17 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( } else { logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil } func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. if s.store == nil { - store, err := storage.GetStore(storage.DefaultStoreOptions) + options := storage.DefaultStoreOptions + options.UIDMap = s.defaultUIDMap + options.GIDMap = s.defaultGIDMap + store, err := storage.GetStore(options) if err != nil { return nil, err } @@ -145,15 +184,11 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or -// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in -// a reference object. If the _graphroot_ is a location other than the default, -// it needs to have been previously opened using storage.GetStore(), so that it -// can figure out which run root goes with the graph root. 
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or +// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", +// tries to figure out which it is, and returns it in a reference object. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { - store, err := s.GetStore() - if err != nil { - return nil, err - } + var store storage.Store // Check if there's a store location prefix. If there is, then it // needs to match a store that was previously initialized using // storage.GetStore(), or be enough to let the storage library fill out @@ -165,37 +200,65 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc } storeSpec := reference[1:closeIndex] reference = reference[closeIndex+1:] - storeInfo := strings.SplitN(storeSpec, "@", 2) - if len(storeInfo) == 1 && storeInfo[0] != "" { - // One component: the graph root. - if !filepath.IsAbs(storeInfo[0]) { - return nil, ErrPathNotAbsolute + // Peel off a "driver@" from the start. + driverInfo := "" + driverSplit := strings.SplitN(storeSpec, "@", 2) + if len(driverSplit) != 2 { + if storeSpec == "" { + return nil, ErrInvalidReference } - store2, err := storage.GetStore(storage.StoreOptions{ - GraphRoot: storeInfo[0], - }) - if err != nil { - return nil, err - } - store = store2 - } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { - // Two components: the driver type and the graph root. - if !filepath.IsAbs(storeInfo[1]) { - return nil, ErrPathNotAbsolute - } - store2, err := storage.GetStore(storage.StoreOptions{ - GraphDriverName: storeInfo[0], - GraphRoot: storeInfo[1], - }) - if err != nil { - return nil, err - } - store = store2 } else { - // Anything else: store specified in a form we don't - // recognize. 
- return nil, ErrInvalidReference + driverInfo = driverSplit[0] + if driverInfo == "" { + return nil, ErrInvalidReference + } + storeSpec = driverSplit[1] + if storeSpec == "" { + return nil, ErrInvalidReference + } } + // Peel off a ":options" from the end. + var options []string + optionsSplit := strings.SplitN(storeSpec, ":", 2) + if len(optionsSplit) == 2 { + options = strings.Split(optionsSplit[1], ",") + storeSpec = optionsSplit[0] + } + // Peel off a "+runroot" from the new end. + runRootInfo := "" + runRootSplit := strings.SplitN(storeSpec, "+", 2) + if len(runRootSplit) == 2 { + runRootInfo = runRootSplit[1] + storeSpec = runRootSplit[0] + } + // The rest is our graph root. + rootInfo := storeSpec + // Check that any paths are absolute paths. + if rootInfo != "" && !filepath.IsAbs(rootInfo) { + return nil, ErrPathNotAbsolute + } + if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: driverInfo, + GraphRoot: rootInfo, + RunRoot: runRootInfo, + GraphDriverOptions: options, + UIDMap: s.defaultUIDMap, + GIDMap: s.defaultGIDMap, + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // We didn't have a store spec, so use the default. + store2, err := s.GetStore() + if err != nil { + return nil, err + } + store = store2 } return s.ParseStoreReference(store, reference) } @@ -250,7 +313,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return ErrPathNotAbsolute } } else { - // Anything else: store specified in a form we don't + // Anything else: scope specified in a form we don't // recognize. 
return ErrInvalidReference } diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go index dc70fadd..dd80b7f9 100644 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go @@ -12,7 +12,7 @@ import ( _ "github.com/containers/image/docker/daemon" _ "github.com/containers/image/oci/layout" _ "github.com/containers/image/openshift" - _ "github.com/containers/image/ostree" + // The ostree transport is registered by ostree*.go _ "github.com/containers/image/storage" "github.com/containers/image/transports" "github.com/containers/image/types" diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go new file mode 100644 index 00000000..0fc5d7ef --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/ostree.go @@ -0,0 +1,8 @@ +// +build !containers_image_ostree_stub + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/ostree" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go new file mode 100644 index 00000000..8b01afe7 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_ostree_stub + +package alltransports + +import "github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go new file mode 100644 index 00000000..087f69b6 --- /dev/null +++ b/vendor/github.com/containers/image/transports/stub.go @@ -0,0 
+1,36 @@ +package transports + +import ( + "fmt" + + "github.com/containers/image/types" +) + +// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +type stubTransport string + +// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +func NewStubTransport(name string) types.ImageTransport { + return stubTransport(name) +} + +// Name returns the name of the transport, which must be unique among other transports. +func (s stubTransport) Name() string { + return string(s) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { + return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { + // Allowing any reference in here allows tools with some transports stubbed-out to still + // use signature verification policies which refer to these stubbed-out transports. + // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . 
+ return nil +} diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go index 03969be7..c932806d 100644 --- a/vendor/github.com/containers/image/transports/transports.go +++ b/vendor/github.com/containers/image/transports/transports.go @@ -2,6 +2,7 @@ package transports import ( "fmt" + "sort" "sync" "github.com/containers/image/types" @@ -69,3 +70,15 @@ func Register(t types.ImageTransport) { func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + ref.StringWithinTransport() } + +// ListNames returns a list of transport names +func ListNames() []string { + kt.mu.Lock() + defer kt.mu.Unlock() + var names []string + for _, transport := range kt.transports { + names = append(names, transport.Name()) + } + sort.Strings(names) + return names +} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 6d571c17..0788904c 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -148,11 +148,11 @@ type ImageDestination interface { SupportsSignatures() error // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. ShouldCompressLayers() bool - // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. AcceptsForeignLayerURLs() bool - + // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. + MustMatchRuntimeOS() bool // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. 
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index ffc6e67a..d0fb1aad 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a -github.com/containers/storage 989b1c1d85f5dfe2076c67b54289cc13dc836c8c +github.com/containers/storage 105f7c77aef0c797429e41552743bf5b03b63263 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e @@ -15,7 +15,7 @@ github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc -github.com/opencontainers/image-spec v1.0.0-rc6 +github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302 @@ -34,4 +34,4 @@ github.com/xeipuuv/gojsonpointer master github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 -github.com/ostreedev/ostree-go 61532f383f1f48e5c27080b0b9c8b022c3706a97 +github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md index 4d100a35..00a47fd9 100644 --- a/vendor/github.com/containers/storage/README.md +++ b/vendor/github.com/containers/storage/README.md @@ -1,6 +1,6 @@ `storage` is a Go library which aims to provide methods for storing 
filesystem -layers, container images, and containers. An `oci-storage` CLI wrapper is also -included for manual and scripting use. +layers, container images, and containers. A `containers-storage` CLI wrapper +is also included for manual and scripting use. To build the CLI wrapper, use 'make build-binary'. diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 90a0bc0b..0908bdd1 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -50,6 +50,12 @@ type Container struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // Created is the datestamp for when this container was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + Flags map[string]interface{} `json:"flags,omitempty"` } @@ -93,7 +99,7 @@ type ContainerStore interface { type containerStore struct { lockfile Locker dir string - containers []Container + containers []*Container idindex *truncindex.TruncIndex byid map[string]*Container bylayer map[string]*Container @@ -101,7 +107,11 @@ type containerStore struct { } func (r *containerStore) Containers() ([]Container, error) { - return r.containers, nil + containers := make([]Container, len(r.containers)) + for i := range r.containers { + containers[i] = *(r.containers[i]) + } + return containers, nil } func (r *containerStore) containerspath() string { @@ -123,7 +133,7 @@ func (r *containerStore) Load() error { if err != nil && !os.IsNotExist(err) { return err } - containers := []Container{} + containers := []*Container{} layers := make(map[string]*Container) idlist := []string{} ids := make(map[string]*Container) @@ -131,14 +141,14 @@ func (r *containerStore) Load() error { if err = 
json.Unmarshal(data, &containers); len(data) == 0 || err == nil { for n, container := range containers { idlist = append(idlist, container.ID) - ids[container.ID] = &containers[n] - layers[container.LayerID] = &containers[n] + ids[container.ID] = containers[n] + layers[container.LayerID] = containers[n] for _, name := range container.Names { if conflict, ok := names[name]; ok { r.removeName(conflict, name) needSave = true } - names[name] = &containers[n] + names[name] = containers[n] } } } @@ -148,7 +158,6 @@ func (r *containerStore) Load() error { r.bylayer = layers r.byname = names if needSave { - r.Touch() return r.Save() } return nil @@ -163,6 +172,7 @@ func (r *containerStore) Save() error { if err != nil { return err } + defer r.Touch() return ioutils.AtomicWriteFile(rpath, jdata, 0600) } @@ -179,7 +189,7 @@ func newContainerStore(dir string) (ContainerStore, error) { cstore := containerStore{ lockfile: lockfile, dir: dir, - containers: []Container{}, + containers: []*Container{}, byid: make(map[string]*Container), bylayer: make(map[string]*Container), byname: make(map[string]*Container), @@ -241,7 +251,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat } } if err == nil { - newContainer := Container{ + container = &Container{ ID: id, Names: names, ImageID: image, @@ -249,10 +259,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat Metadata: metadata, BigDataNames: []string{}, BigDataSizes: make(map[string]int64), + Created: time.Now().UTC(), Flags: make(map[string]interface{}), } - r.containers = append(r.containers, newContainer) - container = &r.containers[len(r.containers)-1] + r.containers = append(r.containers, container) r.byid[id] = container r.idindex.Add(id) r.bylayer[layer] = container @@ -306,10 +316,11 @@ func (r *containerStore) Delete(id string) error { return ErrContainerUnknown } id = container.ID - newContainers := []Container{} - for _, candidate := range r.containers { - 
if candidate.ID != id { - newContainers = append(newContainers, candidate) + toDeleteIndex := -1 + for i, candidate := range r.containers { + if candidate.ID == id { + toDeleteIndex = i + break } } delete(r.byid, id) @@ -318,7 +329,14 @@ func (r *containerStore) Delete(id string) error { for _, name := range container.Names { delete(r.byname, name) } - r.containers = newContainers + if toDeleteIndex != -1 { + // delete the container at toDeleteIndex + if toDeleteIndex == len(r.containers)-1 { + r.containers = r.containers[:len(r.containers)-1] + } else { + r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) + } + } if err := r.Save(); err != nil { return err } @@ -437,6 +455,10 @@ func (r *containerStore) Modified() (bool, error) { return r.lockfile.Modified() } +func (r *containerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + func (r *containerStore) TouchedSince(when time.Time) bool { return r.lockfile.TouchedSince(when) } diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 8caa91fe..4742892a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -47,6 +47,7 @@ import ( rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) var ( @@ -81,7 +82,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // Try to load the aufs kernel module if err := supportsAufs(); err != nil { - return nil, graphdriver.ErrNotSupported + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") } fsMagic, err := graphdriver.GetFSMagic(root) @@ -95,7 +96,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, 
graphdriver.FsMagicEcryptfs: logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) } paths := []string{ @@ -372,6 +373,12 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) { }) } +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} + type fileGetNilCloser struct { storage.FileGetter } diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go index d030b066..c807902d 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -2,7 +2,7 @@ package aufs -import "errors" +import "github.com/pkg/errors" // MsRemount declared to specify a non-linux system mount. 
const MsRemount = 0 diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 5bcee11f..9e16f894 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -29,6 +29,7 @@ import ( "github.com/containers/storage/pkg/parsers" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) func init() { @@ -55,7 +56,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } if fsMagic != graphdriver.FsMagicBtrfs { - return nil, graphdriver.ErrPrerequisites + return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home) } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -518,3 +519,9 @@ func (d *Driver) Exists(id string) bool { _, err := os.Stat(dir) return err == nil } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index caa7474c..1304f8a7 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -5,7 +5,6 @@ package devmapper import ( "bufio" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -28,10 +27,10 @@ import ( "github.com/containers/storage/pkg/loopback" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/storageversion" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) var ( @@ -1475,7 +1474,7 @@ func determineDriverCapabilities(version string) error { 
versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) if err != nil { - return graphdriver.ErrNotSupported + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) } if major > 4 { @@ -1489,7 +1488,7 @@ func determineDriverCapabilities(version string) error { minor, err := strconv.Atoi(versionSplit[1]) if err != nil { - return graphdriver.ErrNotSupported + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) } /* @@ -1656,11 +1655,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { version, err := devicemapper.GetDriverVersion() if err != nil { // Can't even get driver version, assume not supported - return graphdriver.ErrNotSupported + return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper") } if err := determineDriverCapabilities(version); err != nil { - return graphdriver.ErrNotSupported + return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities") } if err := devices.enableDeferredRemovalDeletion(); err != nil { @@ -1668,17 +1667,17 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - if storageversion.IAmStatic == "true" { - logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") - } else { - logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. 
For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") - } - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } + // if supported := devicemapper.UdevSetSyncSupport(true); !supported { + // if storageversion.IAmStatic == "true" { + // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + // } else { + // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + // } + // + // if !devices.overrideUdevSyncCheck { + // return graphdriver.ErrNotSupported + // } + // } //create the root dir of the devmapper driver ownership to match this //daemon's remapped root uid/gid so containers can start properly @@ -1734,6 +1733,15 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { metadataFile *os.File ) + fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) + if err != nil { + return err + } + switch fsMagic { + case graphdriver.FsMagicAufs: + return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + } + if devices.dataDevice == "" { // Make sure the sparse images exist in /devicemapper/data @@ -1960,7 +1968,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro // If syncDelete is true, we want to return error. If deferred // deletion is not enabled, we return an error. If error is // something other then EBUSY, return an error. 
- if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { + if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy { logrus.Debugf("devmapper: Error deleting device: %s", err) return err } @@ -2115,7 +2123,7 @@ func (devices *DeviceSet) removeDevice(devname string) error { if err == nil { break } - if err != devicemapper.ErrBusy { + if errors.Cause(err) != devicemapper.ErrBusy { return err } @@ -2150,12 +2158,12 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { break } - if err == devicemapper.ErrEnxio { + if errors.Cause(err) == devicemapper.ErrEnxio { // Device is probably already gone. Return success. return nil } - if err != devicemapper.ErrBusy { + if errors.Cause(err) != devicemapper.ErrBusy { return err } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index a1174240..3b584c06 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -224,3 +224,9 @@ func (d *Driver) Put(id string) error { func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index cdf91d02..e04772d0 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -1,13 +1,13 @@ package graphdriver import ( - "errors" "fmt" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" + "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/storage" "github.com/containers/storage/pkg/archive" @@ -74,6 +74,8 @@ type ProtoDriver interface 
{ // held by the driver, e.g., unmounting all layered filesystems // known to this driver. Cleanup() error + // AdditionalImageStores returns additional image stores supported by the driver + AdditionalImageStores() []string } // Driver is the interface for layered/snapshot file system drivers. @@ -142,7 +144,7 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I return pluginDriver, nil } logrus.Errorf("Failed to GetDriver graph %s %s", name, home) - return nil, ErrNotSupported + return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home) } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins @@ -151,7 +153,7 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) } logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, ErrNotSupported + return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) } // New creates the driver and initializes it at the specified root. @@ -226,7 +228,8 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools. // isDriverNotSupported returns true if the error initializing // the graph driver is a non-supported error. 
func isDriverNotSupported(err error) bool { - return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS + cause := errors.Cause(err) + return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index 5658a28d..6e1f2ee3 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -53,7 +53,7 @@ const ( var ( // Slice of drivers that should be used in an order priority = []string{ - "overlay2", + "overlay", "devicemapper", "aufs", "btrfs", diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 29719ffa..d8dc63f4 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -20,6 +20,7 @@ import ( "unsafe" log "github.com/Sirupsen/logrus" + "github.com/pkg/errors" ) const ( @@ -56,7 +57,7 @@ func Mounted(fsType FsMagic, mountPath string) (bool, error) { (buf.f_basetype[3] != 0) { log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) C.free(unsafe.Pointer(buf)) - return false, ErrPrerequisites + return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath) } C.free(unsafe.Pointer(buf)) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 6c1642cb..af659b0f 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -4,12 +4,12 @@ package overlay import ( "bufio" - "errors" "fmt" "io/ioutil" 
"os" "os/exec" "path" + "path/filepath" "strconv" "strings" "syscall" @@ -26,6 +26,7 @@ import ( "github.com/containers/storage/pkg/parsers/kernel" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) var ( @@ -82,6 +83,7 @@ type Driver struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter + opts *overlayOptions } var backingFs = "" @@ -100,7 +102,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool } if err := supportsOverlay(); err != nil { - return nil, graphdriver.ErrNotSupported + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") } // require kernel 4.0.0 to ensure multiple lower dirs are supported @@ -110,7 +112,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool } if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { if !opts.overrideKernelCheck { - return nil, graphdriver.ErrNotSupported + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") } logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") } @@ -127,7 +129,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool switch fsMagic { case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -149,6 +151,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + opts: 
opts, } return d, nil @@ -170,6 +173,7 @@ func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.ID type overlayOptions struct { overrideKernelCheck bool + imageStores []string } func parseOptions(options []string) (*overlayOptions, error) { @@ -186,6 +190,22 @@ func parseOptions(options []string) (*overlayOptions, error) { if err != nil { return nil, err } + case "overlay.imagestore": + // Additional read only image stores to use for lower paths + for _, store := range strings.Split(val, ",") { + store = filepath.Clean(store) + if !filepath.IsAbs(store) { + return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) + } + st, err := os.Stat(store) + if err != nil { + return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: image path %q must be a directory", store) + } + o.imageStores = append(o.imageStores, store) + } default: return nil, fmt.Errorf("overlay: Unknown option %s", key) } @@ -211,7 +231,7 @@ func supportsOverlay() error { } } logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return graphdriver.ErrNotSupported + return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") } func (d *Driver) String() string { @@ -357,8 +377,18 @@ func (d *Driver) getLower(parent string) (string, error) { return strings.Join(lowers, ":"), nil } -func (d *Driver) dir(id string) string { - return path.Join(d.home, id) +func (d *Driver) dir(val string) string { + newpath := path.Join(d.home, val) + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + l := path.Join(p, d.name, val) + _, err = os.Stat(l) + if err == nil { + return l + } + } + } + return newpath } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -366,11 +396,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { - lp, err := os.Readlink(path.Join(d.home, s)) + lower := d.dir(s) + lp, err := os.Readlink(lower) if err != nil { return nil, err } - lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp))) + lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) } } else if !os.IsNotExist(err) { return nil, err @@ -411,6 +442,31 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { return "", err } + newlowers := "" + for _, l := range strings.Split(string(lowers), ":") { + lower := "" + newpath := path.Join(d.home, l) + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + lower = path.Join(p, d.name, l) + if _, err2 := os.Stat(lower); err2 == nil { + break + } + lower = "" + } + if lower == "" { + return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + } + } else { + lower = l + } + if newlowers == "" { + newlowers = lower + } else { + newlowers = newlowers + ":" + lower + } + } + mergedDir := path.Join(dir, "merged") if count := d.ctr.Increment(mergedDir); count > 1 { return mergedDir, nil @@ -424,7 +480,7 @@ 
func (d *Driver) Get(id string, mountLabel string) (s string, err error) { }() workDir := path.Join(dir, "work") - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work")) mountLabel = label.FormatMountLabel(opts, mountLabel) if len(mountLabel) > syscall.Getpagesize() { return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel)) @@ -527,3 +583,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return archive.OverlayChanges(layers, diffPath) } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return d.opts.imageStores +} diff --git a/vendor/github.com/containers/storage/drivers/proxy.go b/vendor/github.com/containers/storage/drivers/proxy.go index 0e4a5b8e..d56b8731 100644 --- a/vendor/github.com/containers/storage/drivers/proxy.go +++ b/vendor/github.com/containers/storage/drivers/proxy.go @@ -3,10 +3,10 @@ package graphdriver import ( - "errors" "fmt" "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" ) type graphDriverProxy struct { diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 5dd934fd..ff7a88f1 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -143,3 +143,9 @@ func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go 
b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index 8fd17e6c..62cc96b2 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -20,6 +20,7 @@ import ( "github.com/containers/storage/pkg/parsers" zfs "github.com/mistifyio/go-zfs" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) type zfsOptions struct { @@ -47,13 +48,13 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri if _, err := exec.LookPath("zfs"); err != nil { logrus.Debugf("[zfs] zfs command is not available: %v", err) - return nil, graphdriver.ErrPrerequisites + return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") } file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) if err != nil { logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) - return nil, graphdriver.ErrPrerequisites + return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) } defer file.Close() @@ -403,3 +404,9 @@ func (d *Driver) Exists(id string) bool { defer d.Unlock() return d.filesystemsCache[d.zfsPath(id)] == true } + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index a25e2a45..3ec1837c 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -7,6 +7,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/containers/storage/drivers" + "github.com/pkg/errors" ) func checkRootdirFs(rootdir string) error { @@ -18,7 +19,7 @@ func checkRootdirFs(rootdir string) error { // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) } return nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index 6aa41d90..392adae2 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -6,6 +6,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/containers/storage/drivers" + "github.com/pkg/errors" ) func checkRootdirFs(rootdir string) error { @@ -16,7 +17,7 @@ func checkRootdirFs(rootdir string) error { if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) } return nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go index 56d09cac..25e89044 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go @@ -22,6 +22,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/containers/storage/drivers" + "github.com/pkg/errors" ) func checkRootdirFs(rootdir string) error { @@ -34,7 +35,7 @@ func checkRootdirFs(rootdir string) error { (buf.f_basetype[3] != 0) { log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) C.free(unsafe.Pointer(buf)) - return graphdriver.ErrPrerequisites + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) } 
C.free(unsafe.Pointer(buf)) diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 6d9a7b58..fe17f631 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -2,7 +2,6 @@ package storage import ( "encoding/json" - "errors" "io/ioutil" "os" "path/filepath" @@ -11,6 +10,7 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" + "github.com/pkg/errors" ) var ( @@ -46,24 +46,20 @@ type Image struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + // Created is the datestamp for when this image was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + Flags map[string]interface{} `json:"flags,omitempty"` } -// ImageStore provides bookkeeping for information about Images. -type ImageStore interface { - FileBasedStore - MetadataStore - BigDataStore - FlaggableStore - - // Create creates an image that has a specified ID (or a random one) and - // optional names, using the specified layer as its topmost (hopefully - // read-only) layer. That layer can be referenced by multiple images. - Create(id string, names []string, layer, metadata string) (*Image, error) - - // SetNames replaces the list of names associated with an image with the - // supplied values. - SetNames(id string, names []string) error +// ROImageStore provides bookkeeping for information about Images. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore // Exists checks if there is an image with the given ID or name. 
Exists(id string) bool @@ -71,12 +67,6 @@ type ImageStore interface { // Get retrieves information about an image given an ID or name. Get(id string) (*Image, error) - // Delete removes the record of the image. - Delete(id string) error - - // Wipe removes records of all images. - Wipe() error - // Lookup attempts to translate a name to an ID. Most methods do this // implicitly. Lookup(name string) (string, error) @@ -85,17 +75,45 @@ type ImageStore interface { Images() ([]Image, error) } +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWBigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. + SetNames(id string, names []string) error + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. 
+ Wipe() error +} + type imageStore struct { lockfile Locker dir string - images []Image + images []*Image idindex *truncindex.TruncIndex byid map[string]*Image byname map[string]*Image } func (r *imageStore) Images() ([]Image, error) { - return r.images, nil + images := make([]Image, len(r.images)) + for i := range r.images { + images[i] = *(r.images[i]) + } + return images, nil } func (r *imageStore) imagespath() string { @@ -111,41 +129,46 @@ func (r *imageStore) datapath(id, key string) string { } func (r *imageStore) Load() error { - needSave := false + shouldSave := false rpath := r.imagespath() data, err := ioutil.ReadFile(rpath) if err != nil && !os.IsNotExist(err) { return err } - images := []Image{} + images := []*Image{} idlist := []string{} ids := make(map[string]*Image) names := make(map[string]*Image) if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { for n, image := range images { - ids[image.ID] = &images[n] + ids[image.ID] = images[n] idlist = append(idlist, image.ID) for _, name := range image.Names { if conflict, ok := names[name]; ok { r.removeName(conflict, name) - needSave = true + shouldSave = true } - names[name] = &images[n] + names[name] = images[n] } } } + if shouldSave && !r.IsReadWrite() { + return errors.New("image store assigns the same name to multiple images") + } r.images = images r.idindex = truncindex.NewTruncIndex(idlist) r.byid = ids r.byname = names - if needSave { - r.Touch() + if shouldSave { return r.Save() } return nil } func (r *imageStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) + } rpath := r.imagespath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { return err @@ -154,6 +177,7 @@ func (r *imageStore) Save() error { if err != nil { return err } + defer r.Touch() return ioutils.AtomicWriteFile(rpath, jdata, 0600) } @@ -170,7 +194,27 @@ func newImageStore(dir string) (ImageStore, 
error) { istore := imageStore{ lockfile: lockfile, dir: dir, - images: []Image{}, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func newROImageStore(dir string) (ROImageStore, error) { + lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), } @@ -193,6 +237,9 @@ func (r *imageStore) lookup(id string) (*Image, bool) { } func (r *imageStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + } image, ok := r.lookup(id) if !ok { return ErrImageUnknown @@ -202,6 +249,9 @@ func (r *imageStore) ClearFlag(id string, flag string) error { } func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + } image, ok := r.lookup(id) if !ok { return ErrImageUnknown @@ -210,7 +260,10 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { return r.Save() } -func (r *imageStore) Create(id string, names []string, layer, metadata string) (image *Image, err error) { +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) { + if !r.IsReadWrite() { + return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + } if id == "" { id = stringid.GenerateRandomID() _, idInUse := r.byid[id] @@ -227,18 +280,21 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string) ( return nil, ErrDuplicateName } } 
+ if created.IsZero() { + created = time.Now().UTC() + } if err == nil { - newImage := Image{ + image = &Image{ ID: id, Names: names, TopLayer: layer, Metadata: metadata, BigDataNames: []string{}, BigDataSizes: make(map[string]int64), + Created: created, Flags: make(map[string]interface{}), } - r.images = append(r.images, newImage) - image = &r.images[len(r.images)-1] + r.images = append(r.images, image) r.idindex.Add(id) r.byid[id] = image for _, name := range names { @@ -257,6 +313,9 @@ func (r *imageStore) Metadata(id string) (string, error) { } func (r *imageStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + } if image, ok := r.lookup(id); ok { image.Metadata = metadata return r.Save() @@ -269,6 +328,9 @@ func (r *imageStore) removeName(image *Image, name string) { } func (r *imageStore) SetNames(id string, names []string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) + } if image, ok := r.lookup(id); ok { for _, name := range image.Names { delete(r.byname, name) @@ -286,15 +348,18 @@ func (r *imageStore) SetNames(id string, names []string) error { } func (r *imageStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } image, ok := r.lookup(id) if !ok { return ErrImageUnknown } id = image.ID - newImages := []Image{} - for _, candidate := range r.images { - if candidate.ID != id { - newImages = append(newImages, candidate) + toDeleteIndex := -1 + for i, candidate := range r.images { + if candidate.ID == id { + toDeleteIndex = i } } delete(r.byid, id) @@ -302,7 +367,14 @@ func (r *imageStore) Delete(id string) error { for _, name := range image.Names { delete(r.byname, name) } - r.images = newImages + if toDeleteIndex != -1 { + // delete the 
image at toDeleteIndex + if toDeleteIndex == len(r.images)-1 { + r.images = r.images[:len(r.images)-1] + } else { + r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) + } + } if err := r.Save(); err != nil { return err } @@ -359,6 +431,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { } func (r *imageStore) SetBigData(id, key string, data []byte) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) + } image, ok := r.lookup(id) if !ok { return ErrImageUnknown @@ -393,6 +468,9 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error { } func (r *imageStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } ids := []string{} for id := range r.byid { ids = append(ids, id) @@ -421,6 +499,10 @@ func (r *imageStore) Modified() (bool, error) { return r.lockfile.Modified() } +func (r *imageStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + func (r *imageStore) TouchedSince(when time.Time) bool { return r.lockfile.TouchedSince(when) } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 9a5aac6f..7cdc2e25 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "encoding/json" - "errors" "io" "io/ioutil" "os" @@ -16,6 +15,8 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -66,6 +67,38 @@ type Layer struct { // mounted at the mount point. 
MountCount int `json:"-"` + // Created is the datestamp for when this layer was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // CompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. + CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` + + // CompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. If + // CompressedDigest is not set, this should be treated as if it were an + // uninitialized value. + CompressedSize int64 `json:"compressed-size,omitempty"` + + // UncompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. Often referred to + // as a DiffID. + UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + + // UncompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. If + // UncompressedDigest is not set, this should be treated as if it were + // an uninitialized value. + UncompressedSize int64 `json:"diff-size,omitempty"` + + // CompressionType is the type of compression which we detected on the blob + // that was last passed to ApplyDiff() or Put(). + CompressionType archive.Compression `json:"compression,omitempty"` + + // Flags is arbitrary data about the layer. Flags map[string]interface{} `json:"flags,omitempty"` } @@ -75,12 +108,74 @@ type layerMountPoint struct { MountCount int `json:"count"` } +// DiffOptions override the default behavior of Diff() methods. +type DiffOptions struct { + // Compression, if set overrides the default compressor when generating a diff. 
+ Compression *archive.Compression +} + +// ROLayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + + // Exists checks if a layer with the specified name or ID is known. + Exists(id string) bool + + // Get retrieves information about a layer given an ID or name. + Get(id string) (*Layer, error) + + // Status returns an slice of key-value pairs, suitable for human consumption, + // relaying whatever status information the underlying driver can share. + Status() ([][2]string, error) + + // Changes returns a slice of Change structures, which contain a pathname + // (Path) and a description of what sort of change (Kind) was made by the + // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a + // specified layer. By default, the layer's parent is used as a reference. + Changes(from, to string) ([]archive.Change, error) + + // Diff produces a tarstream which can be applied to a layer with the contents + // of the first layer to produce a layer with the contents of the second layer. + // By default, the parent of the second layer is used as the first + // layer, so it need not be specified. Options can be used to override + // default behavior, but are also not required. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // DiffSize produces an estimate of the length of the tarstream which would be + // produced by Diff. + DiffSize(from, to string) (int64, error) + + // Size produces a cached value for the uncompressed size of the layer, + // if one is known, or -1 if it is not known. If the layer can not be + // found, it returns an error. + Size(name string) (int64, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. 
+ Lookup(name string) (string, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // Layers returns a slice of the known layers. + Layers() ([]Layer, error) +} + // LayerStore wraps a graph driver, adding the ability to refer to layers by // name, and keeping track of parent-child relationships, along with a list of // all known layers. type LayerStore interface { - FileBasedStore - MetadataStore + ROLayerStore + RWFileBasedStore + RWMetadataStore FlaggableStore // Create creates a new layer, optionally giving it a specified ID rather than @@ -98,20 +193,10 @@ type LayerStore interface { // Put combines the functions of CreateWithFlags and ApplyDiff. Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error) - // Exists checks if a layer with the specified name or ID is known. - Exists(id string) bool - - // Get retrieves information about a layer given an ID or name. - Get(id string) (*Layer, error) - // SetNames replaces the list of names associated with a layer with the // supplied values. SetNames(id string, names []string) error - // Status returns an slice of key-value pairs, suitable for human consumption, - // relaying whatever status information the underlying driver can share. - Status() ([][2]string, error) - // Delete deletes a layer with the specified name or ID. Delete(id string) error @@ -126,48 +211,31 @@ type LayerStore interface { // Unmount unmounts a layer when it is no longer in use. 
Unmount(id string) error - // Changes returns a slice of Change structures, which contain a pathname - // (Path) and a description of what sort of change (Kind) was made by the - // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a - // specified layer. By default, the layer's parent is used as a reference. - Changes(from, to string) ([]archive.Change, error) - - // Diff produces a tarstream which can be applied to a layer with the contents - // of the first layer to produce a layer with the contents of the second layer. - // By default, the parent of the second layer is used as the first - // layer, so it need not be specified. - Diff(from, to string) (io.ReadCloser, error) - - // DiffSize produces an estimate of the length of the tarstream which would be - // produced by Diff. - DiffSize(from, to string) (int64, error) - // ApplyDiff reads a tarstream which was created by a previous call to Diff and // applies its changes to a specified layer. ApplyDiff(to string, diff archive.Reader) (int64, error) - - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - - // Layers returns a slice of the known layers. 
- Layers() ([]Layer, error) } type layerStore struct { - lockfile Locker - rundir string - driver drivers.Driver - layerdir string - layers []Layer - idindex *truncindex.TruncIndex - byid map[string]*Layer - byname map[string]*Layer - bymount map[string]*Layer + lockfile Locker + rundir string + driver drivers.Driver + layerdir string + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string } func (r *layerStore) Layers() ([]Layer, error) { - return r.layers, nil + layers := make([]Layer, len(r.layers)) + for i := range r.layers { + layers[i] = *(r.layers[i]) + } + return layers, nil } func (r *layerStore) mountspath() string { @@ -179,36 +247,41 @@ func (r *layerStore) layerspath() string { } func (r *layerStore) Load() error { - needSave := false + shouldSave := false rpath := r.layerspath() data, err := ioutil.ReadFile(rpath) if err != nil && !os.IsNotExist(err) { return err } - layers := []Layer{} + layers := []*Layer{} idlist := []string{} ids := make(map[string]*Layer) names := make(map[string]*Layer) mounts := make(map[string]*Layer) - parents := make(map[string][]*Layer) + compressedsums := make(map[digest.Digest][]string) + uncompressedsums := make(map[digest.Digest][]string) if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { for n, layer := range layers { - ids[layer.ID] = &layers[n] + ids[layer.ID] = layers[n] idlist = append(idlist, layer.ID) for _, name := range layer.Names { if conflict, ok := names[name]; ok { r.removeName(conflict, name) - needSave = true + shouldSave = true } - names[name] = &layers[n] + names[name] = layers[n] } - if pslice, ok := parents[layer.Parent]; ok { - parents[layer.Parent] = append(pslice, &layers[n]) - } else { - parents[layer.Parent] = []*Layer{&layers[n]} + if layer.CompressedDigest != "" { + compressedsums[layer.CompressedDigest] = 
append(compressedsums[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) } } } + if shouldSave && !r.IsReadWrite() { + return errors.New("layer store assigns the same name to multiple layers") + } mpath := r.mountspath() data, err = ioutil.ReadFile(mpath) if err != nil && !os.IsNotExist(err) { @@ -231,29 +304,35 @@ func (r *layerStore) Load() error { r.byid = ids r.byname = names r.bymount = mounts + r.bycompressedsum = compressedsums + r.byuncompressedsum = uncompressedsums err = nil - // Last step: try to remove anything that a previous user of this - // storage area marked for deletion but didn't manage to actually - // delete. - for _, layer := range r.layers { - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.Delete(layer.ID) - if err != nil { - break + // Last step: if we're writable, try to remove anything that a previous + // user of this storage area marked for deletion but didn't manage to + // actually delete. 
+ if r.IsReadWrite() { + for _, layer := range r.layers { + if cleanup, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := cleanup.(bool); ok && b { + err = r.Delete(layer.ID) + if err != nil { + break + } + shouldSave = true } - needSave = true } } - } - if needSave { - r.Touch() - return r.Save() + if shouldSave { + return r.Save() + } } return err } func (r *layerStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } rpath := r.layerspath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { return err @@ -283,6 +362,7 @@ func (r *layerStore) Save() error { if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { return err } + defer r.Touch() return ioutils.AtomicWriteFile(mpath, jmdata, 0600) } @@ -314,6 +394,28 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer return &rlstore, nil } +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { + lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + rlstore := layerStore{ + lockfile: lockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + func (r *layerStore) lookup(id string) (*Layer, bool) { if layer, ok := r.byid[id]; ok { return layer, ok @@ -326,7 +428,24 @@ func (r *layerStore) lookup(id string) (*Layer, bool) { return nil, false } +func (r *layerStore) Size(name string) (int64, error) { + layer, ok := r.lookup(name) + if !ok { + return -1, ErrLayerUnknown + } + // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that + // a zero value is not 
just present because it was never set to anything else (which can happen if the layer was + // created by a version of this library that didn't keep track of digest and size information). + if layer.UncompressedDigest != "" { + return layer.UncompressedSize, nil + } + return -1, nil +} + func (r *layerStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown @@ -336,6 +455,9 @@ func (r *layerStore) ClearFlag(id string, flag string) error { } func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown @@ -349,6 +471,9 @@ func (r *layerStore) Status() ([][2]string, error) { } func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { + if !r.IsReadWrite() { + return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) + } size = -1 if err := os.MkdirAll(r.rundir, 0700); err != nil { return nil, -1, err @@ -383,15 +508,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o err = r.driver.Create(id, parent, mountLabel, options) } if err == nil { - newLayer := Layer{ + layer = &Layer{ ID: id, Parent: parent, Names: names, MountLabel: mountLabel, + Created: time.Now().UTC(), Flags: make(map[string]interface{}), } - r.layers = append(r.layers, newLayer) - layer = &r.layers[len(r.layers)-1] + r.layers = append(r.layers, layer) r.idindex.Add(id) r.byid[id] = layer for _, name := range names { @@ -441,6 +566,9 @@ func (r *layerStore) Create(id, parent string, names 
[]string, mountLabel string } func (r *layerStore) Mount(id, mountLabel string) (string, error) { + if !r.IsReadWrite() { + return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } layer, ok := r.lookup(id) if !ok { return "", ErrLayerUnknown @@ -466,6 +594,9 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) { } func (r *layerStore) Unmount(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } layer, ok := r.lookup(id) if !ok { layerByMount, ok := r.bymount[filepath.Clean(id)] @@ -495,6 +626,9 @@ func (r *layerStore) removeName(layer *Layer, name string) { } func (r *layerStore) SetNames(id string, names []string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) + } if layer, ok := r.lookup(id); ok { for _, name := range layer.Names { delete(r.byname, name) @@ -519,6 +653,9 @@ func (r *layerStore) Metadata(id string) (string, error) { } func (r *layerStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) + } if layer, ok := r.lookup(id); ok { layer.Metadata = metadata return r.Save() @@ -531,6 +668,9 @@ func (r *layerStore) tspath(id string) string { } func (r *layerStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } layer, ok := r.lookup(id) if !ok { return ErrLayerUnknown @@ -549,13 +689,21 @@ func (r *layerStore) Delete(id string) error { if layer.MountPoint != "" { delete(r.bymount, layer.MountPoint) } - newLayers := []Layer{} - for _, candidate := range r.layers { - if candidate.ID != id { - newLayers = append(newLayers, candidate) + toDeleteIndex := -1 + 
for i, candidate := range r.layers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + if toDeleteIndex != -1 { + // delete the layer at toDeleteIndex + if toDeleteIndex == len(r.layers)-1 { + r.layers = r.layers[:len(r.layers)-1] + } else { + r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) } } - r.layers = newLayers if err = r.Save(); err != nil { return err } @@ -583,6 +731,9 @@ func (r *layerStore) Get(id string) (*Layer, error) { } func (r *layerStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } ids := []string{} for id := range r.byid { ids = append(ids, id) @@ -657,48 +808,64 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { }, nil } -func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) { +func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { var metadata storage.Unpacker from, to, toLayer, err := r.findParentAndLayer(from, to) if err != nil { return nil, ErrLayerUnknown } - compression := archive.Uncompressed - if cflag, ok := toLayer.Flags[compressionFlag]; ok { - if ctype, ok := cflag.(float64); ok { - compression = archive.Compression(ctype) - } else if ctype, ok := cflag.(archive.Compression); ok { - compression = archive.Compression(ctype) - } + // Default to applying the type of compression that we noted was used + // for the layerdiff when it was applied. + compression := toLayer.CompressionType + // If a particular compression type (or no compression) was selected, + // use that instead. 
+ if options != nil && options.Compression != nil { + compression = *options.Compression } + maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) { + // Depending on whether or not compression is desired, return either the + // passed-in ReadCloser, or a new one that provides its readers with a + // compressed version of the data that the original would have provided + // to its readers. + if compression == archive.Uncompressed { + return rc, nil + } + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + rc.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + defer pwriter.Close() + defer compressor.Close() + defer rc.Close() + io.Copy(compressor, rc) + }() + return preader, nil + } + if from != toLayer.Parent { diff, err := r.driver.Diff(to, from) - if err == nil && (compression != archive.Uncompressed) { - preader, pwriter := io.Pipe() - compressor, err := archive.CompressStream(pwriter, compression) - if err != nil { - diff.Close() - pwriter.Close() - return nil, err - } - go func() { - io.Copy(compressor, diff) - diff.Close() - compressor.Close() - pwriter.Close() - }() - diff = preader + if err != nil { + return nil, err } - return diff, err + return maybeCompressReadCloser(diff) } tsfile, err := os.Open(r.tspath(to)) if err != nil { - if os.IsNotExist(err) { - return r.driver.Diff(to, from) + if !os.IsNotExist(err) { + return nil, err } - return nil, err + diff, err := r.driver.Diff(to, from) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) } defer tsfile.Close() @@ -720,33 +887,16 @@ func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) { return nil, err } - var stream io.ReadCloser - if compression != archive.Uncompressed { - preader, pwriter := io.Pipe() - compressor, err := archive.CompressStream(pwriter, compression) - if err != nil { - fgetter.Close() - pwriter.Close() - preader.Close() - return nil, 
err - } - go func() { - asm.WriteOutputTarStream(fgetter, metadata, compressor) - compressor.Close() - pwriter.Close() - }() - stream = preader - } else { - stream = asm.NewOutputTarStream(fgetter, metadata) - } - return ioutils.NewReadCloserWrapper(stream, func() error { - err1 := stream.Close() + tarstream := asm.NewOutputTarStream(fgetter, metadata) + rc := ioutils.NewReadCloserWrapper(tarstream, func() error { + err1 := tarstream.Close() err2 := fgetter.Close() if err2 == nil { return err1 } return err2 - }), nil + }) + return maybeCompressReadCloser(rc) } func (r *layerStore) DiffSize(from, to string) (size int64, err error) { @@ -758,6 +908,10 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { } func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { + if !r.IsReadWrite() { + return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + } + layer, ok := r.lookup(to) if !ok { return -1, ErrLayerUnknown @@ -770,7 +924,9 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err } compression := archive.DetectCompression(header[:n]) - defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + compressedDigest := digest.Canonical.Digester() + compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash()) + defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter) tsdata := bytes.Buffer{} compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) @@ -778,15 +934,20 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err compressor = gzip.NewWriter(&tsdata) } metadata := storage.NewJSONPacker(compressor) - decompressed, err := archive.DecompressStream(defragmented) + uncompressed, err := archive.DecompressStream(defragmented) if err != nil { return -1, err } - payload, err := asm.NewInputTarStream(decompressed, metadata, storage.NewDiscardFilePutter()) 
+ uncompressedDigest := digest.Canonical.Digester() + uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash()) + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter()) if err != nil { return -1, err } size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload) + if err != nil { + return -1, err + } compressor.Close() if err == nil { if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { @@ -797,15 +958,57 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err } } - if compression != archive.Uncompressed { - layer.Flags[compressionFlag] = compression - } else { - delete(layer.Flags, compressionFlag) + updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID) + layer.CompressedDigest = compressedDigest.Digest() + layer.CompressedSize = compressedCounter.Count + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID) + layer.UncompressedDigest = uncompressedDigest.Digest() + layer.UncompressedSize = uncompressedCounter.Count + layer.CompressionType = compression + + err = r.Save() return size, err } +func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { + var layers []Layer + for _, layerID := range m[d] { + layer, ok := r.lookup(layerID) + if !ok { + return nil, ErrLayerUnknown + } + layers = append(layers, *layer) + } + return layers, nil +} + +func (r *layerStore) 
LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bycompressedsum, d) +} + +func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.byuncompressedsum, d) +} + func (r *layerStore) Lock() { r.lockfile.Lock() } @@ -822,6 +1025,10 @@ func (r *layerStore) Modified() (bool, error) { return r.lockfile.Modified() } +func (r *layerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + func (r *layerStore) TouchedSince(when time.Time) bool { return r.lockfile.TouchedSince(when) } diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index 33f5822f..6e09b526 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -1,14 +1,15 @@ package storage import ( + "fmt" "os" "path/filepath" "sync" "time" - "golang.org/x/sys/unix" - "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" + "golang.org/x/sys/unix" ) // A Locker represents a file lock where the file is used to cache an @@ -27,43 +28,80 @@ type Locker interface { // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. TouchedSince(when time.Time) bool + + // IsReadWrite() checks if the lock file is read-write + IsReadWrite() bool } type lockfile struct { - mu sync.Mutex - file string - fd uintptr - lw string + mu sync.Mutex + file string + fd uintptr + lw string + locktype int16 } var ( lockfiles map[string]*lockfile lockfilesLock sync.Mutex + // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write + ErrLockReadOnly = errors.New("lock is not a read-write lock") ) -// GetLockfile opens a lock file, creating it if necessary. The Locker object -// return will be returned unlocked. +// GetLockfile opens a read-write lock file, creating it if necessary. 
The +// Locker object it returns will be returned unlocked. func GetLockfile(path string) (Locker, error) { lockfilesLock.Lock() defer lockfilesLock.Unlock() if lockfiles == nil { lockfiles = make(map[string]*lockfile) } - if locker, ok := lockfiles[filepath.Clean(path)]; ok { + cleanPath := filepath.Clean(path) + if locker, ok := lockfiles[cleanPath]; ok { + if !locker.IsReadWrite() { + return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath) + } return locker, nil } - fd, err := unix.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) + fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "error opening %q", cleanPath) } - locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID()} + unix.CloseOnExec(fd) + locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK} lockfiles[filepath.Clean(path)] = locker return locker, nil } +// GetROLockfile opens a read-only lock file. The Locker object it returns +// will be returned unlocked. 
+func GetROLockfile(path string) (Locker, error) { + lockfilesLock.Lock() + defer lockfilesLock.Unlock() + if lockfiles == nil { + lockfiles = make(map[string]*lockfile) + } + cleanPath := filepath.Clean(path) + if locker, ok := lockfiles[cleanPath]; ok { + if locker.IsReadWrite() { + return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath) + } + return locker, nil + } + fd, err := unix.Open(cleanPath, os.O_RDONLY, 0) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q", cleanPath) + } + unix.CloseOnExec(fd) + locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK} + lockfiles[filepath.Clean(path)] = locker + return locker, nil +} + +// Lock locks the lock file func (l *lockfile) Lock() { lk := unix.Flock_t{ - Type: unix.F_WRLCK, + Type: l.locktype, Whence: int16(os.SEEK_SET), Start: 0, Len: 0, @@ -75,6 +113,7 @@ func (l *lockfile) Lock() { } } +// Unlock unlocks the lock file func (l *lockfile) Unlock() { lk := unix.Flock_t{ Type: unix.F_UNLCK, @@ -89,6 +128,7 @@ func (l *lockfile) Unlock() { l.mu.Unlock() } +// Touch updates the lock file with the UID of the user func (l *lockfile) Touch() error { l.lw = stringid.GenerateRandomID() id := []byte(l.lw) @@ -110,6 +150,7 @@ func (l *lockfile) Touch() error { return nil } +// Modified indicates if the lock file has been updated since the last time it was loaded func (l *lockfile) Modified() (bool, error) { id := []byte(l.lw) _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) @@ -128,6 +169,7 @@ func (l *lockfile) Modified() (bool, error) { return l.lw != lw, nil } +// TouchedSince indicates if the lock file has been touched since the specified time func (l *lockfile) TouchedSince(when time.Time) bool { st := unix.Stat_t{} err := unix.Fstat(int(l.fd), &st) @@ -137,3 +179,8 @@ func (l *lockfile) TouchedSince(when time.Time) bool { touched := time.Unix(statTMtimeUnix(st)) return when.Before(touched) } + +// IsRWLock indicates if the lock file is a 
read-write lock +func (l *lockfile) IsReadWrite() bool { + return (l.locktype == unix.F_WRLCK) +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go index 971f45eb..f1ede0c1 100644 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -38,7 +38,15 @@ func getNextFreeLoopbackIndex() (int, error) { return index, err } -func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { +func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { + // Read information about the loopback file. + var st syscall.Stat_t + err = syscall.Fstat(int(sparseFile.Fd()), &st) + if err != nil { + logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + // Start looking for a free /dev/loop for { target := fmt.Sprintf("/dev/loop%d", index) @@ -77,6 +85,18 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil // Otherwise, we keep going with the loop continue } + + // Check if the loopback driver and underlying filesystem agree on the loopback file's + // device and inode numbers. + dev, ino, err := getLoopbackBackingFile(loopFile) + if err != nil { + logrus.Errorf("Error getting loopback backing file: %s", err) + return nil, ErrGetLoopbackBackingFile + } + if dev != st.Dev || ino != st.Ino { + logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) + } + // In case of success, we finished. Break the loop. 
break } @@ -110,7 +130,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) { } defer sparseFile.Close() - loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/plugins/discovery.go b/vendor/github.com/containers/storage/pkg/plugins/discovery.go index fdff71cf..4cb5a1a3 100644 --- a/vendor/github.com/containers/storage/pkg/plugins/discovery.go +++ b/vendor/github.com/containers/storage/pkg/plugins/discovery.go @@ -15,8 +15,8 @@ import ( var ( // ErrNotFound plugin not found ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/oci-storage/plugins" - specsPaths = []string{"/etc/oci-storage/plugins", "/usr/lib/oci-storage/plugins"} + socketsPath = "/run/containers/storage/plugins" + specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"} ) // localRegistry defines a registry that is local (using unix socket). diff --git a/vendor/github.com/containers/storage/pkg/plugins/plugins.go b/vendor/github.com/containers/storage/pkg/plugins/plugins.go index 9cf8aecf..f6662c4b 100644 --- a/vendor/github.com/containers/storage/pkg/plugins/plugins.go +++ b/vendor/github.com/containers/storage/pkg/plugins/plugins.go @@ -3,10 +3,11 @@ // // Storage discovers plugins by looking for them in the plugin directory whenever // a user or container tries to use one by name. UNIX domain socket files must -// be located under /run/oci-storage/plugins, whereas spec files can be located -// either under /etc/oci-storage/plugins or /usr/lib/oci-storage/plugins. This -// is handled by the Registry interface, which lets you list all plugins or get -// a plugin by its name if it exists. 
+// be located under /run/containers/storage/plugins, whereas spec files can be +// located either under /etc/containers/storage/plugins or +// /usr/lib/containers/storage/plugins. This is handled by the Registry +// interface, which lets you list all plugins or get a plugin by its name if it +// exists. // // The plugins need to implement an HTTP server and bind this to the UNIX socket // or the address specified in the spec files. diff --git a/vendor/github.com/containers/storage/storageversion/version_lib.go b/vendor/github.com/containers/storage/storageversion/version_lib.go deleted file mode 100644 index 34a531a9..00000000 --- a/vendor/github.com/containers/storage/storageversion/version_lib.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !containersstorageautogen - -// Package storageversion is auto-generated at build-time -package storageversion - -// Default build-time variable for library-import. -// This file is overridden on build with build-time informations. -const ( - GitCommit string = "library-import" - Version string = "library-import" - BuildTime string = "library-import" - IAmStatic string = "library-import" -) diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index d297097f..43efa940 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -2,7 +2,7 @@ package storage import ( "encoding/base64" - "errors" + "fmt" "io" "io/ioutil" "os" @@ -14,12 +14,14 @@ import ( // register all of the built-in drivers _ "github.com/containers/storage/drivers/register" + "github.com/BurntSushi/toml" drivers "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" - "github.com/containers/storage/storageversion" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) var ( @@ -51,43 
+53,62 @@ var ( ErrIncompleteOptions = errors.New("missing necessary StoreOptions") // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // DefaultStoreOptions is a reasonable default set of options. DefaultStoreOptions StoreOptions stores []*store storesLock sync.Mutex ) -// FileBasedStore wraps up the most common methods of the various types of file-based -// data stores that we implement. -type FileBasedStore interface { +// ROFileBasedStore wraps up the methods of the various types of file-based +// data stores that we implement which are needed for both read-only and +// read-write files. +type ROFileBasedStore interface { Locker // Load reloads the contents of the store from disk. It should be called // with the lock held. Load() error +} +// RWFileBasedStore wraps up the methods of various types of file-based data +// stores that we implement using read-write files. +type RWFileBasedStore interface { // Save saves the contents of the store to disk. It should be called with // the lock held, and Touch() should be called afterward before releasing the // lock. Save() error } -// MetadataStore wraps up methods for getting and setting metadata associated with IDs. -type MetadataStore interface { +// FileBasedStore wraps up the common methods of various types of file-based +// data stores that we implement. +type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore wraps a method for reading metadata associated with an ID. +type ROMetadataStore interface { // Metadata reads metadata associated with an item with the specified ID. 
Metadata(id string) (string, error) +} +// RWMetadataStore wraps a method for setting metadata associated with an ID. +type RWMetadataStore interface { // SetMetadata updates the metadata associated with the item with the specified ID. SetMetadata(id, metadata string) error } -// A BigDataStore wraps up the most common methods of the various types of -// file-based lookaside stores that we implement. -type BigDataStore interface { - // SetBigData stores a (potentially large) piece of data associated with this - // ID. - SetBigData(id, key string, data []byte) error +// MetadataStore wraps up methods for getting and setting metadata associated with IDs. +type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} +// An ROBigDataStore wraps up the read-only big-data related methods of the +// various types of file-based lookaside stores that we implement. +type ROBigDataStore interface { // BigData retrieves a (potentially large) piece of data associated with // this ID, if it has previously been set. BigData(id, key string) ([]byte, error) @@ -101,6 +122,21 @@ type BigDataStore interface { BigDataNames(id string) ([]string, error) } +// A RWBigDataStore wraps up the read-write big-data related methods of the +// various types of file-based lookaside stores that we implement. +type RWBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated with this + // ID. + SetBigData(id, key string, data []byte) error +} + +// A BigDataStore wraps up the most common big-data related methods of the +// various types of file-based lookaside stores that we implement. +type BigDataStore interface { + ROBigDataStore + RWBigDataStore +} + // A FlaggableStore can have flags set and cleared on items which it manages. type FlaggableStore interface { // ClearFlag removes a named flag from an item in the store. @@ -147,26 +183,48 @@ type Store interface { // by the Store. 
GraphDriver() (drivers.Driver, error) - // LayerStore obtains and returns a handle to the layer store object used by - // the Store. + // LayerStore obtains and returns a handle to the writeable layer store + // object used by the Store. Accessing this store directly will bypass + // locking and synchronization, so use it with care. LayerStore() (LayerStore, error) - // ImageStore obtains and returns a handle to the image store object used by - // the Store. + // ROLayerStore obtains additional read/only layer store objects used + // by the Store. Accessing these stores directly will bypass locking + // and synchronization, so use them with care. + ROLayerStores() ([]ROLayerStore, error) + + // ImageStore obtains and returns a handle to the writable image store + // object used by the Store. Accessing this store directly will bypass + // locking and synchronization, so use it with care. ImageStore() (ImageStore, error) - // ContainerStore obtains and returns a handle to the container store object - // used by the Store. + // ROImageStores obtains additional read/only image store objects used + // by the Store. Accessing these stores directly will bypass locking + // and synchronization, so use them with care. + ROImageStores() ([]ROImageStore, error) + + // ContainerStore obtains and returns a handle to the container store + // object used by the Store. Accessing this store directly will bypass + // locking and synchronization, so use it with care. ContainerStore() (ContainerStore, error) - // CreateLayer creates a new layer in the underlying storage driver, optionally - // having the specified ID (one will be assigned if none is specified), with - // the specified layer (or no layer) as its parent, and with optional names. - // (The writeable flag is ignored.) 
+ // CreateLayer creates a new layer in the underlying storage driver, + // optionally having the specified ID (one will be assigned if none is + // specified), with the specified layer (or no layer) as its parent, + // and with optional names. (The writeable flag is ignored.) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) - // PutLayer combines the functions of CreateLayer and ApplyDiff, marking the - // layer for automatic removal if applying the diff fails for any reason. + // PutLayer combines the functions of CreateLayer and ApplyDiff, + // marking the layer for automatic removal if applying the diff fails + // for any reason. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init { + // return + // } PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) // CreateImage creates a new image, optionally with the specified ID @@ -177,37 +235,39 @@ type Store interface { // convenience of its caller. CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) - // CreateContainer creates a new container, optionally with the specified ID - // (one will be assigned if none is specified), with optional names, - // using the specified image's top layer as the basis for the - // container's layer, and assigning the specified ID to that layer (one - // will be created if none is specified). A container is a layer which - // is associated with additional bookkeeping information which the - // library stores for the convenience of its caller. 
+ // CreateContainer creates a new container, optionally with the + // specified ID (one will be assigned if none is specified), with + // optional names, using the specified image's top layer as the basis + // for the container's layer, and assigning the specified ID to that + // layer (one will be created if none is specified). A container is a + // layer which is associated with additional bookkeeping information + // which the library stores for the convenience of its caller. CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) - // Metadata retrieves the metadata which is associated with a layer, image, - // or container (whichever the passed-in ID refers to). + // Metadata retrieves the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to). Metadata(id string) (string, error) - // SetMetadata updates the metadata which is associated with a layer, image, or - // container (whichever the passed-in ID refers to) to match the specified - // value. The metadata value can be retrieved at any time using Metadata, - // or using Layer, Image, or Container and reading the object directly. + // SetMetadata updates the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to) to match + // the specified value. The metadata value can be retrieved at any + // time using Metadata, or using Layer, Image, or Container and reading + // the object directly. SetMetadata(id, metadata string) error // Exists checks if there is a layer, image, or container which has the // passed-in ID or name. Exists(id string) bool - // Status asks for a status report, in the form of key-value pairs, from the - // underlying storage driver. The contents vary from driver to driver. + // Status asks for a status report, in the form of key-value pairs, + // from the underlying storage driver. 
The contents vary from driver + // to driver. Status() ([][2]string, error) - // Delete removes the layer, image, or container which has the passed-in ID or - // name. Note that no safety checks are performed, so this can leave images - // with references to layers which do not exist, and layers with references to - // parents which no longer exist. + // Delete removes the layer, image, or container which has the + // passed-in ID or name. Note that no safety checks are performed, so + // this can leave images with references to layers which do not exist, + // and layers with references to parents which no longer exist. Delete(id string) error // DeleteLayer attempts to remove the specified layer. If the layer is the @@ -227,43 +287,73 @@ type Store interface { // but the list of layers which would be removed is still returned. DeleteImage(id string, commit bool) (layers []string, err error) - // DeleteContainer removes the specified container and its layer. If there is - // no matching container, or if the container exists but its layer does not, an - // error will be returned. + // DeleteContainer removes the specified container and its layer. If + // there is no matching container, or if the container exists but its + // layer does not, an error will be returned. DeleteContainer(id string) error // Wipe removes all known layers, images, and containers. Wipe() error - // Mount attempts to mount a layer, image, or container for access, and returns - // the pathname if it succeeds. + // Mount attempts to mount a layer, image, or container for access, and + // returns the pathname if it succeeds. + // + // Note that we do some of this work in a child process. 
The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init { + // return + // } Mount(id, mountLabel string) (string, error) // Unmount attempts to unmount a layer, image, or container, given an ID, a // name, or a mount path. Unmount(id string) error - // Changes returns a summary of the changes which would need to be made to one - // layer to make its contents the same as a second layer. If the first layer - // is not specified, the second layer's parent is assumed. Each Change - // structure contains a Path relative to the layer's root directory, and a Kind - // which is either ChangeAdd, ChangeModify, or ChangeDelete. + // Changes returns a summary of the changes which would need to be made + // to one layer to make its contents the same as a second layer. If + // the first layer is not specified, the second layer's parent is + // assumed. Each Change structure contains a Path relative to the + // layer's root directory, and a Kind which is either ChangeAdd, + // ChangeModify, or ChangeDelete. Changes(from, to string) ([]archive.Change, error) - // DiffSize returns a count of the size of the tarstream which would specify - // the changes returned by Changes. + // DiffSize returns a count of the size of the tarstream which would + // specify the changes returned by Changes. DiffSize(from, to string) (int64, error) - // Diff returns the tarstream which would specify the changes returned by - // Changes. - Diff(from, to string) (io.ReadCloser, error) + // Diff returns the tarstream which would specify the changes returned + // by Changes. If options are passed in, they can override default + // behaviors. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) - // ApplyDiff applies a tarstream to a layer. Information about the tarstream - // is cached with the layer. 
Typically, a layer which is populated using a - // tarstream will be expected to not be modified in any other way, either - // before or after the diff is applied. + // ApplyDiff applies a tarstream to a layer. Information about the + // tarstream is cached with the layer. Typically, a layer which is + // populated using a tarstream will be expected to not be modified in + // any other way, either before or after the diff is applied. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init { + // return + // } ApplyDiff(to string, diff archive.Reader) (int64, error) + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // LayerSize returns a cached approximation of the layer's size, or -1 + // if we don't have a value on hand. + LayerSize(id string) (int64, error) + // Layers returns a list of the currently known layers. Layers() ([]Layer, error) @@ -279,12 +369,12 @@ type Store interface { // SetNames changes the list of names for a layer, image, or container. SetNames(id string, names []string) error - // ListImageBigData retrieves a list of the (possibly large) chunks of named - // data associated with an image. + // ListImageBigData retrieves a list of the (possibly large) chunks of + // named data associated with an image. ListImageBigData(id string) ([]string, error) - // ImageBigData retrieves a (possibly large) chunk of named data associated - // with an image. 
+ // ImageBigData retrieves a (possibly large) chunk of named data + // associated with an image. ImageBigData(id, key string) ([]byte, error) // ImageBigDataSize retrieves the size of a (possibly large) chunk @@ -379,6 +469,9 @@ type Store interface { // ImageOptions is used for passing options to a Store's CreateImage() method. type ImageOptions struct { + // CreationDate, if not zero, will override the default behavior of marking the image as having been + // created when CreateImage() was called, recording CreationDate instead. + CreationDate time.Time } // ContainerOptions is used for passing options to a Store's CreateContainer() method. @@ -396,7 +489,9 @@ type store struct { gidMap []idtools.IDMap graphDriver drivers.Driver layerStore LayerStore + roLayerStores []ROLayerStore imageStore ImageStore + roImageStores []ROImageStore containerStore ContainerStore } @@ -512,6 +607,9 @@ func (s *store) load() error { return err } s.layerStore = rls + if _, err := s.ROLayerStores(); err != nil { + return err + } gipath := filepath.Join(s.graphRoot, driverPrefix+"images") if err := os.MkdirAll(gipath, 0700); err != nil { @@ -522,6 +620,10 @@ func (s *store) load() error { return err } s.imageStore = ris + if _, err := s.ROImageStores(); err != nil { + return err + } + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") if err := os.MkdirAll(gcpath, 0700); err != nil { return err @@ -594,6 +696,32 @@ func (s *store) LayerStore() (LayerStore, error) { return s.layerStore, nil } +func (s *store) ROLayerStores() ([]ROLayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.roLayerStores != nil { + return s.roLayerStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + for _, store := range driver.AdditionalImageStores() { + 
glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.roLayerStores = append(s.roLayerStores, rls) + } + return s.roLayerStores, nil +} + func (s *store) ImageStore() (ImageStore, error) { if s.imageStore != nil { return s.imageStore, nil @@ -601,6 +729,26 @@ func (s *store) ImageStore() (ImageStore, error) { return nil, ErrLoadError } +func (s *store) ROImageStores() ([]ROImageStore, error) { + if len(s.roImageStores) != 0 { + return s.roImageStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + for _, store := range driver.AdditionalImageStores() { + gipath := filepath.Join(store, driverPrefix+"images") + ris, err := newROImageStore(gipath) + if err != nil { + return nil, err + } + s.roImageStores = append(s.roImageStores, ris) + } + return s.roImageStores, nil +} + func (s *store) ContainerStore() (ContainerStore, error) { if s.containerStore != nil { return s.containerStore, nil @@ -613,10 +761,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w if err != nil { return nil, -1, err } - ristore, err := s.ImageStore() - if err != nil { - return nil, -1, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, -1, err @@ -624,19 +768,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w rlstore.Lock() defer rlstore.Unlock() - defer rlstore.Touch() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } - ristore.Lock() - defer ristore.Unlock() - defer ristore.Touch() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() - defer rcstore.Touch() if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } @@ -668,49 +804,52 @@ func (s *store) CreateLayer(id, parent string, names []string, 
mountLabel string } func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - defer ristore.Touch() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - defer rcstore.Touch() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } if id == "" { id = stringid.GenerateRandomID() } - ilayer, err := rlstore.Get(layer) + rlstore, err := s.LayerStore() if err != nil { return nil, err } + stores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + stores = append([]ROLayerStore{rlstore}, stores...) 
+ var ilayer *Layer + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ilayer, err = rlstore.Get(layer) + if err == nil { + break + } + } if ilayer == nil { return nil, ErrLayerUnknown } layer = ilayer.ID - return ristore.Create(id, names, layer, metadata) + + ristore, err := s.ImageStore() + if err != nil { + return nil, err + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + creationDate := time.Now().UTC() + if options != nil { + creationDate = options.CreationDate + } + + return ristore.Create(id, names, layer, metadata, creationDate) } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -718,33 +857,11 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rlstore.Lock() defer rlstore.Unlock() - defer rlstore.Touch() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - defer rcstore.Touch() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - if id == "" { id = stringid.GenerateRandomID() } @@ -752,10 +869,27 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat imageTopLayer := "" imageID := "" if image != "" { - cimage, err := ristore.Get(image) + ristore, err := s.ImageStore() if err != nil { return nil, err } + stores, err := s.ROImageStores() + if err != nil { + return nil, err + } + stores = 
append([]ROImageStore{ristore}, stores...) + var cimage *Image + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + cimage, err = ristore.Get(image) + if err == nil { + break + } + } if cimage == nil { return nil, ErrImageUnknown } @@ -767,6 +901,15 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat return nil, err } layer = clayer.ID + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } container, err := rcstore.Create(id, names, imageID, layer, metadata) if err != nil || container == nil { rlstore.Delete(layer) @@ -805,15 +948,12 @@ func (s *store) SetMetadata(id, metadata string) error { } if rlstore.Exists(id) { - defer rlstore.Touch() return rlstore.SetMetadata(id, metadata) } if ristore.Exists(id) { - defer ristore.Touch() return ristore.SetMetadata(id, metadata) } if rcstore.Exists(id) { - defer rcstore.Touch() return rcstore.SetMetadata(id, metadata) } return ErrNotAnID @@ -824,37 +964,51 @@ func (s *store) Metadata(id string) (string, error) { if err != nil { return "", err } - ristore, err := s.ImageStore() + stores, err := s.ROLayerStores() if err != nil { return "", err } + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(id) { + return rlstore.Metadata(id) + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + istores = append([]ROImageStore{istore}, istores...) 
+ for _, ristore := range istores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + if ristore.Exists(id) { + return ristore.Metadata(id) + } + } + rcstore, err := s.ContainerStore() if err != nil { return "", err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - - if rlstore.Exists(id) { - return rlstore.Metadata(id) - } - if ristore.Exists(id) { - return ristore.Metadata(id) - } if rcstore.Exists(id) { return rcstore.Metadata(id) } @@ -862,92 +1016,84 @@ func (s *store) Metadata(id string) (string, error) { } func (s *store) ListImageBigData(id string) ([]string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } ristore, err := s.ImageStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROImageStores() + if err != nil { + return nil, err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + stores = append([]ROImageStore{ristore}, stores...) 
+ for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + bigDataNames, err := ristore.BigDataNames(id) + if err == nil { + return bigDataNames, err + } } - - return ristore.BigDataNames(id) + return nil, ErrImageUnknown } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return -1, err - } ristore, err := s.ImageStore() if err != nil { return -1, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROImageStores() + if err != nil { + return -1, err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + size, err := ristore.BigDataSize(id, key) + if err == nil { + return size, nil + } } - - return ristore.BigDataSize(id, key) + return -1, ErrSizeUnknown } func (s *store) ImageBigData(id, key string) ([]byte, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } ristore, err := s.ImageStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROImageStores() + if err != nil { + return nil, err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + stores = append([]ROImageStore{ristore}, stores...) 
+ for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + data, err := ristore.BigData(id, key) + if err == nil { + return data, nil + } } - return ristore.BigData(id, key) + return nil, ErrImageUnknown } func (s *store) SetImageBigData(id, key string, data []byte) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } ristore, err := s.ImageStore() if err != nil { return err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } ristore.Lock() defer ristore.Unlock() if modified, err := ristore.Modified(); modified || err != nil { @@ -958,29 +1104,11 @@ func (s *store) SetImageBigData(id, key string, data []byte) error { } func (s *store) ListContainerBigData(id string) ([]string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -991,29 +1119,10 @@ func (s *store) ListContainerBigData(id string) ([]string, error) { } func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return -1, err - } - ristore, err := s.ImageStore() - if err != nil { - return -1, err - } rcstore, err := s.ContainerStore() if err != nil { return -1, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - 
ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -1024,29 +1133,10 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) { } func (s *store) ContainerBigData(id, key string) ([]byte, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -1057,29 +1147,10 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) { } func (s *store) SetContainerBigData(id, key string, data []byte) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } rcstore, err := s.ContainerStore() if err != nil { return err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -1090,74 +1161,63 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error { } func (s *store) Exists(id string) bool { - rcstore, err := s.ContainerStore() + lstore, err := s.LayerStore() if err != nil { return false } + lstores, err := s.ROLayerStores() + if err != nil { 
+ return false + } + lstores = append([]ROLayerStore{lstore}, lstores...) + for _, rlstore := range lstores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(id) { + return true + } + } + ristore, err := s.ImageStore() if err != nil { return false } - rlstore, err := s.LayerStore() + stores, err := s.ROImageStores() if err != nil { return false } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + if ristore.Exists(id) { + return true + } } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + + rcstore, err := s.ContainerStore() + if err != nil { + return false } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - if rcstore.Exists(id) { return true } - if ristore.Exists(id) { - return true - } - return rlstore.Exists(id) + + return false } func (s *store) SetNames(id string, names []string) error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rlstore, err := s.LayerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - deduped := []string{} seen := 
make(map[string]bool) for _, name := range names { @@ -1167,12 +1227,41 @@ func (s *store) SetNames(id string, names []string) error { } } + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } if rlstore.Exists(id) { return rlstore.SetNames(id, deduped) } + + ristore, err := s.ImageStore() + if err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } if ristore.Exists(id) { return ristore.SetNames(id, deduped) } + + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } if rcstore.Exists(id) { return rcstore.SetNames(id, deduped) } @@ -1180,41 +1269,55 @@ func (s *store) SetNames(id string, names []string) error { } func (s *store) Names(id string) ([]string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } rlstore, err := s.LayerStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return nil, err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + stores = append([]ROLayerStore{rlstore}, stores...) 
+	for _, rlstore := range stores {
+		rlstore.Lock()
+		defer rlstore.Unlock()
+		if modified, err := rlstore.Modified(); modified || err != nil {
+			rlstore.Load()
+		}
+		if l, err := rlstore.Get(id); l != nil && err == nil {
+			return l.Names, nil
+		}
+	}
+
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return nil, err
+	}
+	ristores, err := s.ROImageStores()
+	if err != nil {
+		return nil, err
+	}
+	ristores = append([]ROImageStore{ristore}, ristores...)
+	for _, ristore := range ristores {
+		ristore.Lock()
+		defer ristore.Unlock()
+		if modified, err := ristore.Modified(); modified || err != nil {
+			ristore.Load()
+		}
+		if i, err := ristore.Get(id); i != nil && err == nil {
+			return i.Names, nil
+		}
+	}
+
+	rcstore, err := s.ContainerStore()
+	if err != nil {
+		return nil, err
 	}
 	rcstore.Lock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		rcstore.Load()
 	}
-
-	if l, err := rlstore.Get(id); l != nil && err == nil {
-		return l.Names, nil
-	}
-	if i, err := ristore.Get(id); i != nil && err == nil {
-		return i.Names, nil
-	}
 	if c, err := rcstore.Get(id); c != nil && err == nil {
 		return c.Names, nil
 	}
@@ -1294,8 +1397,6 @@ func (s *store) DeleteLayer(id string) error {
 	}
 
 	if rlstore.Exists(id) {
-		defer rlstore.Touch()
-		defer rcstore.Touch()
 		if l, err := rlstore.Get(id); err != nil {
 			id = l.ID
 		}
@@ -1314,7 +1415,7 @@
 	}
 	for _, image := range images {
 		if image.TopLayer == id {
-			return ErrLayerUsedByImage
+			return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
 		}
 	}
 	containers, err := rcstore.Containers()
@@ -1323,7 +1424,7 @@
 	}
 	for _, container := range containers {
 		if container.LayerID == id {
-			return ErrLayerUsedByContainer
+			return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID)
 		}
 	}
 	return rlstore.Delete(id)
@@ -1367,8 +1468,6 @@ func (s *store) DeleteImage(id 
string, commit bool) (layers []string, err error) return nil, err } id = image.ID - defer rlstore.Touch() - defer ristore.Touch() containers, err := rcstore.Containers() if err != nil { return nil, err @@ -1377,8 +1476,8 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) for _, container := range containers { aContainerByImage[container.ImageID] = container.ID } - if _, ok := aContainerByImage[id]; ok { - return nil, ErrImageUsedByContainer + if container, ok := aContainerByImage[id]; ok { + return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) } images, err := ristore.Images() if err != nil { @@ -1482,8 +1581,6 @@ func (s *store) DeleteContainer(id string) error { } if rcstore.Exists(id) { - defer rlstore.Touch() - defer rcstore.Touch() if container, err := rcstore.Get(id); err == nil { if rlstore.Exists(container.LayerID) { if err = rlstore.Delete(container.LayerID); err != nil { @@ -1540,8 +1637,6 @@ func (s *store) Delete(id string) error { } if rcstore.Exists(id) { - defer rlstore.Touch() - defer rcstore.Touch() if container, err := rcstore.Get(id); err == nil { if rlstore.Exists(container.LayerID) { if err = rlstore.Delete(container.LayerID); err != nil { @@ -1565,11 +1660,9 @@ func (s *store) Delete(id string) error { } } if ristore.Exists(id) { - defer ristore.Touch() return ristore.Delete(id) } if rlstore.Exists(id) { - defer rlstore.Touch() return rlstore.Delete(id) } return ErrLayerUnknown @@ -1591,19 +1684,16 @@ func (s *store) Wipe() error { rlstore.Lock() defer rlstore.Unlock() - defer rlstore.Touch() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } ristore.Lock() defer ristore.Unlock() - defer ristore.Touch() if modified, err := ristore.Modified(); modified || err != nil { ristore.Load() } rcstore.Lock() defer rcstore.Unlock() - defer rcstore.Touch() if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } @@ -1626,67 +1716,45 @@ func 
(s *store) Status() ([][2]string, error) { } func (s *store) Version() ([][2]string, error) { - return [][2]string{ - {"GitCommit", storageversion.GitCommit}, - {"Version", storageversion.Version}, - {"BuildTime", storageversion.BuildTime}, - }, nil + return [][2]string{}, nil } func (s *store) Mount(id, mountLabel string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID } rlstore, err := s.LayerStore() if err != nil { return "", err } - rlstore.Lock() defer rlstore.Unlock() - defer rlstore.Touch() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() + if rlstore.Exists(id) { + return rlstore.Mount(id, mountLabel) } - - if c, err := rcstore.Get(id); c != nil && err == nil { - id = c.LayerID - } - return rlstore.Mount(id, mountLabel) + return "", ErrLayerUnknown } func (s *store) Unmount(id string) error { - rcstore, err := s.ContainerStore() - if err != nil { - return err + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID } rlstore, err := s.LayerStore() if err != nil { return err } - rlstore.Lock() defer rlstore.Unlock() - defer rlstore.Touch() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() + if rlstore.Exists(id) { + return rlstore.Unmount(id) } - - if c, err := rcstore.Get(id); c != nil && err == nil { - id = c.LayerID - } - return rlstore.Unmount(id) + return ErrLayerUnknown } func (s *store) Changes(from, to string) ([]archive.Change, error) { @@ -1694,14 +1762,22 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) { if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if 
modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return nil, err } - - return rlstore.Changes(from, to) + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(to) { + return rlstore.Changes(from, to) + } + } + return nil, ErrLayerUnknown } func (s *store) DiffSize(from, to string) (int64, error) { @@ -1709,29 +1785,45 @@ func (s *store) DiffSize(from, to string) (int64, error) { if err != nil { return -1, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return -1, err } - - return rlstore.DiffSize(from, to) + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(to) { + return rlstore.DiffSize(from, to) + } + } + return -1, ErrLayerUnknown } -func (s *store) Diff(from, to string) (io.ReadCloser, error) { +func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { rlstore, err := s.LayerStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return nil, err } - - return rlstore.Diff(from, to) + stores = append([]ROLayerStore{rlstore}, stores...) 
+ for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(to) { + return rlstore.Diff(from, to, options) + } + } + return nil, ErrLayerUnknown } func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { @@ -1739,79 +1831,137 @@ func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { if err != nil { return -1, err } - rlstore.Lock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { rlstore.Load() } + if rlstore.Exists(to) { + return rlstore.ApplyDiff(to, diff) + } + return -1, ErrLayerUnknown +} - return rlstore.ApplyDiff(to, diff) +func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { + var layers []Layer + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + stores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + stores = append([]ROLayerStore{rlstore}, stores...) + + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + slayers, err := m(rlstore, d) + if err != nil { + return nil, err + } + layers = append(layers, slayers...) 
+ } + return layers, nil +} + +func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) +} + +func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) +} + +func (s *store) LayerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + lstores = append([]ROLayerStore{lstore}, lstores...) + for _, rlstore := range lstores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + if rlstore.Exists(id) { + return rlstore.Size(id) + } + } + return -1, ErrLayerUnknown } func (s *store) Layers() ([]Layer, error) { + var layers []Layer rlstore, err := s.LayerStore() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return nil, err } + stores = append([]ROLayerStore{rlstore}, stores...) - return rlstore.Layers() + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + slayers, err := rlstore.Layers() + if err != nil { + return nil, err + } + layers = append(layers, slayers...) 
+ } + return layers, nil } func (s *store) Images() ([]Image, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } + var images []Image ristore, err := s.ImageStore() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROImageStores() + if err != nil { + return nil, err } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + simages, err := ristore.Images() + if err != nil { + return nil, err + } + images = append(images, simages...) } - - return ristore.Images() + return images, nil } func (s *store) Containers() ([]Container, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -1827,13 +1977,24 @@ func (s *store) Layer(id string) (*Layer, error) { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() + stores, err := s.ROLayerStores() + if err != nil { + return nil, err } + stores = append([]ROLayerStore{rlstore}, stores...) 
- return rlstore.Get(id) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + layer, err := rlstore.Get(id) + if err == nil { + return layer, nil + } + } + return nil, ErrLayerUnknown } func (s *store) Image(id string) (*Image, error) { @@ -1841,88 +2002,66 @@ func (s *store) Image(id string) (*Image, error) { if err != nil { return nil, err } - rlstore, err := s.LayerStore() + stores, err := s.ROImageStores() + if err != nil { + return nil, err + } + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + image, err := ristore.Get(id) + if err == nil { + return image, nil + } + } + return nil, ErrImageUnknown +} + +func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { + images := []*Image{} + layer, err := s.Layer(id) if err != nil { return nil, err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - - return ristore.Get(id) -} - -func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { ristore, err := s.ImageStore() if err != nil { return nil, err } - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - - layer, err := rlstore.Get(id) + stores, err := s.ROImageStores() if err != nil { return nil, err } - images := []*Image{} - imageList, err := ristore.Images() - if err != nil { - return nil, 
err - } - for _, image := range imageList { - if image.TopLayer == layer.ID { - images = append(images, &image) + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + imageList, err := ristore.Images() + if err != nil { + return nil, err + } + for _, image := range imageList { + if image.TopLayer == layer.ID { + images = append(images, &image) + } } } - return images, nil } func (s *store) Container(id string) (*Container, error) { - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -1932,12 +2071,25 @@ func (s *store) Container(id string) (*Container, error) { return rcstore.Get(id) } -func (s *store) ContainerByLayer(id string) (*Container, error) { - ristore, err := s.ImageStore() +func (s *store) ContainerLayerID(id string) (string, error) { + rcstore, err := s.ContainerStore() if err != nil { - return nil, err + return "", err } - rlstore, err := s.LayerStore() + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + container, err := rcstore.Get(id) + if err != nil { + return "", err + } + return container.LayerID, nil +} + +func (s *store) ContainerByLayer(id string) (*Container, error) { + layer, err := s.Layer(id) if err != nil { return nil, err } @@ -1945,27 +2097,11 @@ func (s *store) 
ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - - layer, err := rlstore.Get(id) - if err != nil { - return nil, err - } containerList, err := rcstore.Containers() if err != nil { return nil, err @@ -1980,29 +2116,10 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { } func (s *store) ContainerDirectory(id string) (string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - ristore, err := s.ImageStore() - if err != nil { - return "", err - } rcstore, err := s.ContainerStore() if err != nil { return "", err } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -2023,29 +2140,11 @@ func (s *store) ContainerDirectory(id string) (string, error) { } func (s *store) ContainerRunDirectory(id string) (string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - ristore, err := s.ImageStore() - if err != nil { - return "", err - } rcstore, err := s.ContainerStore() if err != nil { return "", err } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } rcstore.Lock() defer 
rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { @@ -2145,7 +2244,7 @@ func (s *store) Shutdown(force bool) ([]string, error) { } } if len(mounted) > 0 && err == nil { - err = ErrLayerUsedByContainer + err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") } if err == nil { err = s.graphDriver.Cleanup() @@ -2187,11 +2286,64 @@ func stringSliceWithoutValue(slice []string, value string) []string { return modified } +const configFile = "/etc/containers/storage.conf" + +// OptionsConfig represents the "storage.options" TOML config table. +type OptionsConfig struct { + // AdditionalImagesStores is the location of additional read/only + // Image stores. Usually used to access Networked File System + // for shared image content + AdditionalImageStores []string `toml:"additionalimagestores"` +} + +// TOML-friendly explicit tables used for conversions. +type tomlConfig struct { + Storage struct { + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + Options struct{ OptionsConfig } `toml:"options"` + } `toml:"storage"` +} + func init() { DefaultStoreOptions.RunRoot = "/var/run/containers/storage" DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" - DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") - DefaultStoreOptions.GraphDriverOptions = strings.Split(os.Getenv("STORAGE_OPTS"), ",") + DefaultStoreOptions.GraphDriverName = "overlay" + + data, err := ioutil.ReadFile(configFile) + if err != nil { + if !os.IsNotExist(err) { + fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) + return + } + } + + config := new(tomlConfig) + + if _, err := toml.Decode(string(data), config); err != nil { + fmt.Printf("Failed to parse %s %v\n", configFile, err.Error()) + return + } + if config.Storage.Driver != "" { + DefaultStoreOptions.GraphDriverName = config.Storage.Driver + } + if config.Storage.RunRoot != "" { + DefaultStoreOptions.RunRoot = 
config.Storage.RunRoot + } + if config.Storage.GraphRoot != "" { + DefaultStoreOptions.GraphRoot = config.Storage.GraphRoot + } + for _, s := range config.Storage.Options.AdditionalImageStores { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) + } + + if os.Getenv("STORAGE_DRIVER") != "" { + DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") + } + if os.Getenv("STORAGE_OPTS") != "" { + DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) + } if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" { DefaultStoreOptions.GraphDriverOptions = nil } diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index c7070f44..0f3adb02 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -1,3 +1,4 @@ +github.com/BurntSushi/toml master github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 github.com/Microsoft/hcsshim 0f615c198a84e0344b4ed49c464d8833d4648dfc github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d @@ -7,9 +8,11 @@ github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 github.com/go-check/check 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 +github.com/opencontainers/go-digest master github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 +github.com/pkg/errors master github.com/tchap/go-patricia v2.2.6 github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 00000000..35d81089 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. 
+ // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index 8475ff74..fe799bd6 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -37,7 +37,7 @@ type ImageConfig struct { // Cmd defines the default arguments to the entrypoint of the container. Cmd []string `json:"Cmd,omitempty"` - // Volumes is a set of directories which should be created as data volumes in a container running this image. 
+ // Volumes is a set of directories describing where the process is likely write data specific to a container instance. Volumes map[string]struct{} `json:"Volumes,omitempty"` // WorkingDir sets the current working directory of the entrypoint process in the container. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index f4cda6ed..e3eee29b 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc6-dev" + VersionDev = "" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/ostreedev/ostree-go/LICENSE b/vendor/github.com/ostreedev/ostree-go/LICENSE new file mode 100644 index 00000000..aa93b4da --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/LICENSE @@ -0,0 +1,17 @@ +Portions of this code are derived from: + +https://github.com/dradtke/gotk3 + +Copyright (c) 2013 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/ostreedev/ostree-go/README.md b/vendor/github.com/ostreedev/ostree-go/README.md new file mode 100644 index 00000000..c79010a0 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/README.md @@ -0,0 +1,4 @@ +OSTree-Go +========= + +Go bindings for OSTree. Find out more about OSTree [here](https://github.com/ostreedev/ostree) diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go new file mode 100644 index 00000000..a4ad0f00 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GBoolean + */ + +// GBoolean is a Go representation of glib's gboolean +type GBoolean C.gboolean + +func NewGBoolean() GBoolean { + return GBoolean(0) +} + +func GBool(b bool) GBoolean { + if b { + return GBoolean(1) + } + return GBoolean(0) +} + +func (b GBoolean) Ptr() unsafe.Pointer { + return unsafe.Pointer(&b) +} + +func GoBool(b GBoolean) bool { + if b != 0 { + return true + } + return false +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go new file mode 100644 index 00000000..537db472 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +import ( + "unsafe" +) + +// GIO types + +type GCancellable struct { + *GObject +} + +func (self *GCancellable) native() *C.GCancellable { + return (*C.GCancellable)(unsafe.Pointer(self)) +} + +func (self *GCancellable) Ptr() unsafe.Pointer { + return unsafe.Pointer(self) +} + +// At the moment, no cancellable API, just pass nil diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go new file mode 100644 index 00000000..714b15d0 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "errors" + "unsafe" +) + +/* + * GError + */ + +// GError is a representation of GLib's GError +type GError struct { + ptr unsafe.Pointer +} + +func NewGError() GError { + return GError{nil} +} + +func (e GError) Ptr() unsafe.Pointer { + if e.ptr == nil { + return nil + } + return e.ptr +} + +func (e GError) Nil() { + e.ptr = nil +} + +func (e *GError) native() *C.GError { + if e == nil || e.ptr == nil { + return nil + } + return (*C.GError)(e.ptr) +} + +func ToGError(ptr unsafe.Pointer) GError { + return GError{ptr} +} + +func ConvertGError(e GError) error { + defer C.g_error_free(e.native()) + return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native())))) +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go new file mode 100644 index 00000000..babe7050 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GFile + */ + +type GFile struct { + ptr unsafe.Pointer +} + +func (f GFile) Ptr() unsafe.Pointer { + return f.ptr +} + +func NewGFile() *GFile { + return &GFile{nil} +} + +func ToGFile(ptr unsafe.Pointer) *GFile { + gf := NewGFile() + gf.ptr = ptr + return gf +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go new file mode 100644 index 00000000..9c155834 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GFileInfo + */ + +type GFileInfo struct { + ptr unsafe.Pointer +} + +func (fi GFileInfo) Ptr() unsafe.Pointer { + return fi.ptr +} + +func NewGFileInfo() GFileInfo { + var fi GFileInfo = GFileInfo{nil} + return fi +} + +func ToGFileInfo(p unsafe.Pointer) *GFileInfo { + var fi *GFileInfo = &GFileInfo{} + fi.ptr = p + return fi +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go new file mode 100644 index 00000000..20cc321c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GHashTable + */ +type GHashTable struct { + ptr unsafe.Pointer +} + +func (ht *GHashTable) Ptr() unsafe.Pointer { + return ht.ptr +} + +func (ht *GHashTable) native() *C.GHashTable { + return (*C.GHashTable)(ht.ptr) +} + +func ToGHashTable(ptr unsafe.Pointer) *GHashTable { + return &GHashTable{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go new file mode 100644 index 00000000..1657edf5 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GHashTableIter + */ +type GHashTableIter struct { + ptr unsafe.Pointer +} + +func (ht *GHashTableIter) Ptr() unsafe.Pointer { + return ht.ptr +} + +func (ht *GHashTableIter) native() *C.GHashTableIter { + return (*C.GHashTableIter)(ht.ptr) +} + +func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter { + return &GHashTableIter{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go new file mode 100644 index 00000000..f3d3aa52 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h new file mode 100644 index 00000000..a55bd242 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h @@ -0,0 +1,17 @@ +#include + +static char * +_g_error_get_message (GError *error) +{ + g_assert (error != NULL); + return error->message; +} + +static const char * +_g_variant_lookup_string (GVariant *v, const char *key) +{ + const char *r; + if (g_variant_lookup (v, key, "&s", &r)) + return r; + return NULL; +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go new file mode 100644 index 00000000..dedbe749 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "unsafe" +) + +/* + * GObject + */ + +// IObject is an interface type implemented by Object and all types which embed +// an Object. It is meant to be used as a type for function arguments which +// require GObjects or any subclasses thereof. +type IObject interface { + toGObject() *C.GObject + ToObject() *GObject +} + +// GObject is a representation of GLib's GObject. +type GObject struct { + ptr unsafe.Pointer +} + +func (v *GObject) Ptr() unsafe.Pointer { + return v.ptr +} + +func (v *GObject) native() *C.GObject { + if v == nil { + return nil + } + return (*C.GObject)(v.ptr) +} + +func (v *GObject) Ref() { + C.g_object_ref(C.gpointer(v.Ptr())) +} + +func (v *GObject) Unref() { + C.g_object_unref(C.gpointer(v.Ptr())) +} + +func (v *GObject) RefSink() { + C.g_object_ref_sink(C.gpointer(v.native())) +} + +func (v *GObject) IsFloating() bool { + c := C.g_object_is_floating(C.gpointer(v.native())) + return GoBool(GBoolean(c)) +} + +func (v *GObject) ForceFloating() { + C.g_object_force_floating(v.native()) +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go new file mode 100644 index 00000000..05fd54a1 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +import ( + "unsafe" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" + +/* + * GOptionContext + */ + +type GOptionContext struct { + ptr unsafe.Pointer +} + +func (oc *GOptionContext) Ptr() unsafe.Pointer { + return oc.ptr +} + +func (oc *GOptionContext) native() *C.GOptionContext { + return (*C.GOptionContext)(oc.ptr) +} + +func ToGOptionContext(ptr unsafe.Pointer) GOptionContext { + return GOptionContext{ptr} +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go new file mode 100644 index 00000000..30572ea8 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013 Conformal Systems + * + * This file originated from: http://opensource.conformal.com/ + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package glibobject + +// #cgo pkg-config: glib-2.0 gobject-2.0 +// #include +// #include +// #include +// #include "glibobject.go.h" +// #include +import "C" +import ( + "fmt" + "unsafe" +) + +/* + * GVariant + */ + +type GVariant struct { + ptr unsafe.Pointer +} + +//func GVariantNew(p unsafe.Pointer) *GVariant { +//o := &GVariant{p} +//runtime.SetFinalizer(o, (*GVariant).Unref) +//return o; +//} + +//func GVariantNewSink(p unsafe.Pointer) *GVariant { +//o := &GVariant{p} +//runtime.SetFinalizer(o, (*GVariant).Unref) +//o.RefSink() +//return o; +//} + +func (v *GVariant) native() *C.GVariant { + return (*C.GVariant)(v.ptr) +} + +func (v *GVariant) Ptr() unsafe.Pointer { + return v.ptr +} + +func (v *GVariant) Ref() { + C.g_variant_ref(v.native()) +} + +func (v *GVariant) Unref() { + C.g_variant_unref(v.native()) +} + +func (v *GVariant) RefSink() { + C.g_variant_ref_sink(v.native()) +} + +func (v *GVariant) TypeString() string { + cs := (*C.char)(C.g_variant_get_type_string(v.native())) + return C.GoString(cs) +} + +func (v *GVariant) GetChildValue(i int) *GVariant { + cchild := C.g_variant_get_child_value(v.native(), C.gsize(i)) + return (*GVariant)(unsafe.Pointer(cchild)) +} + +func (v *GVariant) LookupString(key string) (string, error) { + ckey := C.CString(key) + defer C.free(unsafe.Pointer(ckey)) + // TODO: Find a way to have constant C strings in golang + cstr := C._g_variant_lookup_string(v.native(), ckey) + if cstr == nil { + return "", fmt.Errorf("No such key: %s", key) + } + return C.GoString(cstr), nil +} + +func ToGVariant(ptr unsafe.Pointer) *GVariant { + return &GVariant{ptr} +} diff --git 
a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admin.go.h new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admincleanup.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindeploy.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admindiff.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininit.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/admininstutil.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminosinit.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminsetorigin.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminstatus.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go 
b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminswitch.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminundeploy.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminunlock.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go b/vendor/github.com/ostreedev/ostree-go/pkg/otadmin/adminupgrade.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go new file mode 100644 index 00000000..d3a8ae5f --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go @@ -0,0 +1,93 @@ +// Package otbuiltin contains all of the basic commands for creating and +// interacting with an ostree repository +package otbuiltin + +import ( + "errors" + "fmt" + "runtime" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +type Repo struct { + //*glib.GObject + ptr unsafe.Pointer +} + +// Converts an ostree repo struct to its C equivalent +func (r *Repo) native() *C.OstreeRepo { + //return (*C.OstreeRepo)(r.Ptr()) + return (*C.OstreeRepo)(r.ptr) +} + +// Takes a C ostree repo and converts it to a Go struct +func repoFromNative(p *C.OstreeRepo) *Repo { + if p == nil { + return nil + } + //o := (*glib.GObject)(unsafe.Pointer(p)) + //r := &Repo{o} + r := &Repo{unsafe.Pointer(p)} + return r +} + +// Checks if the repo has been initialized +func (r *Repo) isInitialized() bool { + if r.ptr != nil { + return true + } + return false +} + +// Attempts to open the repo at the 
given path +func OpenRepo(path string) (*Repo, error) { + var cerr *C.GError = nil + cpath := C.CString(path) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + crepo := C.ostree_repo_new(pathc) + repo := repoFromNative(crepo) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr))) + if !r { + return nil, generateError(cerr) + } + return repo, nil +} + +// Enable support for tombstone commits, which allow the repo to distinguish between +// commits that were intentionally deleted and commits that were removed accidentally +func enableTombstoneCommits(repo *Repo) error { + var tombstoneCommits bool + var config *C.GKeyFile = C.ostree_repo_get_config(repo.native()) + var cerr *C.GError + + tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil))) + + //tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file + if !tombstoneCommits { + C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) { + return generateError(cerr) + } + } + return nil +} + +func generateError(err *C.GError) error { + goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err))) + _, file, line, ok := runtime.Caller(1) + if ok { + return errors.New(fmt.Sprintf("%s:%d - %s", file, line, goErr)) + } else { + return goErr + } +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h new file mode 100644 index 00000000..734de982 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h @@ -0,0 +1,191 @@ +#ifndef BUILTIN_GO_H +#define BUILTIN_GO_H + +#include +#include +#include +#include + +static guint32 owner_uid; +static guint32 owner_gid; + 
+static void +_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) { + *flags |= flag; +} + +struct CommitFilterData { + GHashTable *mode_adds; + GHashTable *skip_list; +}; + +typedef struct CommitFilterData CommitFilterData; + +static char* _gptr_to_str(gpointer p) +{ + return (char*)p; +} + +// The following 3 functions are wrapper functions for macros since CGO can't parse macros +static OstreeRepoFile* +_ostree_repo_file(GFile *file) +{ + return OSTREE_REPO_FILE (file); +} + +static guint +_gpointer_to_uint (gpointer ptr) +{ + return GPOINTER_TO_UINT (ptr); +} + +static gpointer +_guint_to_pointer (guint u) +{ + return GUINT_TO_POINTER (u); +} + +static void +_g_clear_object (volatile GObject **object_ptr) +{ + g_clear_object(object_ptr); +} + +static const GVariantType* +_g_variant_type (char *type) +{ + return G_VARIANT_TYPE (type); +} + +static int +_at_fdcwd () +{ + return AT_FDCWD; +} + +static guint64 +_guint64_from_be (guint64 val) +{ + return GUINT64_FROM_BE (val); +} + + + +// These functions are wrappers for variadic functions since CGO can't parse variadic functions +static void +_g_printerr_onearg (char* msg, + char* arg) +{ + g_printerr("%s %s\n", msg, arg); +} + +static void +_g_set_error_onearg (GError *err, + char* msg, + char* arg) +{ + g_set_error(&err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg); +} + +static void +_g_variant_builder_add_twoargs (GVariantBuilder* builder, + const char *format_string, + char *arg1, + GVariant *arg2) +{ + g_variant_builder_add(builder, format_string, arg1, arg2); +} + +static GHashTable* +_g_hash_table_new_full () +{ + return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); +} + +static void +_g_variant_get_commit_dump (GVariant *variant, + const char *format, + char **subject, + char **body, + guint64 *timestamp) +{ + return g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL); +} + +static guint32 +_binary_or (guint32 a, 
guint32 b) +{ + return a | b; +} + +static void +_cleanup (OstreeRepo *self, + OstreeRepoCommitModifier *modifier, + GCancellable *cancellable, + GError **out_error) +{ + if (self) + ostree_repo_abort_transaction(self, cancellable, out_error); + if (modifier) + ostree_repo_commit_modifier_unref (modifier); +} + +// The following functions make up a commit_filter function that gets passed into +// another C function (and thus can't be a go function) as well as its helpers +static OstreeRepoCommitFilterResult +_commit_filter (OstreeRepo *self, + const char *path, + GFileInfo *file_info, + gpointer user_data) +{ + struct CommitFilterData *data = user_data; + GHashTable *mode_adds = data->mode_adds; + GHashTable *skip_list = data->skip_list; + gpointer value; + + if (owner_uid >= 0) + g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid); + if (owner_gid >= 0) + g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid); + + if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value)) + { + guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); + guint mode_add = GPOINTER_TO_UINT (value); + g_file_info_set_attribute_uint32 (file_info, "unix::mode", + current_mode | mode_add); + g_hash_table_remove (mode_adds, path); + } + + if (skip_list && g_hash_table_contains (skip_list, path)) + { + g_hash_table_remove (skip_list, path); + return OSTREE_REPO_COMMIT_FILTER_SKIP; + } + + return OSTREE_REPO_COMMIT_FILTER_ALLOW; +} + + +static void +_set_owner_uid (guint32 uid) +{ + owner_uid = uid; +} + +static void _set_owner_gid (guint32 gid) +{ + owner_gid = gid; +} + +// Wrapper function for a function that takes a C function as a parameter. 
+// That translation doesn't work in go +static OstreeRepoCommitModifier* +_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags, + gpointer user_data, + GDestroyNotify destroy_notify) +{ + return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify); +} + +#endif diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go new file mode 100644 index 00000000..55b51bfb --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go @@ -0,0 +1,102 @@ +package otbuiltin + +import ( + "strings" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Global variable for options +var checkoutOpts checkoutOptions + +// Contains all of the options for checking commits out of +// an ostree repo +type checkoutOptions struct { + UserMode bool // Do not change file ownership or initialize extended attributes + Union bool // Keep existing directories and unchanged files, overwriting existing filesystem + AllowNoent bool // Do nothing if the specified filepath does not exist + DisableCache bool // Do not update or use the internal repository uncompressed object caceh + Whiteouts bool // Process 'whiteout' (docker style) entries + RequireHardlinks bool // Do not fall back to full copies if hard linking fails + Subpath string // Checkout sub-directory path + FromFile string // Process many checkouts from the given file +} + +// Instantiates and returns a checkoutOptions struct with default values set +func 
NewCheckoutOptions() checkoutOptions { + return checkoutOptions{} +} + +// Checks out a commit with the given ref from a repository at the location of repo path to to the destination. Returns an error if the checkout could not be processed +func Checkout(repoPath, destination, commit string, opts checkoutOptions) error { + checkoutOpts = opts + + var cancellable *glib.GCancellable + ccommit := C.CString(commit) + defer C.free(unsafe.Pointer(ccommit)) + var gerr = glib.NewGError() + cerr := (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + repoPathc := C.g_file_new_for_path(C.CString(repoPath)) + defer C.g_object_unref(C.gpointer(repoPathc)) + crepo := C.ostree_repo_new(repoPathc) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + if strings.Compare(checkoutOpts.FromFile, "") != 0 { + err := processManyCheckouts(crepo, destination, cancellable) + if err != nil { + return err + } + } else { + var resolvedCommit *C.char + defer C.free(unsafe.Pointer(resolvedCommit)) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) { + return generateError(cerr) + } + err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable) + if err != nil { + return err + } + } + return nil +} + +// Processes one checkout from the repo +func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error { + cdest := C.CString(destination) + defer C.free(unsafe.Pointer(cdest)) + var gerr = glib.NewGError() + cerr := (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions + + if checkoutOpts.UserMode { + repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER + } + if checkoutOpts.Union { + repoCheckoutAtOptions.overwrite_mode = 
C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES + } + + checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) + if !checkedOut { + return generateError(cerr) + } + + return nil +} + +// process many checkouts +func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error { + return nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go new file mode 100644 index 00000000..9550f802 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go @@ -0,0 +1,482 @@ +package otbuiltin + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Declare global variable to store commitOptions +var options commitOptions + +// Declare a function prototype for being passed into another function +type handleLineFunc func(string, *glib.GHashTable) error + +// Contains all of the options for commmiting to an ostree repo. 
Initialize +// with NewCommitOptions() +type commitOptions struct { + Subject string // One line subject + Body string // Full description + Parent string // Parent of the commit + Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree + AddMetadataString []string // Add a key/value pair to metadata + AddDetachedMetadataString []string // Add a key/value pair to detached metadata + OwnerUID int // Set file ownership to user id + OwnerGID int // Set file ownership to group id + NoXattrs bool // Do not import extended attributes + LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository + TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed + SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing + StatOverrideFile string // File containing list of modifications to make permissions + SkipListFile string // File containing list of file paths to skip + GenerateSizes bool // Generate size information along with commit metadata + GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy) + GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy) + Timestamp time.Time // Override the timestamp of the commit + Orphan bool // Commit does not belong to a branch + Fsync bool // Specify whether fsync should be used or not. 
Default to true +} + +// Initializes a commitOptions struct and sets default values +func NewCommitOptions() commitOptions { + var co commitOptions + co.OwnerUID = -1 + co.OwnerGID = -1 + co.Fsync = true + return co +} + +type OstreeRepoTransactionStats struct { + metadata_objects_total int32 + metadata_objects_written int32 + content_objects_total int32 + content_objects_written int32 + content_bytes_written uint64 +} + +func (repo *Repo) PrepareTransaction() (bool, error) { + var cerr *C.GError = nil + var resume C.gboolean + + r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr))) + if !r { + return false, generateError(cerr) + } + return glib.GoBool(glib.GBoolean(resume)), nil +} + +func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) { + var cerr *C.GError = nil + var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{} + statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats)) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr))) + if !r { + return nil, generateError(cerr) + } + return &stats, nil +} + +func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) { + var cRemote *C.char = nil + var cRef *C.char = nil + var cChecksum *C.char = nil + + if remote != "" { + cRemote = C.CString(remote) + } + if ref != "" { + cRef = C.CString(ref) + } + if checksum != "" { + cChecksum = C.CString(checksum) + } + C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum) +} + +func (repo *Repo) AbortTransaction() error { + var cerr *C.GError = nil + r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr))) + if !r { + return generateError(cerr) + } + return nil +} + +func (repo *Repo) RegenerateSummary() error { + var cerr *C.GError = nil + r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr))) + if !r { + return 
generateError(cerr) + } + return nil +} + +// Commits a directory, specified by commitPath, to an ostree repo as a given branch +func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) { + options = opts + + var err error + var modeAdds *glib.GHashTable + var skipList *glib.GHashTable + var objectToCommit *glib.GFile + var skipCommit bool = false + var ccommitChecksum *C.char + defer C.free(unsafe.Pointer(ccommitChecksum)) + var flags C.OstreeRepoCommitModifierFlags = 0 + var filter_data C.CommitFilterData + + var cerr *C.GError + defer C.free(unsafe.Pointer(cerr)) + var metadata *C.GVariant = nil + defer func(){ + if metadata != nil { + defer C.g_variant_unref(metadata) + } + }() + + var detachedMetadata *C.GVariant = nil + defer C.free(unsafe.Pointer(detachedMetadata)) + var mtree *C.OstreeMutableTree + defer C.free(unsafe.Pointer(mtree)) + var root *C.GFile + defer C.free(unsafe.Pointer(root)) + var modifier *C.OstreeRepoCommitModifier + defer C.free(unsafe.Pointer(modifier)) + var cancellable *C.GCancellable + defer C.free(unsafe.Pointer(cancellable)) + + cpath := C.CString(commitPath) + defer C.free(unsafe.Pointer(cpath)) + csubject := C.CString(options.Subject) + defer C.free(unsafe.Pointer(csubject)) + cbody := C.CString(options.Body) + defer C.free(unsafe.Pointer(cbody)) + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + cparent := C.CString(options.Parent) + defer C.free(unsafe.Pointer(cparent)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { + goto out + } + + // If the user provided a stat override file + if strings.Compare(options.StatOverrideFile, "") != 0 { + modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) + if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil { + goto out + } + } + + // If the user provided a skiplist file + if strings.Compare(options.SkipListFile, "") != 0 { + 
skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) + if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil { + goto out + } + } + + if options.AddMetadataString != nil { + metadata, err = parseKeyValueStrings(options.AddMetadataString) + if err != nil { + goto out + } + } + + if options.AddDetachedMetadataString != nil { + _, err := parseKeyValueStrings(options.AddDetachedMetadataString) + if err != nil { + goto out + } + } + + if strings.Compare(branch, "") == 0 && !options.Orphan { + err = errors.New("A branch must be specified or use commitOptions.Orphan") + goto out + } + + if options.NoXattrs { + C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS) + } + if options.GenerateSizes { + C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) + } + if !options.Fsync { + C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE) + } + + if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs { + filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr()) + filter_data.skip_list = (*C.GHashTable)(skipList.Ptr()) + C._set_owner_uid((C.guint32)(options.OwnerUID)) + C._set_owner_gid((C.guint32)(options.OwnerGID)) + modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil) + } + + if strings.Compare(options.Parent, "") != 0 { + if strings.Compare(options.Parent, "none") == 0 { + options.Parent = "" + } + } else if !options.Orphan { + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) { + goto out + } + } + + if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) { + goto out + } + + mtree = C.ostree_mutable_tree_new() + + if len(commitPath) == 0 && (len(options.Tree) == 0 || 
len(options.Tree[0]) == 0) { + currentDir := (*C.char)(C.g_get_current_dir()) + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir))) + C.g_free(C.gpointer(currentDir)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else if len(options.Tree) != 0 { + var eq int = -1 + cerr = nil + for tree := range options.Tree { + eq = strings.Index(options.Tree[tree], "=") + if eq == -1 { + C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree])) + goto out + } + treeType := options.Tree[tree][:eq] + treeVal := options.Tree[tree][eq+1:] + + if strings.Compare(treeType, "dir") == 0 { + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else if strings.Compare(treeType, "tar") == 0 { + objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) { + fmt.Println("error 1") + goto out + } + } else if strings.Compare(treeType, "ref") == 0 { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) { + goto out + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } else { + C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal)) + goto out + } + } + } else { + 
objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath))) + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { + goto out + } + } + + if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 { + var hashIter *C.GHashTableIter + + var key, value C.gpointer + + C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(modeAdds.Ptr())) + + for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { + C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key)) + } + err = errors.New("Unmatched StatOverride paths") + C.free(unsafe.Pointer(hashIter)) + C.free(unsafe.Pointer(key)) + C.free(unsafe.Pointer(value)) + goto out + } + + if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 { + var hashIter *C.GHashTableIter + var key, value C.gpointer + + C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(skipList.Ptr())) + + for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { + C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key)) + } + err = errors.New("Unmatched SkipList paths") + C.free(unsafe.Pointer(hashIter)) + C.free(unsafe.Pointer(key)) + C.free(unsafe.Pointer(value)) + goto out + } + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) { + goto out + } + + if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 { + var parentRoot *C.GFile + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) { + C.free(unsafe.Pointer(parentRoot)) + goto out + } + + if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) { + skipCommit = true + } + C.free(unsafe.Pointer(parentRoot)) + } + + if !skipCommit { + var timestamp C.guint64 
+ + if options.Timestamp.IsZero() { + var now *C.GDateTime = C.g_date_time_new_now_utc() + timestamp = (C.guint64)(C.g_date_time_to_unix(now)) + C.g_date_time_unref(now) + + cerr = nil + ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr) + if !glib.GoBool(glib.GBoolean(ret)) { + goto out + } + } else { + timestamp = (C.guint64)(options.Timestamp.Unix()) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody, + metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) { + goto out + } + } + + if detachedMetadata != nil { + C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr) + } + + if len(options.GpgSign) != 0 { + for key := range options.GpgSign { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) { + goto out + } + } + } + + if strings.Compare(branch, "") != 0 { + C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum) + } else if !options.Orphan { + goto out + } else { + // TODO: Looks like I forgot to implement this. 
+ } + } else { + ccommitChecksum = C.CString(options.Parent) + } + + return C.GoString(ccommitChecksum), nil +out: + if repo.native() != nil { + C.ostree_repo_abort_transaction(repo.native(), cancellable, nil) + //C.free(unsafe.Pointer(repo.native())) + } + if modifier != nil { + C.ostree_repo_commit_modifier_unref(modifier) + } + if err != nil { + return "", err + } + return "", generateError(cerr) +} + +// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant +func parseKeyValueStrings(pairs []string) (*C.GVariant, error) { + builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}"))) + defer C.g_variant_builder_unref(builder) + + for iter := range pairs { + index := strings.Index(pairs[iter], "=") + if index <= 0 { + var buffer bytes.Buffer + buffer.WriteString("Missing '=' in KEY=VALUE metadata '%s'") + buffer.WriteString(pairs[iter]) + return nil, errors.New(buffer.String()) + } + + key := C.CString(pairs[iter][:index]) + value := C.CString(pairs[iter][index+1:]) + + valueVariant := C.g_variant_new_string((*C.gchar)(value)) + + C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant) + } + + metadata := C.g_variant_builder_end(builder) + return C.g_variant_ref_sink(metadata), nil +} + +// Parse a file line by line and handle the line with the handleLineFunc +func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error { + var contents *C.char + var file *glib.GFile + var lines []string + var gerr = glib.NewGError() + cerr := (*C.GError)(gerr.Ptr()) + + file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path)))) + if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) { + return generateError(cerr) + } + + lines = strings.Split(C.GoString(contents), "\n") + for line := range lines { + if strings.Compare(lines[line], "") == 0 { + continue + } + + if err := 
fn(lines[line], table); err != nil { + return generateError(cerr) + } + } + return nil +} + +// Handle an individual line from a Statoverride file +func handleStatOverrideLine(line string, table *glib.GHashTable) error { + var space int + var modeAdd C.guint + + if space = strings.IndexRune(line, ' '); space == -1 { + return errors.New("Malformed StatOverrideFile (no space found)") + } + + modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil)) + C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd)) + + return nil +} + +// Handle an individual line from a Skiplist file +func handleSkipListline(line string, table *glib.GHashTable) error { + C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line))))) + + return nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ 
b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go new file mode 100644 index 00000000..c1ca2dc7 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go @@ -0,0 +1,90 @@ +package otbuiltin + +import ( + "errors" + "strings" + "unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Declare variables for options +var initOpts initOptions + +// Contains all of the options for initializing an ostree repo +type initOptions struct { + Mode string // either bare, archive-z2, or bare-user + + repoMode C.OstreeRepoMode +} + +// Instantiates and returns an initOptions struct with default values set +func NewInitOptions() initOptions { + io := initOptions{} + io.Mode = "bare" + io.repoMode = C.OSTREE_REPO_MODE_BARE + return io +} + +// Initializes a new ostree repository at the given path. Returns true +// if the repo exists at the location, regardless of whether it was initialized +// by the function or if it already existed. 
Returns an error if the repo could +// not be initialized +func Init(path string, options initOptions) (bool, error) { + initOpts = options + err := parseMode() + if err != nil { + return false, err + } + + // Create a repo struct from the path + var cerr *C.GError + defer C.free(unsafe.Pointer(cerr)) + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + crepo := C.ostree_repo_new(pathc) + + // If the repo exists in the filesystem, return an error but set exists to true + /* var exists C.gboolean = 0 + success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr))) + if exists != 0 { + err = errors.New("repository already exists") + return true, err + } else if !success { + return false, generateError(cerr) + }*/ + + cerr = nil + created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr))) + if !created { + errString := generateError(cerr).Error() + if strings.Contains(errString, "File exists") { + return true, generateError(cerr) + } + return false, generateError(cerr) + } + return true, nil +} + +// Converts the mode string to a C.OSTREE_REPO_MODE enum value +func parseMode() error { + if strings.EqualFold(initOpts.Mode, "bare") { + initOpts.repoMode = C.OSTREE_REPO_MODE_BARE + } else if strings.EqualFold(initOpts.Mode, "bare-user") { + initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER + } else if strings.EqualFold(initOpts.Mode, "archive-z2") { + initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2 + } else { + return errors.New("Invalid option for mode") + } + return nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go new file mode 100644 index 00000000..2ceea092 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go @@ -0,0 +1,167 @@ +package otbuiltin + +import ( + "fmt" + "strings" + "time" + "unsafe" 
+ + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Declare variables for options +var logOpts logOptions + +// Set the format of the strings in the log
const formatString = "2006-01-02 03:04:05 -0700" + +// Struct for the various pieces of data in a log entry +type LogEntry struct { + Checksum []byte + Variant []byte + Timestamp time.Time + Subject string + Body string +} + +// Convert the log entry to a string +func (l LogEntry) String() string { + if len(l.Variant) == 0 { + return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body) + } + return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant) +} + +type OstreeDumpFlags uint + +const ( + OSTREE_DUMP_NONE OstreeDumpFlags = 0 + OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota +) + +// Contains all of the options for displaying the commit log of an ostree repo +type logOptions struct { + Raw bool // Show raw variant data +} + +// Instantiates and returns a logOptions struct with default values set +func NewLogOptions() logOptions { + return logOptions{} +} + +// Show the logs of a branch starting with a given commit or ref. 
Returns a +// slice of log entries on success and an error otherwise +func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) { + // attempt to open the repository + repo, err := OpenRepo(repoPath) + if err != nil { + return nil, err + } + + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + var checksum *C.char + defer C.free(unsafe.Pointer(checksum)) + var flags OstreeDumpFlags = OSTREE_DUMP_NONE + var cerr *C.GError + defer C.free(unsafe.Pointer(cerr)) + + if logOpts.Raw { + flags |= OSTREE_DUMP_RAW + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) { + return nil, generateError(cerr) + } + + return logCommit(repo, checksum, false, flags) +} + +func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) { + var variant *C.GVariant + var parent *C.char + defer C.free(unsafe.Pointer(parent)) + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + entries := make([]LogEntry, 0, 1) + var err error + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) { + if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) { + return nil, nil + } + return entries, generateError(cerr) + } + + nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags) + + // get the parent of this commit + parent = (*C.char)(C.ostree_commit_get_parent(variant)) + defer C.free(unsafe.Pointer(parent)) + if parent != nil { + entries, err = logCommit(repo, parent, true, flags) + if err != nil { + return nil, err + } + } + entries = append(entries, *nextLogEntry) + return entries, nil +} + +func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry { + objLog := new(LogEntry) + 
objLog.Checksum = []byte(C.GoString(checksum)) + + if (flags & OSTREE_DUMP_RAW) != 0 { + dumpVariant(objLog, variant) + return objLog + } + + switch objectType { + case C.OSTREE_OBJECT_TYPE_COMMIT: + dumpCommit(objLog, variant, flags) + return objLog + default: + return objLog + } +} + +func dumpVariant(log *LogEntry, variant *C.GVariant) { + var byteswappedVariant *C.GVariant + + if C.G_BYTE_ORDER != C.G_BIG_ENDIAN { + byteswappedVariant = C.g_variant_byteswap(variant) + log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) + } else { + log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) + } +} + +func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) { + var subject, body *C.char + defer C.free(unsafe.Pointer(subject)) + defer C.free(unsafe.Pointer(body)) + var timestamp C.guint64 + + C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, ×tamp) + + // Timestamp is now a Unix formatted timestamp as a guint64 + timestamp = C._guint64_from_be(timestamp) + log.Timestamp = time.Unix((int64)(timestamp), 0) + + if strings.Compare(C.GoString(subject), "") != 0 { + log.Subject = C.GoString(subject) + } + + if strings.Compare(C.GoString(body), "") != 0 { + log.Body = C.GoString(body) + } +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go new file mode 100644 index 00000000..8dfa40a5 --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go @@ -0,0 +1,217 @@ +package otbuiltin + +import ( + "bytes" + "errors" + "strconv" + "strings" + "time" + 
"unsafe" + + glib "github.com/ostreedev/ostree-go/pkg/glibobject" +) + +// #cgo pkg-config: ostree-1 +// #include +// #include +// #include +// #include "builtin.go.h" +import "C" + +// Declare gobal variable for options +var pruneOpts pruneOptions + +// Contains all of the options for pruning an ostree repo. Use +// NewPruneOptions() to initialize +type pruneOptions struct { + NoPrune bool // Only display unreachable objects; don't delete + RefsOnly bool // Only compute reachability via refs + DeleteCommit string // Specify a commit to delete + KeepYoungerThan time.Time // All commits older than this date will be pruned + Depth int // Only traverse depths (integer) parents for each commit (default: -1=infinite) + StaticDeltasOnly int // Change the behavior of --keep-younger-than and --delete-commit to prune only the static delta files +} + +// Instantiates and returns a pruneOptions struct with default values set +func NewPruneOptions() pruneOptions { + po := new(pruneOptions) + po.Depth = -1 + return *po +} + +// Search for unreachable objects in the repository given by repoPath. 
Removes the +// objects unless pruneOptions.NoPrune is specified +func Prune(repoPath string, options pruneOptions) (string, error) { + pruneOpts = options + // attempt to open the repository + repo, err := OpenRepo(repoPath) + if err != nil { + return "", err + } + + var pruneFlags C.OstreeRepoPruneFlags + var numObjectsTotal int + var numObjectsPruned int + var objSizeTotal uint64 + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + var cancellable *glib.GCancellable + + if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { + return "", generateError(cerr) + } + + cerr = nil + if strings.Compare(pruneOpts.DeleteCommit, "") != 0 { + if pruneOpts.NoPrune { + return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune") + } + + if pruneOpts.StaticDeltasOnly > 0 { + if glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return "", generateError(cerr) + } + } else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil { + return "", err + } + } + + if !pruneOpts.KeepYoungerThan.IsZero() { + if pruneOpts.NoPrune { + return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune") + } + + if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil { + return "", err + } + } + + if pruneOpts.RefsOnly { + pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY + } + if pruneOpts.NoPrune { + pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE + } + + formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0))) + + var buffer bytes.Buffer + + buffer.WriteString("Total objects: ") + buffer.WriteString(strconv.Itoa(numObjectsTotal)) + if numObjectsPruned == 0 { + buffer.WriteString("\nNo unreachable objects") + } 
else if pruneOpts.NoPrune { + buffer.WriteString("\nWould delete: ") + buffer.WriteString(strconv.Itoa(numObjectsPruned)) + buffer.WriteString(" objects, freeing ") + buffer.WriteString(formattedFreedSize) + } else { + buffer.WriteString("\nDeleted ") + buffer.WriteString(strconv.Itoa(numObjectsPruned)) + buffer.WriteString(" objects, ") + buffer.WriteString(formattedFreedSize) + buffer.WriteString(" freed") + } + + return buffer.String(), nil +} + +// Delete an unreachable commit from the repo +func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error { + var refs *glib.GHashTable + var hashIter glib.GHashTableIter + var hashkey, hashvalue C.gpointer + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + if glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, (**C.GHashTable)(refs.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(refs.Ptr())) + for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &hashkey, &hashvalue) != 0 { + var ref string = C.GoString((*C.char)(hashkey)) + var commit string = C.GoString((*C.char)(hashvalue)) + if strings.Compare(commitToDelete, commit) == 0 { + var buffer bytes.Buffer + buffer.WriteString("Commit ") + buffer.WriteString(commitToDelete) + buffer.WriteString(" is referenced by ") + buffer.WriteString(ref) + return errors.New(buffer.String()) + } + } + + if err := enableTombstoneCommits(repo); err != nil { + return err + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + return nil +} + +// Prune commits but keep any younger than the given date regardless of whether they +// are reachable +func pruneCommitsKeepYoungerThanDate(repo 
*Repo, date time.Time, cancellable *glib.GCancellable) error { + var objects *glib.GHashTable + defer C.free(unsafe.Pointer(objects)) + var hashIter glib.GHashTableIter + var key, value C.gpointer + defer C.free(unsafe.Pointer(key)) + defer C.free(unsafe.Pointer(value)) + var gerr = glib.NewGError() + var cerr = (*C.GError)(gerr.Ptr()) + defer C.free(unsafe.Pointer(cerr)) + + if err := enableTombstoneCommits(repo); err != nil { + return err + } + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, (**C.GHashTable)(objects.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + + C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(objects.Ptr())) + for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &key, &value) != 0 { + var serializedKey *glib.GVariant + defer C.free(unsafe.Pointer(serializedKey)) + var checksum *C.char + defer C.free(unsafe.Pointer(checksum)) + var objType C.OstreeObjectType + var commitTimestamp uint64 + var commit *glib.GVariant = nil + + C.ostree_object_name_deserialize((*C.GVariant)(serializedKey.Ptr()), &checksum, &objType) + + if objType != C.OSTREE_OBJECT_TYPE_COMMIT { + continue + } + + cerr = nil + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (**C.GVariant)(commit.Ptr()), &cerr))) { + return generateError(cerr) + } + + commitTimestamp = (uint64)(C.ostree_commit_get_timestamp((*C.GVariant)(commit.Ptr()))) + if commitTimestamp < (uint64)(date.Unix()) { + cerr = nil + if pruneOpts.StaticDeltasOnly != 0 { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + return generateError(cerr) + } + } else { + if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { + 
return generateError(cerr) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go new 
file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go new file mode 100644 index 00000000..d43ea07c --- /dev/null +++ b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go @@ -0,0 +1 @@ +package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remote.go.h new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteadd.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotedelete.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotegpgimport.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go 
b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotelist.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoterefs.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remoteshowurl.go new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otremote/remotesummary.go new file mode 100644 index 00000000..e69de29b