Bump image, storage, and image-spec

Bump containers/image (pulling in its new dependency on ostree-go),
containers/storage, and the updated image-spec.

This pulls in the OCI v1.0 specifications and code that allows us to
support 1.0 images.

Signed-off-by: Dan Walsh <dwalsh@redhat.com>
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Dan Walsh 2017-07-20 16:31:51 -04:00 committed by Nalin Dahyabhai
parent 5138691c3b
commit d76645680f
117 changed files with 3965 additions and 991 deletions

View File

@ -208,7 +208,7 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
continue
}
// Start reading the layer.
rc, err := i.store.Diff("", layerID)
rc, err := i.store.Diff("", layerID, nil)
if err != nil {
return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
}

View File

@ -5,15 +5,16 @@ k8s.io/apimachinery release-1.6 https://github.com/kubernetes/apimachinery
k8s.io/apiserver release-1.6 https://github.com/kubernetes/apiserver
#
github.com/Sirupsen/logrus v0.11.5
github.com/containers/image cd150088f25d1ac81c6dff3a0bf458725cb8b339
github.com/containers/storage 2c75d14b978bff468e7d5ec3ff8a003eca443209
github.com/containers/image c2a797dfe5bb4a9dd7f48332ce40c6223ffba492
github.com/ostreedev/ostree-go master
github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be
github.com/containernetworking/cni v0.4.0
google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go
github.com/opencontainers/selinux v1.0.0-rc1
github.com/opencontainers/go-digest v1.0.0-rc0
github.com/opencontainers/runtime-tools 20db5990713e97e64bc2d340531d61f2edf4cccb
github.com/opencontainers/runc c5ec25487693612aed95673800863e134785f946
github.com/opencontainers/image-spec v1.0.0-rc6
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runtime-spec v1.0.0
github.com/juju/ratelimit acf38b000a03e4ab89e40f20f1e548f4e6ac7f72
github.com/tchap/go-patricia v2.2.6

View File

@ -51,14 +51,20 @@ Ensure that the dependencies documented [in vendor.conf](https://github.com/cont
are also available
(using those exact versions or different versions of your choosing).
This library, by default, also depends on the GpgME C library. Either install it:
This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
```sh
Fedora$ dnf install gpgme-devel libassuan-devel
Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
macOS$ brew install gpgme
```
or use the `containers_image_openpgp` build tag (e.g. using `go build -tags …`)
This will use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
### Supported build tags
- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
the primary downside is that creating new signatures with the Golang-only implementation is not supported.
- `containers_image_ostree_stub`: Instead of importing the `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries.
(Note that explicitly importing `github.com/containers/image/ostree` will still depend on the `libostree` library, this build tag only affects generic users of …`/alltransports`.)
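
For illustration, a minimal sketch (not part of this patch) of what the stub tag means for a caller of `…/alltransports`; the reference string is made up:

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// In a normal build (which requires libostree at link time), this
	// parses successfully. Built with
	//   go build -tags containers_image_ostree_stub
	// the stub transport is registered instead, and ParseImageName returns
	// an error of the form:
	//   The transport "ostree:" is not supported in this build
	ref, err := alltransports.ParseImageName("ostree:example")
	if err != nil {
		fmt.Println("ostree transport unavailable:", err)
		return
	}
	fmt.Println("parsed:", ref.Transport().Name(), ref.StringWithinTransport())
}
```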
## Contributing

View File

@ -7,6 +7,7 @@ import (
"io"
"io/ioutil"
"reflect"
"runtime"
"strings"
"time"
@ -157,6 +158,10 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
}
}()
if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil {
return err
}
if src.IsMultiImage() {
return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
}
@ -277,6 +282,22 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
return nil
}
func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error {
if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig()
if err != nil {
return errors.Wrapf(err, "Error parsing image configuration")
}
osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS)
if runtime.GOOS == "windows" && c.OS == "linux" {
return osErr
} else if runtime.GOOS != "windows" && c.OS == "windows" {
return osErr
}
}
return nil
}
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error {
destRef := dest.Reference().DockerReference()

View File

@ -51,6 +51,11 @@ func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dirImageDestination) MustMatchRuntimeOS() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

View File

@ -78,6 +78,11 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
defer resp.Body.Close()
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
return true
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *daemonImageDestination) Close() error {
if !d.committed {

View File

@ -308,31 +308,36 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
if len(c.challenges) == 0 {
return nil
}
// assume just one...
challenge := c.challenges[0]
switch challenge.Scheme {
case "basic":
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
schemeNames := make([]string, 0, len(c.challenges))
for _, challenge := range c.challenges {
schemeNames = append(schemeNames, challenge.Scheme)
switch challenge.Scheme {
case "basic":
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
token, err := c.getBearerToken(realm, service, scope)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
token, err := c.getBearerToken(realm, service, scope)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
return nil
}
return errors.Errorf("no handler for %s authentication", challenge.Scheme)
logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
return nil
}
func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {

View File

@ -99,6 +99,11 @@ func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }

View File

@ -81,6 +81,11 @@ func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
@ -176,7 +181,7 @@ func (d *Destination) PutManifest(m []byte) error {
layerPaths = append(layerPaths, l.Digest.String())
}
items := []manifestItem{{
items := []ManifestItem{{
Config: man.Config.Digest.String(),
RepoTags: []string{d.repoTag},
Layers: layerPaths,

View File

@ -20,7 +20,7 @@ import (
type Source struct {
tarPath string
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *manifestItem // nil if not available yet.
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
orderedDiffIDList []diffID
@ -145,23 +145,28 @@ func (s *Source) ensureCachedDataIsPresent() error {
return err
}
// Check to make sure length is 1
if len(tarManifest) != 1 {
return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
}
// Read and parse config.
configBytes, err := s.readTarComponent(tarManifest.Config)
configBytes, err := s.readTarComponent(tarManifest[0].Config)
if err != nil {
return err
}
var parsedConfig image // Most fields omitted; we only care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
if err != nil {
return err
}
// Success; commit.
s.tarManifest = tarManifest
s.tarManifest = &tarManifest[0]
s.configBytes = configBytes
s.configDigest = digest.FromBytes(configBytes)
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
@ -170,23 +175,25 @@ func (s *Source) ensureCachedDataIsPresent() error {
}
// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() (*manifestItem, error) {
func (s *Source) loadTarManifest() ([]ManifestItem, error) {
// FIXME? Do we need to deal with the legacy format?
bytes, err := s.readTarComponent(manifestFileName)
if err != nil {
return nil, err
}
var items []manifestItem
var items []ManifestItem
if err := json.Unmarshal(bytes, &items); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
if len(items) != 1 {
return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
}
return &items[0], nil
return items, nil
}
func (s *Source) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.loadTarManifest()
}
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))

View File

@ -13,7 +13,8 @@ const (
// legacyRepositoriesFileName = "repositories"
)
type manifestItem struct {
// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem struct {
Config string
RepoTags []string
Layers []string

View File

@ -16,7 +16,7 @@ type platformSpec struct {
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
// A manifestDescriptor references a platform-specific manifest.

View File

@ -183,6 +183,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
}
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos {
copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
copy.LayersDescriptors[i].Digest = info.Digest
copy.LayersDescriptors[i].Size = info.Size
copy.LayersDescriptors[i].URLs = info.URLs
@ -213,15 +214,17 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
return nil, err
}
config := descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
config := descriptorOCI1{
descriptor: descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
},
}
layers := make([]descriptor, len(m.LayersDescriptors))
layers := make([]descriptorOCI1, len(m.LayersDescriptors))
for idx := range layers {
layers[idx] = m.LayersDescriptors[idx]
layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
} else {

View File

@ -12,12 +12,18 @@ import (
"github.com/pkg/errors"
)
type descriptorOCI1 struct {
descriptor
Annotations map[string]string `json:"annotations,omitempty"`
}
type manifestOCI1 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
SchemaVersion int `json:"schemaVersion"`
ConfigDescriptor descriptor `json:"config"`
LayersDescriptors []descriptor `json:"layers"`
ConfigDescriptor descriptorOCI1 `json:"config"`
LayersDescriptors []descriptorOCI1 `json:"layers"`
Annotations map[string]string `json:"annotations,omitempty"`
}
func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
@ -29,7 +35,7 @@ func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericMa
}
// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
func manifestOCI1FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
return &manifestOCI1{
src: src,
configBlob: configBlob,
@ -148,8 +154,9 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
}
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
for i, info := range options.LayerInfos {
copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
copy.LayersDescriptors[i].Digest = info.Digest
copy.LayersDescriptors[i].Size = info.Size
}
@ -169,7 +176,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
// Create a copy of the descriptor.
config := m.ConfigDescriptor
config := m.ConfigDescriptor.descriptor
// The only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
@ -177,7 +184,7 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
layers := make([]descriptor, len(m.LayersDescriptors))
for idx := range layers {
layers[idx] = m.LayersDescriptors[idx]
layers[idx] = m.LayersDescriptors[idx].descriptor
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
}

View File

@ -66,6 +66,11 @@ func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *ociImageDestination) MustMatchRuntimeOS() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

View File

@ -358,6 +358,11 @@ func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

View File

@ -11,12 +11,15 @@ import (
"path/filepath"
"strconv"
"strings"
"time"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)
type blobToImport struct {
@ -86,6 +89,11 @@ func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
return true
}
func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
@ -153,7 +161,17 @@ func fixFiles(dir string, usermode bool) error {
return nil
}
func (d *ostreeImageDestination) importBlob(blob *blobToImport) error {
func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
opts := otbuiltin.NewCommitOptions()
opts.AddMetadataString = metadata
opts.Timestamp = time.Now()
// OCI layers have no parent OSTree commit
opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
_, err := repo.Commit(root, branch, opts)
return err
}
func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
@ -181,11 +199,7 @@ func (d *ostreeImageDestination) importBlob(blob *blobToImport) error {
return err
}
}
return exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
"--branch", ostreeBranch,
fmt.Sprintf("--tree=dir=%s", destinationPath)).Run()
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
func (d *ostreeImageDestination) importConfig(blob *blobToImport) error {
@ -253,6 +267,16 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
}
func (d *ostreeImageDestination) Commit() error {
repo, err := otbuiltin.OpenRepo(d.ref.repo)
if err != nil {
return err
}
_, err = repo.PrepareTransaction()
if err != nil {
return err
}
for _, layer := range d.schema.LayersDescriptors {
hash := layer.Digest.Hex()
blob := d.blobs[hash]
@ -261,7 +285,7 @@ func (d *ostreeImageDestination) Commit() error {
if blob == nil {
continue
}
err := d.importBlob(blob)
err := d.importBlob(repo, blob)
if err != nil {
return err
}
@ -277,11 +301,11 @@ func (d *ostreeImageDestination) Commit() error {
}
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
err := exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.manifest=%s", string(d.manifest)),
fmt.Sprintf("--branch=ociimage/%s", d.ref.branchName),
manifestPath).Run()
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest))}
err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
_, err = repo.CommitTransaction()
return err
}
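
Taken together, the ostree destination now drives libostree through ostree-go instead of shelling out to `ostree commit`. A condensed sketch of the transaction flow introduced above (repo path, branch, and metadata are placeholders; the otbuiltin calls are as vendored here):

```go
package example

import (
	"time"

	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
)

// commitTree restates the flow above: open the repo, wrap the commit in a
// transaction, and commit the transaction.
func commitTree(repoPath, branch, root string, metadata []string) error {
	repo, err := otbuiltin.OpenRepo(repoPath)
	if err != nil {
		return err
	}
	if _, err := repo.PrepareTransaction(); err != nil {
		return err
	}
	opts := otbuiltin.NewCommitOptions()
	opts.AddMetadataString = metadata
	opts.Timestamp = time.Now()
	if _, err := repo.Commit(root, branch, opts); err != nil {
		return err
	}
	_, err = repo.CommitTransaction()
	return err
}
```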

View File

@ -174,11 +174,11 @@ func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobI
}
// Attempt to create the identified layer and import its contents.
layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
if err != nil && err != storage.ErrDuplicateID {
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
return errorBlobInfo, err
}
if err == storage.ErrDuplicateID {
if errors.Cause(err) == storage.ErrDuplicateID {
// We specified an ID, and there's already a layer with
// the same ID. Drain the input so that we can look at
// its length and digest.
@ -291,7 +291,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
// it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
if blobinfo.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
}
for _, blob := range s.BlobList {
if blob.Digest == blobinfo.Digest {
@ -331,7 +331,7 @@ func (s *storageImageDestination) Commit() error {
}
img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
if err != nil {
if err != storage.ErrDuplicateID {
if errors.Cause(err) != storage.ErrDuplicateID {
logrus.Debugf("error creating image: %q", err)
return errors.Wrapf(err, "error creating image %q", s.ID)
}
@ -340,8 +340,8 @@ func (s *storageImageDestination) Commit() error {
return errors.Wrapf(err, "error reading image %q", s.ID)
}
if img.TopLayer != lastLayer {
logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", err)
return errors.Wrapf(err, "image with ID %q already exists, but uses a different top layer", s.ID)
logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID)
return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID)
}
logrus.Debugf("reusing image ID %q", img.ID)
} else {
@ -449,6 +449,11 @@ func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (s *storageImageDestination) MustMatchRuntimeOS() bool {
return true
}
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
sizes := []int{}
sigblob := []byte{}
@ -516,7 +521,7 @@ func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64,
} else {
n = layerMeta.CompressedSize
}
diff, err := store.Diff("", layer.ID)
diff, err := store.Diff("", layer.ID, nil)
if err != nil {
return nil, -1, err
}

View File

@ -70,7 +70,9 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
// to build this reference object.
func (s storageReference) Transport() types.ImageTransport {
return &storageTransport{
store: s.transport.store,
store: s.transport.store,
defaultUIDMap: s.transport.defaultUIDMap,
defaultGIDMap: s.transport.defaultGIDMap,
}
}
@ -83,7 +85,12 @@ func (s storageReference) DockerReference() reference.Named {
// disambiguate between images which may be present in multiple stores and
// share only their names.
func (s storageReference) StringWithinTransport() string {
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
optionsList := ""
options := s.transport.store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.name == nil {
return storeSpec + "@" + s.id
}
@ -94,7 +101,14 @@ func (s storageReference) StringWithinTransport() string {
}
func (s storageReference) PolicyConfigurationIdentity() string {
return s.StringWithinTransport()
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.name == nil {
return storeSpec + "@" + s.id
}
if s.id == "" {
return storeSpec + s.reference
}
return storeSpec + s.reference + "@" + s.id
}
// Also accept policy that's tied to the combination of the graph root and

View File

@ -11,6 +11,7 @@ import (
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/go-digest"
ddigest "github.com/opencontainers/go-digest"
)
@ -46,10 +47,20 @@ type StoreTransport interface {
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
// SetDefaultUIDMap sets the default UID map to use when opening stores.
SetDefaultUIDMap(idmap []idtools.IDMap)
// SetDefaultGIDMap sets the default GID map to use when opening stores.
SetDefaultGIDMap(idmap []idtools.IDMap)
// DefaultUIDMap returns the default UID map used when opening stores.
DefaultUIDMap() []idtools.IDMap
// DefaultGIDMap returns the default GID map used when opening stores.
DefaultGIDMap() []idtools.IDMap
}
type storageTransport struct {
store storage.Store
store storage.Store
defaultUIDMap []idtools.IDMap
defaultGIDMap []idtools.IDMap
}
func (s *storageTransport) Name() string {
@ -66,6 +77,26 @@ func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}
// SetDefaultUIDMap sets the default UID map to use when opening stores.
func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) {
s.defaultUIDMap = idmap
}
// SetDefaultGIDMap sets the default GID map to use when opening stores.
func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) {
s.defaultGIDMap = idmap
}
// DefaultUIDMap returns the default UID map used when opening stores.
func (s *storageTransport) DefaultUIDMap() []idtools.IDMap {
return s.defaultUIDMap
}
// DefaultGIDMap returns the default GID map used when opening stores.
func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
return s.defaultGIDMap
}
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
@ -110,7 +141,12 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// recognize.
return nil, ErrInvalidReference
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "]"
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
id := ""
if sum.Validate() == nil {
id = sum.Hex()
@ -127,14 +163,17 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
} else {
logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
}
return newReference(storageTransport{store: store}, refname, id, name), nil
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
}
func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
store, err := storage.GetStore(storage.DefaultStoreOptions)
options := storage.DefaultStoreOptions
options.UIDMap = s.defaultUIDMap
options.GIDMap = s.defaultGIDMap
store, err := storage.GetStore(options)
if err != nil {
return nil, err
}
@ -145,15 +184,11 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in
// a reference object. If the _graphroot_ is a location other than the default,
// it needs to have been previously opened using storage.GetStore(), so that it
// can figure out which run root goes with the graph root.
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
// tries to figure out which it is, and returns it in a reference object.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
store, err := s.GetStore()
if err != nil {
return nil, err
}
var store storage.Store
// Check if there's a store location prefix. If there is, then it
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
@ -165,37 +200,65 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
}
storeSpec := reference[1:closeIndex]
reference = reference[closeIndex+1:]
storeInfo := strings.SplitN(storeSpec, "@", 2)
if len(storeInfo) == 1 && storeInfo[0] != "" {
// One component: the graph root.
if !filepath.IsAbs(storeInfo[0]) {
return nil, ErrPathNotAbsolute
// Peel off a "driver@" from the start.
driverInfo := ""
driverSplit := strings.SplitN(storeSpec, "@", 2)
if len(driverSplit) != 2 {
if storeSpec == "" {
return nil, ErrInvalidReference
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphRoot: storeInfo[0],
})
if err != nil {
return nil, err
}
store = store2
} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(storeInfo[1]) {
return nil, ErrPathNotAbsolute
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphDriverName: storeInfo[0],
GraphRoot: storeInfo[1],
})
if err != nil {
return nil, err
}
store = store2
} else {
// Anything else: store specified in a form we don't
// recognize.
return nil, ErrInvalidReference
driverInfo = driverSplit[0]
if driverInfo == "" {
return nil, ErrInvalidReference
}
storeSpec = driverSplit[1]
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
optionsSplit := strings.SplitN(storeSpec, ":", 2)
if len(optionsSplit) == 2 {
options = strings.Split(optionsSplit[1], ",")
storeSpec = optionsSplit[0]
}
// Peel off a "+runroot" from the new end.
runRootInfo := ""
runRootSplit := strings.SplitN(storeSpec, "+", 2)
if len(runRootSplit) == 2 {
runRootInfo = runRootSplit[1]
storeSpec = runRootSplit[0]
}
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.
if rootInfo != "" && !filepath.IsAbs(rootInfo) {
return nil, ErrPathNotAbsolute
}
if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
return nil, ErrPathNotAbsolute
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphDriverName: driverInfo,
GraphRoot: rootInfo,
RunRoot: runRootInfo,
GraphDriverOptions: options,
UIDMap: s.defaultUIDMap,
GIDMap: s.defaultGIDMap,
})
if err != nil {
return nil, err
}
store = store2
} else {
// We didn't have a store spec, so use the default.
store2, err := s.GetStore()
if err != nil {
return nil, err
}
store = store2
}
return s.ParseStoreReference(store, reference)
}
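
With the extended specifier, a caller no longer has to pre-open a non-default graph root just so the library can learn its run root. A hedged sketch of a reference using the `[driver@graphroot+runroot:options]` form (driver, paths, and the option value are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Illustrative only: the store spec carries the driver, graph root,
	// run root, and graph-driver options inline.
	ref, err := alltransports.ParseImageName(
		"containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/mnt/ro-store]myimage:latest")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(ref.StringWithinTransport())
}
```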
@ -250,7 +313,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return ErrPathNotAbsolute
}
} else {
// Anything else: store specified in a form we don't
// Anything else: scope specified in a form we don't
// recognize.
return ErrInvalidReference
}

View File

@ -12,7 +12,7 @@ import (
_ "github.com/containers/image/docker/daemon"
_ "github.com/containers/image/oci/layout"
_ "github.com/containers/image/openshift"
_ "github.com/containers/image/ostree"
// The ostree transport is registered by ostree*.go
_ "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/types"

View File

@ -0,0 +1,8 @@
// +build !containers_image_ostree_stub
package alltransports
import (
// Register the ostree transport
_ "github.com/containers/image/ostree"
)

View File

@ -0,0 +1,9 @@
// +build containers_image_ostree_stub
package alltransports
import "github.com/containers/image/transports"
func init() {
transports.Register(transports.NewStubTransport("ostree"))
}

vendor/github.com/containers/image/transports/stub.go (new vendored file, 36 lines)
View File

@ -0,0 +1,36 @@
package transports
import (
"fmt"
"github.com/containers/image/types"
)
// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
type stubTransport string
// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
func NewStubTransport(name string) types.ImageTransport {
return stubTransport(name)
}
// Name returns the name of the transport, which must be unique among other transports.
func (s stubTransport) Name() string {
return string(s)
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
// Allowing any reference in here allows tools with some transports stubbed-out to still
// use signature verification policies which refer to these stubbed-out transports.
// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
return nil
}

View File

@ -2,6 +2,7 @@ package transports
import (
"fmt"
"sort"
"sync"
"github.com/containers/image/types"
@ -69,3 +70,15 @@ func Register(t types.ImageTransport) {
func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
// ListNames returns a list of transport names
func ListNames() []string {
kt.mu.Lock()
defer kt.mu.Unlock()
var names []string
for _, transport := range kt.transports {
names = append(names, transport.Name())
}
sort.Strings(names)
return names
}
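
A quick sketch of the new helper from a caller's side (output depends on which transports the build registers; the blank import pulls in the default set):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/transports"
	_ "github.com/containers/image/transports/alltransports"
)

func main() {
	// ListNames reports every registered transport, sorted by name.
	fmt.Println("supported transports:", strings.Join(transports.ListNames(), ", "))
}
```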

View File

@ -148,11 +148,11 @@ type ImageDestination interface {
SupportsSignatures() error
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
ShouldCompressLayers() bool
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
AcceptsForeignLayerURLs() bool
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
MustMatchRuntimeOS() bool
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

View File

@ -1,5 +1,5 @@
github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a
github.com/containers/storage 989b1c1d85f5dfe2076c67b54289cc13dc836c8c
github.com/containers/storage 105f7c77aef0c797429e41552743bf5b03b63263
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb
github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
@ -15,7 +15,7 @@ github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/image-spec v1.0.0-rc6
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
@ -34,4 +34,4 @@ github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go 61532f383f1f48e5c27080b0b9c8b022c3706a97
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460

View File

@ -1,6 +1,6 @@
`storage` is a Go library which aims to provide methods for storing filesystem
layers, container images, and containers. An `oci-storage` CLI wrapper is also
included for manual and scripting use.
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.
To build the CLI wrapper, use 'make build-binary'.

View File

@ -50,6 +50,12 @@ type Container struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// Created is the datestamp for when this container was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
}
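
Since records written by older library versions predate the field, the guard the comment suggests looks like this (a minimal sketch; `describeCreated` is a hypothetical helper):

```go
package example

import (
	"fmt"
	"time"

	"github.com/containers/storage"
)

// describeCreated only trusts Created when the record was written by a
// library version that tracks it; older records leave it as the zero time.
func describeCreated(c storage.Container) string {
	if c.Created.IsZero() {
		return "creation time unknown (record predates this field)"
	}
	return fmt.Sprintf("created %s", c.Created.UTC().Format(time.RFC3339))
}
```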
@ -93,7 +99,7 @@ type ContainerStore interface {
type containerStore struct {
lockfile Locker
dir string
containers []Container
containers []*Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
@ -101,7 +107,11 @@ type containerStore struct {
}
func (r *containerStore) Containers() ([]Container, error) {
return r.containers, nil
containers := make([]Container, len(r.containers))
for i := range r.containers {
containers[i] = *(r.containers[i])
}
return containers, nil
}
func (r *containerStore) containerspath() string {
@ -123,7 +133,7 @@ func (r *containerStore) Load() error {
if err != nil && !os.IsNotExist(err) {
return err
}
containers := []Container{}
containers := []*Container{}
layers := make(map[string]*Container)
idlist := []string{}
ids := make(map[string]*Container)
@ -131,14 +141,14 @@ func (r *containerStore) Load() error {
if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = &containers[n]
layers[container.LayerID] = &containers[n]
ids[container.ID] = containers[n]
layers[container.LayerID] = containers[n]
for _, name := range container.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
needSave = true
}
names[name] = &containers[n]
names[name] = containers[n]
}
}
}
@ -148,7 +158,6 @@ func (r *containerStore) Load() error {
r.bylayer = layers
r.byname = names
if needSave {
r.Touch()
return r.Save()
}
return nil
@ -163,6 +172,7 @@ func (r *containerStore) Save() error {
if err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(rpath, jdata, 0600)
}
@ -179,7 +189,7 @@ func newContainerStore(dir string) (ContainerStore, error) {
cstore := containerStore{
lockfile: lockfile,
dir: dir,
containers: []Container{},
containers: []*Container{},
byid: make(map[string]*Container),
bylayer: make(map[string]*Container),
byname: make(map[string]*Container),
@ -241,7 +251,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
}
}
if err == nil {
newContainer := Container{
container = &Container{
ID: id,
Names: names,
ImageID: image,
@ -249,10 +259,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
}
r.containers = append(r.containers, newContainer)
container = &r.containers[len(r.containers)-1]
r.containers = append(r.containers, container)
r.byid[id] = container
r.idindex.Add(id)
r.bylayer[layer] = container
@ -306,10 +316,11 @@ func (r *containerStore) Delete(id string) error {
return ErrContainerUnknown
}
id = container.ID
newContainers := []Container{}
for _, candidate := range r.containers {
if candidate.ID != id {
newContainers = append(newContainers, candidate)
toDeleteIndex := -1
for i, candidate := range r.containers {
if candidate.ID == id {
toDeleteIndex = i
break
}
}
delete(r.byid, id)
@ -318,7 +329,14 @@ func (r *containerStore) Delete(id string) error {
for _, name := range container.Names {
delete(r.byname, name)
}
r.containers = newContainers
if toDeleteIndex != -1 {
// delete the container at toDeleteIndex
if toDeleteIndex == len(r.containers)-1 {
r.containers = r.containers[:len(r.containers)-1]
} else {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
}
}
if err := r.Save(); err != nil {
return err
}
@ -437,6 +455,10 @@ func (r *containerStore) Modified() (bool, error) {
return r.lockfile.Modified()
}
func (r *containerStore) IsReadWrite() bool {
return r.lockfile.IsReadWrite()
}
func (r *containerStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

View File

@ -47,6 +47,7 @@ import (
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
@ -81,7 +82,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, graphdriver.ErrNotSupported
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
}
fsMagic, err := graphdriver.GetFSMagic(root)
@ -95,7 +96,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
}
paths := []string{
@ -372,6 +373,12 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
})
}
// AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}
type fileGetNilCloser struct {
storage.FileGetter
}

View File

@ -2,7 +2,7 @@
package aufs
import "errors"
import "github.com/pkg/errors"
// MsRemount declared to specify a non-linux system mount.
const MsRemount = 0

View File

@ -29,6 +29,7 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
func init() {
@ -55,7 +56,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
if fsMagic != graphdriver.FsMagicBtrfs {
return nil, graphdriver.ErrPrerequisites
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -518,3 +519,9 @@ func (d *Driver) Exists(id string) bool {
_, err := os.Stat(dir)
return err == nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}

View File

@ -5,7 +5,6 @@ package devmapper
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -28,10 +27,10 @@ import (
"github.com/containers/storage/pkg/loopback"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/storageversion"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
@ -1475,7 +1474,7 @@ func determineDriverCapabilities(version string) error {
versionSplit := strings.Split(version, ".")
major, err := strconv.Atoi(versionSplit[0])
if err != nil {
return graphdriver.ErrNotSupported
return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0])
}
if major > 4 {
@ -1489,7 +1488,7 @@ func determineDriverCapabilities(version string) error {
minor, err := strconv.Atoi(versionSplit[1])
if err != nil {
return graphdriver.ErrNotSupported
return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1])
}
/*
@ -1656,11 +1655,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
version, err := devicemapper.GetDriverVersion()
if err != nil {
// Can't even get driver version, assume not supported
return graphdriver.ErrNotSupported
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper")
}
if err := determineDriverCapabilities(version); err != nil {
return graphdriver.ErrNotSupported
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities")
}
if err := devices.enableDeferredRemovalDeletion(); err != nil {
@ -1668,17 +1667,17 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
}
// https://github.com/docker/docker/issues/4036
if supported := devicemapper.UdevSetSyncSupport(true); !supported {
if storageversion.IAmStatic == "true" {
logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
} else {
logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
}
if !devices.overrideUdevSyncCheck {
return graphdriver.ErrNotSupported
}
}
// if supported := devicemapper.UdevSetSyncSupport(true); !supported {
// if storageversion.IAmStatic == "true" {
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
// } else {
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
// }
//
// if !devices.overrideUdevSyncCheck {
// return graphdriver.ErrNotSupported
// }
// }
//create the root dir of the devmapper driver ownership to match this
//daemon's remapped root uid/gid so containers can start properly
@ -1734,6 +1733,15 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
metadataFile *os.File
)
fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir())
if err != nil {
return err
}
switch fsMagic {
case graphdriver.FsMagicAufs:
return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems")
}
if devices.dataDevice == "" {
// Make sure the sparse images exist in <root>/devicemapper/data
@ -1960,7 +1968,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro
// If syncDelete is true, we want to return error. If deferred
// deletion is not enabled, we return an error. If error is
// something other then EBUSY, return an error.
if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
logrus.Debugf("devmapper: Error deleting device: %s", err)
return err
}
@ -2115,7 +2123,7 @@ func (devices *DeviceSet) removeDevice(devname string) error {
if err == nil {
break
}
if err != devicemapper.ErrBusy {
if errors.Cause(err) != devicemapper.ErrBusy {
return err
}
@ -2150,12 +2158,12 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
break
}
if err == devicemapper.ErrEnxio {
if errors.Cause(err) == devicemapper.ErrEnxio {
// Device is probably already gone. Return success.
return nil
}
if err != devicemapper.ErrBusy {
if errors.Cause(err) != devicemapper.ErrBusy {
return err
}

View File

@ -224,3 +224,9 @@ func (d *Driver) Put(id string) error {
func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}

View File

@ -1,13 +1,13 @@
package graphdriver
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/pkg/archive"
@ -74,6 +74,8 @@ type ProtoDriver interface {
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
// AdditionalImageStores returns additional image stores supported by the driver
AdditionalImageStores() []string
}
// Driver is the interface for layered/snapshot file system drivers.
@ -142,7 +144,7 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home)
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
@ -151,7 +153,7 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
}
// New creates the driver and initializes it at the specified root.
@ -226,7 +228,8 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
cause := errors.Cause(err)
return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
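
This `errors.Wrap`/`errors.Cause` pairing is the pattern the commit adopts throughout the drivers: wrapping adds context for logs while `errors.Cause` still unwraps to the sentinel for comparisons. A self-contained sketch of the idiom (the sentinel name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotSupported = errors.New("driver not supported")

func probe() error {
	// Wrapping preserves the sentinel while adding a human-readable reason.
	return errors.Wrap(errNotSupported, "kernel does not support aufs")
}

func main() {
	err := probe()
	// errors.Cause unwraps to the sentinel, so equality checks still work.
	fmt.Println(errors.Cause(err) == errNotSupported) // true
}
```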

View File

@ -53,7 +53,7 @@ const (
var (
// Slice of drivers that should be used in an order
priority = []string{
"overlay2",
"overlay",
"devicemapper",
"aufs",
"btrfs",

View File

@ -20,6 +20,7 @@ import (
"unsafe"
log "github.com/Sirupsen/logrus"
"github.com/pkg/errors"
)
const (
@ -56,7 +57,7 @@ func Mounted(fsType FsMagic, mountPath string) (bool, error) {
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
C.free(unsafe.Pointer(buf))
return false, ErrPrerequisites
return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath)
}
C.free(unsafe.Pointer(buf))

View File

@ -4,12 +4,12 @@ package overlay
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
@ -26,6 +26,7 @@ import (
"github.com/containers/storage/pkg/parsers/kernel"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
@ -82,6 +83,7 @@ type Driver struct {
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
opts *overlayOptions
}
var backingFs = "<unknown>"
@ -100,7 +102,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
}
if err := supportsOverlay(); err != nil {
return nil, graphdriver.ErrNotSupported
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs")
}
// require kernel 4.0.0 to ensure multiple lower dirs are supported
@ -110,7 +112,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
}
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
return nil, graphdriver.ErrNotSupported
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
}
logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
}
@ -127,7 +129,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
switch fsMagic {
case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -149,6 +151,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
opts: opts,
}
return d, nil
@ -170,6 +173,7 @@ func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.ID
type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
}
func parseOptions(options []string) (*overlayOptions, error) {
@ -186,6 +190,22 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
case "overlay.imagestore":
// Additional read only image stores to use for lower paths
for _, store := range strings.Split(val, ",") {
store = filepath.Clean(store)
if !filepath.IsAbs(store) {
return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store)
}
st, err := os.Stat(store)
if err != nil {
return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err)
}
if !st.IsDir() {
return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
}
o.imageStores = append(o.imageStores, store)
}
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
@ -211,7 +231,7 @@ func supportsOverlay() error {
}
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return graphdriver.ErrNotSupported
return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
}
func (d *Driver) String() string {
@ -357,8 +377,18 @@ func (d *Driver) getLower(parent string) (string, error) {
return strings.Join(lowers, ":"), nil
}
func (d *Driver) dir(id string) string {
return path.Join(d.home, id)
func (d *Driver) dir(val string) string {
newpath := path.Join(d.home, val)
if _, err := os.Stat(newpath); err != nil {
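// Not found in the writable home directory; fall back to each
// configured read-only image store before giving up.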
for _, p := range d.AdditionalImageStores() {
l := path.Join(p, d.name, val)
_, err = os.Stat(l)
if err == nil {
return l
}
}
}
return newpath
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@ -366,11 +396,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lp, err := os.Readlink(path.Join(d.home, s))
lower := d.dir(s)
lp, err := os.Readlink(lower)
if err != nil {
return nil, err
}
lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp))))
}
} else if !os.IsNotExist(err) {
return nil, err
@ -411,6 +442,31 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
return "", err
}
newlowers := ""
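// Resolve each recorded lower ID to an on-disk directory, preferring
// the local store and falling back to the additional image stores.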
for _, l := range strings.Split(string(lowers), ":") {
lower := ""
newpath := path.Join(d.home, l)
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
lower = path.Join(p, d.name, l)
if _, err2 := os.Stat(lower); err2 == nil {
break
}
lower = ""
}
if lower == "" {
return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
}
} else {
lower = l
}
if newlowers == "" {
newlowers = lower
} else {
newlowers = newlowers + ":" + lower
}
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
@ -424,7 +480,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
}()
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work"))
mountLabel = label.FormatMountLabel(opts, mountLabel)
if len(mountLabel) > syscall.Getpagesize() {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
@ -527,3 +583,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
return archive.OverlayChanges(layers, diffPath)
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return d.opts.imageStores
}

View File

@ -3,10 +3,10 @@
package graphdriver
import (
"errors"
"fmt"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
)
type graphDriverProxy struct {

View File

@ -143,3 +143,9 @@ func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}

View File

@ -20,6 +20,7 @@ import (
"github.com/containers/storage/pkg/parsers"
zfs "github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
type zfsOptions struct {
@ -47,13 +48,13 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
if _, err := exec.LookPath("zfs"); err != nil {
logrus.Debugf("[zfs] zfs command is not available: %v", err)
return nil, graphdriver.ErrPrerequisites
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}
file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
return nil, graphdriver.ErrPrerequisites
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
}
defer file.Close()
@ -403,3 +404,9 @@ func (d *Driver) Exists(id string) bool {
defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)] == true
}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
var imageStores []string
return imageStores
}

View File

@ -7,6 +7,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
)
func checkRootdirFs(rootdir string) error {
@ -18,7 +19,7 @@ func checkRootdirFs(rootdir string) error {
// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return graphdriver.ErrPrerequisites
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}
return nil

View File

@ -6,6 +6,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
)
func checkRootdirFs(rootdir string) error {
@ -16,7 +17,7 @@ func checkRootdirFs(rootdir string) error {
if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return graphdriver.ErrPrerequisites
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}
return nil

View File

@ -22,6 +22,7 @@ import (
log "github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
)
func checkRootdirFs(rootdir string) error {
@ -34,7 +35,7 @@ func checkRootdirFs(rootdir string) error {
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
C.free(unsafe.Pointer(buf))
return graphdriver.ErrPrerequisites
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}
C.free(unsafe.Pointer(buf))

View File

@ -2,7 +2,6 @@ package storage
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
@ -11,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
"github.com/pkg/errors"
)
var (
@ -46,24 +46,20 @@ type Image struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// Created is the datestamp for when this image was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"`
}
// ImageStore provides bookkeeping for information about Images.
type ImageStore interface {
FileBasedStore
MetadataStore
BigDataStore
FlaggableStore
// Create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
SetNames(id string, names []string) error
// ROImageStore provides bookkeeping for information about Images.
type ROImageStore interface {
ROFileBasedStore
ROMetadataStore
ROBigDataStore
// Exists checks if there is an image with the given ID or name.
Exists(id string) bool
@ -71,12 +67,6 @@ type ImageStore interface {
// Get retrieves information about an image given an ID or name.
Get(id string) (*Image, error)
// Delete removes the record of the image.
Delete(id string) error
// Wipe removes records of all images.
Wipe() error
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
@ -85,17 +75,45 @@ type ImageStore interface {
Images() ([]Image, error)
}
// ImageStore provides bookkeeping for information about Images.
type ImageStore interface {
ROImageStore
RWFileBasedStore
RWMetadataStore
RWBigDataStore
FlaggableStore
// Create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
SetNames(id string, names []string) error
// Delete removes the record of the image.
Delete(id string) error
// Wipe removes records of all images.
Wipe() error
}
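As a rough usage sketch (istore, topLayerID, and originalCreated are assumed names, not part of this patch), the extended Create lets a caller preserve an image's original creation time:

	// originalCreated would typically come from the image's config blob;
	// the zero value makes Create fall back to time.Now().UTC().
	img, err := istore.Create("", []string{"example.com/app:latest"}, topLayerID, "", originalCreated)
	if err != nil {
		return err
	}
	fmt.Printf("image %s uses top layer %s\n", img.ID, img.TopLayer)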
type imageStore struct {
lockfile Locker
dir string
images []Image
images []*Image
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
}
func (r *imageStore) Images() ([]Image, error) {
return r.images, nil
images := make([]Image, len(r.images))
for i := range r.images {
images[i] = *(r.images[i])
}
return images, nil
}
func (r *imageStore) imagespath() string {
@ -111,41 +129,46 @@ func (r *imageStore) datapath(id, key string) string {
}
func (r *imageStore) Load() error {
needSave := false
shouldSave := false
rpath := r.imagespath()
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
images := []Image{}
images := []*Image{}
idlist := []string{}
ids := make(map[string]*Image)
names := make(map[string]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
for n, image := range images {
ids[image.ID] = &images[n]
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
for _, name := range image.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
needSave = true
shouldSave = true
}
names[name] = &images[n]
names[name] = images[n]
}
}
}
if shouldSave && !r.IsReadWrite() {
return errors.New("image store assigns the same name to multiple images")
}
r.images = images
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
if needSave {
r.Touch()
if shouldSave {
return r.Save()
}
return nil
}
func (r *imageStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
@ -154,6 +177,7 @@ func (r *imageStore) Save() error {
if err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(rpath, jdata, 0600)
}
@ -170,7 +194,27 @@ func newImageStore(dir string) (ImageStore, error) {
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []Image{},
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
}
if err := istore.Load(); err != nil {
return nil, err
}
return &istore, nil
}
func newROImageStore(dir string) (ROImageStore, error) {
lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
lockfile.Lock()
defer lockfile.Unlock()
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
}
@ -193,6 +237,9 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
}
func (r *imageStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
@ -202,6 +249,9 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
}
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
@ -210,7 +260,10 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return r.Save()
}
func (r *imageStore) Create(id string, names []string, layer, metadata string) (image *Image, err error) {
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
if !r.IsReadWrite() {
return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
}
if id == "" {
id = stringid.GenerateRandomID()
_, idInUse := r.byid[id]
@ -227,18 +280,21 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string) (
return nil, ErrDuplicateName
}
}
if created.IsZero() {
created = time.Now().UTC()
}
if err == nil {
newImage := Image{
image = &Image{
ID: id,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
Created: created,
Flags: make(map[string]interface{}),
}
r.images = append(r.images, newImage)
image = &r.images[len(r.images)-1]
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
for _, name := range names {
@ -257,6 +313,9 @@ func (r *imageStore) Metadata(id string) (string, error) {
}
func (r *imageStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath())
}
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
@ -269,6 +328,9 @@ func (r *imageStore) removeName(image *Image, name string) {
}
func (r *imageStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
}
if image, ok := r.lookup(id); ok {
for _, name := range image.Names {
delete(r.byname, name)
@ -286,15 +348,18 @@ func (r *imageStore) SetNames(id string, names []string) error {
}
func (r *imageStore) Delete(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
id = image.ID
newImages := []Image{}
for _, candidate := range r.images {
if candidate.ID != id {
newImages = append(newImages, candidate)
toDeleteIndex := -1
for i, candidate := range r.images {
if candidate.ID == id {
toDeleteIndex = i
}
}
delete(r.byid, id)
@ -302,7 +367,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
r.images = newImages
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
r.images = r.images[:len(r.images)-1]
} else {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if err := r.Save(); err != nil {
return err
}
@ -359,6 +431,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
}
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
}
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
@ -393,6 +468,9 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
}
func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
ids := []string{}
for id := range r.byid {
ids = append(ids, id)
@ -421,6 +499,10 @@ func (r *imageStore) Modified() (bool, error) {
return r.lockfile.Modified()
}
func (r *imageStore) IsReadWrite() bool {
return r.lockfile.IsReadWrite()
}
func (r *imageStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

View File

@ -4,7 +4,6 @@ import (
"bytes"
"compress/gzip"
"encoding/json"
"errors"
"io"
"io/ioutil"
"os"
@ -16,6 +15,8 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@ -66,6 +67,38 @@ type Layer struct {
// mounted at the mount point.
MountCount int `json:"-"`
// Created is the datestamp for when this layer was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
// is set before using it.
Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an
// uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. Often referred to
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
// that was last passed to ApplyDiff() or Put().
CompressionType archive.Compression `json:"compression,omitempty"`
// Flags is arbitrary data about the layer.
Flags map[string]interface{} `json:"flags,omitempty"`
}
@ -75,12 +108,74 @@ type layerMountPoint struct {
MountCount int `json:"count"`
}
// DiffOptions override the default behavior of Diff() methods.
type DiffOptions struct {
	// Compression, if set, overrides the default compressor when generating a diff.
Compression *archive.Compression
}
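A hedged sketch of overriding the recorded compression when reading back a diff (lstore and layerID are assumed names):

	c := archive.Uncompressed
	rc, err := lstore.Diff("", layerID, &DiffOptions{Compression: &c})
	if err != nil {
		return err
	}
	defer rc.Close()
	// rc now streams an uncompressed tar, regardless of how the layer's
	// diff was compressed when it was applied.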
// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
type ROLayerStore interface {
ROFileBasedStore
ROMetadataStore
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
// Get retrieves information about a layer given an ID or name.
Get(id string) (*Layer, error)
	// Status returns a slice of key-value pairs, suitable for human consumption,
// relaying whatever status information the underlying driver can share.
Status() ([][2]string, error)
// Changes returns a slice of Change structures, which contain a pathname
// (Path) and a description of what sort of change (Kind) was made by the
// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
// specified layer. By default, the layer's parent is used as a reference.
Changes(from, to string) ([]archive.Change, error)
// Diff produces a tarstream which can be applied to a layer with the contents
// of the first layer to produce a layer with the contents of the second layer.
// By default, the parent of the second layer is used as the first
// layer, so it need not be specified. Options can be used to override
// default behavior, but are also not required.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// DiffSize produces an estimate of the length of the tarstream which would be
// produced by Diff.
DiffSize(from, to string) (int64, error)
// Size produces a cached value for the uncompressed size of the layer,
	// if one is known, or -1 if it is not known. If the layer cannot be
// found, it returns an error.
Size(name string) (int64, error)
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
// LayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
type LayerStore interface {
FileBasedStore
MetadataStore
ROLayerStore
RWFileBasedStore
RWMetadataStore
FlaggableStore
// Create creates a new layer, optionally giving it a specified ID rather than
@ -98,20 +193,10 @@ type LayerStore interface {
// Put combines the functions of CreateWithFlags and ApplyDiff.
Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
// Exists checks if a layer with the specified name or ID is known.
Exists(id string) bool
// Get retrieves information about a layer given an ID or name.
Get(id string) (*Layer, error)
// SetNames replaces the list of names associated with a layer with the
// supplied values.
SetNames(id string, names []string) error
	// Status returns a slice of key-value pairs, suitable for human consumption,
// relaying whatever status information the underlying driver can share.
Status() ([][2]string, error)
// Delete deletes a layer with the specified name or ID.
Delete(id string) error
@ -126,48 +211,31 @@ type LayerStore interface {
// Unmount unmounts a layer when it is no longer in use.
Unmount(id string) error
// Changes returns a slice of Change structures, which contain a pathname
// (Path) and a description of what sort of change (Kind) was made by the
// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
// specified layer. By default, the layer's parent is used as a reference.
Changes(from, to string) ([]archive.Change, error)
// Diff produces a tarstream which can be applied to a layer with the contents
// of the first layer to produce a layer with the contents of the second layer.
// By default, the parent of the second layer is used as the first
// layer, so it need not be specified.
Diff(from, to string) (io.ReadCloser, error)
// DiffSize produces an estimate of the length of the tarstream which would be
// produced by Diff.
DiffSize(from, to string) (int64, error)
// ApplyDiff reads a tarstream which was created by a previous call to Diff and
// applies its changes to a specified layer.
ApplyDiff(to string, diff archive.Reader) (int64, error)
// Lookup attempts to translate a name to an ID. Most methods do this
// implicitly.
Lookup(name string) (string, error)
// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
type layerStore struct {
lockfile Locker
rundir string
driver drivers.Driver
layerdir string
layers []Layer
idindex *truncindex.TruncIndex
byid map[string]*Layer
byname map[string]*Layer
bymount map[string]*Layer
lockfile Locker
rundir string
driver drivers.Driver
layerdir string
layers []*Layer
idindex *truncindex.TruncIndex
byid map[string]*Layer
byname map[string]*Layer
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
}
func (r *layerStore) Layers() ([]Layer, error) {
return r.layers, nil
layers := make([]Layer, len(r.layers))
for i := range r.layers {
layers[i] = *(r.layers[i])
}
return layers, nil
}
func (r *layerStore) mountspath() string {
@ -179,36 +247,41 @@ func (r *layerStore) layerspath() string {
}
func (r *layerStore) Load() error {
needSave := false
shouldSave := false
rpath := r.layerspath()
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layers := []Layer{}
layers := []*Layer{}
idlist := []string{}
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
mounts := make(map[string]*Layer)
parents := make(map[string][]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
for n, layer := range layers {
ids[layer.ID] = &layers[n]
ids[layer.ID] = layers[n]
idlist = append(idlist, layer.ID)
for _, name := range layer.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
needSave = true
shouldSave = true
}
names[name] = &layers[n]
names[name] = layers[n]
}
if pslice, ok := parents[layer.Parent]; ok {
parents[layer.Parent] = append(pslice, &layers[n])
} else {
parents[layer.Parent] = []*Layer{&layers[n]}
if layer.CompressedDigest != "" {
compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
}
if layer.UncompressedDigest != "" {
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
}
}
}
if shouldSave && !r.IsReadWrite() {
return errors.New("layer store assigns the same name to multiple layers")
}
mpath := r.mountspath()
data, err = ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
@ -231,29 +304,35 @@ func (r *layerStore) Load() error {
r.byid = ids
r.byname = names
r.bymount = mounts
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
err = nil
// Last step: try to remove anything that a previous user of this
// storage area marked for deletion but didn't manage to actually
// delete.
for _, layer := range r.layers {
if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b {
err = r.Delete(layer.ID)
if err != nil {
break
// Last step: if we're writable, try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
if r.IsReadWrite() {
for _, layer := range r.layers {
if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b {
err = r.Delete(layer.ID)
if err != nil {
break
}
shouldSave = true
}
needSave = true
}
}
}
if needSave {
r.Touch()
return r.Save()
if shouldSave {
return r.Save()
}
}
return err
}
func (r *layerStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
@ -283,6 +362,7 @@ func (r *layerStore) Save() error {
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
}
@ -314,6 +394,28 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer
return &rlstore, nil
}
func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
if err != nil {
return nil, err
}
lockfile.Lock()
defer lockfile.Unlock()
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
if err := rlstore.Load(); err != nil {
return nil, err
}
return &rlstore, nil
}
func (r *layerStore) lookup(id string) (*Layer, bool) {
if layer, ok := r.byid[id]; ok {
return layer, ok
@ -326,7 +428,24 @@ func (r *layerStore) lookup(id string) (*Layer, bool) {
return nil, false
}
func (r *layerStore) Size(name string) (int64, error) {
layer, ok := r.lookup(name)
if !ok {
return -1, ErrLayerUnknown
}
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
if layer.UncompressedDigest != "" {
return layer.UncompressedSize, nil
}
return -1, nil
}
func (r *layerStore) ClearFlag(id string, flag string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
@ -336,6 +455,9 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
}
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
@ -349,6 +471,9 @@ func (r *layerStore) Status() ([][2]string, error) {
}
func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) {
if !r.IsReadWrite() {
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
}
size = -1
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
@ -383,15 +508,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
err = r.driver.Create(id, parent, mountLabel, options)
}
if err == nil {
newLayer := Layer{
layer = &Layer{
ID: id,
Parent: parent,
Names: names,
MountLabel: mountLabel,
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
}
r.layers = append(r.layers, newLayer)
layer = &r.layers[len(r.layers)-1]
r.layers = append(r.layers, layer)
r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
@ -441,6 +566,9 @@ func (r *layerStore) Create(id, parent string, names []string, mountLabel string
}
func (r *layerStore) Mount(id, mountLabel string) (string, error) {
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
@ -466,6 +594,9 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
}
func (r *layerStore) Unmount(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
@ -495,6 +626,9 @@ func (r *layerStore) removeName(layer *Layer, name string) {
}
func (r *layerStore) SetNames(id string, names []string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
}
if layer, ok := r.lookup(id); ok {
for _, name := range layer.Names {
delete(r.byname, name)
@ -519,6 +653,9 @@ func (r *layerStore) Metadata(id string) (string, error) {
}
func (r *layerStore) SetMetadata(id, metadata string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
}
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
return r.Save()
@ -531,6 +668,9 @@ func (r *layerStore) tspath(id string) string {
}
func (r *layerStore) Delete(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
@ -549,13 +689,21 @@ func (r *layerStore) Delete(id string) error {
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
newLayers := []Layer{}
for _, candidate := range r.layers {
if candidate.ID != id {
newLayers = append(newLayers, candidate)
toDeleteIndex := -1
for i, candidate := range r.layers {
if candidate.ID == id {
toDeleteIndex = i
break
}
}
if toDeleteIndex != -1 {
// delete the layer at toDeleteIndex
if toDeleteIndex == len(r.layers)-1 {
r.layers = r.layers[:len(r.layers)-1]
} else {
r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
}
}
r.layers = newLayers
if err = r.Save(); err != nil {
return err
}
@ -583,6 +731,9 @@ func (r *layerStore) Get(id string) (*Layer, error) {
}
func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
ids := []string{}
for id := range r.byid {
ids = append(ids, id)
@ -657,48 +808,64 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
}, nil
}
func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
var metadata storage.Unpacker
from, to, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
return nil, ErrLayerUnknown
}
compression := archive.Uncompressed
if cflag, ok := toLayer.Flags[compressionFlag]; ok {
if ctype, ok := cflag.(float64); ok {
compression = archive.Compression(ctype)
} else if ctype, ok := cflag.(archive.Compression); ok {
compression = archive.Compression(ctype)
}
// Default to applying the type of compression that we noted was used
	// for the layer diff when it was applied.
compression := toLayer.CompressionType
// If a particular compression type (or no compression) was selected,
// use that instead.
if options != nil && options.Compression != nil {
compression = *options.Compression
}
maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
// Depending on whether or not compression is desired, return either the
// passed-in ReadCloser, or a new one that provides its readers with a
// compressed version of the data that the original would have provided
// to its readers.
if compression == archive.Uncompressed {
return rc, nil
}
preader, pwriter := io.Pipe()
compressor, err := archive.CompressStream(pwriter, compression)
if err != nil {
rc.Close()
pwriter.Close()
preader.Close()
return nil, err
}
go func() {
defer pwriter.Close()
defer compressor.Close()
defer rc.Close()
io.Copy(compressor, rc)
}()
return preader, nil
}
if from != toLayer.Parent {
diff, err := r.driver.Diff(to, from)
if err == nil && (compression != archive.Uncompressed) {
preader, pwriter := io.Pipe()
compressor, err := archive.CompressStream(pwriter, compression)
if err != nil {
diff.Close()
pwriter.Close()
return nil, err
}
go func() {
io.Copy(compressor, diff)
diff.Close()
compressor.Close()
pwriter.Close()
}()
diff = preader
if err != nil {
return nil, err
}
return diff, err
return maybeCompressReadCloser(diff)
}
tsfile, err := os.Open(r.tspath(to))
if err != nil {
if os.IsNotExist(err) {
return r.driver.Diff(to, from)
if !os.IsNotExist(err) {
return nil, err
}
return nil, err
diff, err := r.driver.Diff(to, from)
if err != nil {
return nil, err
}
return maybeCompressReadCloser(diff)
}
defer tsfile.Close()
@ -720,33 +887,16 @@ func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
return nil, err
}
var stream io.ReadCloser
if compression != archive.Uncompressed {
preader, pwriter := io.Pipe()
compressor, err := archive.CompressStream(pwriter, compression)
if err != nil {
fgetter.Close()
pwriter.Close()
preader.Close()
return nil, err
}
go func() {
asm.WriteOutputTarStream(fgetter, metadata, compressor)
compressor.Close()
pwriter.Close()
}()
stream = preader
} else {
stream = asm.NewOutputTarStream(fgetter, metadata)
}
return ioutils.NewReadCloserWrapper(stream, func() error {
err1 := stream.Close()
tarstream := asm.NewOutputTarStream(fgetter, metadata)
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
err1 := tarstream.Close()
err2 := fgetter.Close()
if err2 == nil {
return err1
}
return err2
}), nil
})
return maybeCompressReadCloser(rc)
}
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
@ -758,6 +908,10 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
}
func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) {
if !r.IsReadWrite() {
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
}
layer, ok := r.lookup(to)
if !ok {
return -1, ErrLayerUnknown
@ -770,7 +924,9 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
}
compression := archive.DetectCompression(header[:n])
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
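// Tee the incoming stream through a digester and byte counter so the
// compressed blob's digest and size are recorded as the diff is consumed.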
compressedDigest := digest.Canonical.Digester()
compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)
tsdata := bytes.Buffer{}
compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
@ -778,15 +934,20 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
compressor = gzip.NewWriter(&tsdata)
}
metadata := storage.NewJSONPacker(compressor)
decompressed, err := archive.DecompressStream(defragmented)
uncompressed, err := archive.DecompressStream(defragmented)
if err != nil {
return -1, err
}
payload, err := asm.NewInputTarStream(decompressed, metadata, storage.NewDiscardFilePutter())
uncompressedDigest := digest.Canonical.Digester()
uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter())
if err != nil {
return -1, err
}
size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
if err != nil {
return -1, err
}
compressor.Close()
if err == nil {
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
@ -797,15 +958,57 @@ func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err
}
}
if compression != archive.Uncompressed {
layer.Flags[compressionFlag] = compression
} else {
delete(layer.Flags, compressionFlag)
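// updateDigestMap removes id from the list recorded under oldvalue and
// appends it to the list recorded under newvalue.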
updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
if oldvalue != "" {
for _, value := range (*m)[oldvalue] {
if value != id {
newList = append(newList, value)
}
}
if len(newList) > 0 {
(*m)[oldvalue] = newList
} else {
delete(*m, oldvalue)
}
}
if newvalue != "" {
(*m)[newvalue] = append((*m)[newvalue], id)
}
}
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
layer.CompressedDigest = compressedDigest.Digest()
layer.CompressedSize = compressedCounter.Count
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
layer.UncompressedDigest = uncompressedDigest.Digest()
layer.UncompressedSize = uncompressedCounter.Count
layer.CompressionType = compression
err = r.Save()
return size, err
}
func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
var layers []Layer
for _, layerID := range m[d] {
layer, ok := r.lookup(layerID)
if !ok {
return nil, ErrLayerUnknown
}
layers = append(layers, *layer)
}
return layers, nil
}
func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.bycompressedsum, d)
}
func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.byuncompressedsum, d)
}
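A hedged sketch of the intended lookup, reusing an existing layer instead of re-applying a blob (lstore and diffID are assumed names):

	if known, err := lstore.LayersByUncompressedDigest(diffID); err == nil && len(known) > 0 {
		// A layer with this DiffID already exists; reuse it rather than
		// applying the same diff again.
		return known[0].ID, nil
	}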
func (r *layerStore) Lock() {
r.lockfile.Lock()
}
@ -822,6 +1025,10 @@ func (r *layerStore) Modified() (bool, error) {
return r.lockfile.Modified()
}
func (r *layerStore) IsReadWrite() bool {
return r.lockfile.IsReadWrite()
}
func (r *layerStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

View File

@ -1,14 +1,15 @@
package storage
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// A Locker represents a file lock where the file is used to cache an
@ -27,43 +28,80 @@ type Locker interface {
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
TouchedSince(when time.Time) bool
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
}
type lockfile struct {
mu sync.Mutex
file string
fd uintptr
lw string
mu sync.Mutex
file string
fd uintptr
lw string
locktype int16
}
var (
lockfiles map[string]*lockfile
lockfilesLock sync.Mutex
// ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write
ErrLockReadOnly = errors.New("lock is not a read-write lock")
)
// GetLockfile opens a lock file, creating it if necessary. The Locker object
// return will be returned unlocked.
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object it returns will be returned unlocked.
func GetLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]*lockfile)
}
if locker, ok := lockfiles[filepath.Clean(path)]; ok {
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
if !locker.IsReadWrite() {
return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath)
}
return locker, nil
}
fd, err := unix.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "error opening %q", cleanPath)
}
locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID()}
unix.CloseOnExec(fd)
locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
// GetROLockfile opens a read-only lock file. The Locker object it returns
// will be returned unlocked.
func GetROLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]*lockfile)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
if locker.IsReadWrite() {
return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath)
}
return locker, nil
}
fd, err := unix.Open(cleanPath, os.O_RDONLY, 0)
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", cleanPath)
}
unix.CloseOnExec(fd)
locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
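A minimal sketch of taking the new read-only lock on a shared store (the path is illustrative):

	lock, err := GetROLockfile("/usr/share/containers/storage/images.lock")
	if err != nil {
		return err
	}
	lock.Lock() // acquires a shared F_RDLCK lock rather than F_WRLCK
	defer lock.Unlock()
	// lock.IsReadWrite() reports false, so stores built on it refuse writes.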
// Lock locks the lock file
func (l *lockfile) Lock() {
lk := unix.Flock_t{
Type: unix.F_WRLCK,
Type: l.locktype,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
@ -75,6 +113,7 @@ func (l *lockfile) Lock() {
}
}
// Unlock unlocks the lock file
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
@ -89,6 +128,7 @@ func (l *lockfile) Unlock() {
l.mu.Unlock()
}
// Touch updates the lock file with the UID of the user
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
@ -110,6 +150,7 @@ func (l *lockfile) Touch() error {
return nil
}
// Modified indicates if the lock file has been updated since the last time it was loaded
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@ -128,6 +169,7 @@ func (l *lockfile) Modified() (bool, error) {
return l.lw != lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
@ -137,3 +179,8 @@ func (l *lockfile) TouchedSince(when time.Time) bool {
touched := time.Unix(statTMtimeUnix(st))
return when.Before(touched)
}
// IsReadWrite indicates whether the lock file is a read-write lock
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
}

View File

@ -38,7 +38,15 @@ func getNextFreeLoopbackIndex() (int, error) {
return index, err
}
func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
// Read information about the loopback file.
var st syscall.Stat_t
err = syscall.Fstat(int(sparseFile.Fd()), &st)
if err != nil {
logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
// Start looking for a free /dev/loop
for {
target := fmt.Sprintf("/dev/loop%d", index)
@ -77,6 +85,18 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
// Otherwise, we keep going with the loop
continue
}
// Check if the loopback driver and underlying filesystem agree on the loopback file's
// device and inode numbers.
dev, ino, err := getLoopbackBackingFile(loopFile)
if err != nil {
logrus.Errorf("Error getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
if dev != st.Dev || ino != st.Ino {
logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
}
// In case of success, we finished. Break the loop.
break
}
@ -110,7 +130,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
}
defer sparseFile.Close()
loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
if err != nil {
return nil, err
}

View File

@ -15,8 +15,8 @@ import (
var (
// ErrNotFound plugin not found
ErrNotFound = errors.New("plugin not found")
socketsPath = "/run/oci-storage/plugins"
specsPaths = []string{"/etc/oci-storage/plugins", "/usr/lib/oci-storage/plugins"}
socketsPath = "/run/containers/storage/plugins"
specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"}
)
// localRegistry defines a registry that is local (using unix socket).

View File

@ -3,10 +3,11 @@
//
// Storage discovers plugins by looking for them in the plugin directory whenever
// a user or container tries to use one by name. UNIX domain socket files must
// be located under /run/oci-storage/plugins, whereas spec files can be located
// either under /etc/oci-storage/plugins or /usr/lib/oci-storage/plugins. This
// is handled by the Registry interface, which lets you list all plugins or get
// a plugin by its name if it exists.
// be located under /run/containers/storage/plugins, whereas spec files can be
// located either under /etc/containers/storage/plugins or
// /usr/lib/containers/storage/plugins. This is handled by the Registry
// interface, which lets you list all plugins or get a plugin by its name if it
// exists.
//
// The plugins need to implement an HTTP server and bind this to the UNIX socket
// or the address specified in the spec files.

View File

@ -1,13 +0,0 @@
// +build !containersstorageautogen
// Package storageversion is auto-generated at build-time
package storageversion
// Default build-time variable for library-import.
// This file is overridden on build with build-time informations.
const (
GitCommit string = "library-import"
Version string = "library-import"
BuildTime string = "library-import"
IAmStatic string = "library-import"
)

File diff suppressed because it is too large

View File

@ -1,3 +1,4 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim 0f615c198a84e0344b4ed49c464d8833d4648dfc
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
@ -7,9 +8,11 @@ github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/go-check/check 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3

View File

@ -0,0 +1,56 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
const (
// AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339).
AnnotationCreated = "org.opencontainers.image.created"
// AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string).
AnnotationAuthors = "org.opencontainers.image.authors"
// AnnotationURL is the annotation key for the URL to find more information on the image.
AnnotationURL = "org.opencontainers.image.url"
// AnnotationDocumentation is the annotation key for the URL to get documentation on the image.
AnnotationDocumentation = "org.opencontainers.image.documentation"
// AnnotationSource is the annotation key for the URL to get source code for building the image.
AnnotationSource = "org.opencontainers.image.source"
// AnnotationVersion is the annotation key for the version of the packaged software.
// The version MAY match a label or tag in the source code repository.
// The version MAY be Semantic versioning-compatible.
AnnotationVersion = "org.opencontainers.image.version"
// AnnotationRevision is the annotation key for the source control revision identifier for the packaged software.
AnnotationRevision = "org.opencontainers.image.revision"
// AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual.
AnnotationVendor = "org.opencontainers.image.vendor"
// AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression.
AnnotationLicenses = "org.opencontainers.image.licenses"
// AnnotationRefName is the annotation key for the name of the reference for a target.
// SHOULD only be considered valid when on descriptors on `index.json` within image layout.
AnnotationRefName = "org.opencontainers.image.ref.name"
// AnnotationTitle is the annotation key for the human-readable title of the image.
AnnotationTitle = "org.opencontainers.image.title"
// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
AnnotationDescription = "org.opencontainers.image.description"
)
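A hedged sketch of attaching these keys to a descriptor (the values are illustrative, not mandated by the spec):

	desc := Descriptor{
		MediaType: MediaTypeImageManifest,
		Annotations: map[string]string{
			AnnotationRefName: "v1.0",
			AnnotationCreated: time.Now().UTC().Format(time.RFC3339),
		},
	}
	_ = desc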

View File

@ -37,7 +37,7 @@ type ImageConfig struct {
// Cmd defines the default arguments to the entrypoint of the container.
Cmd []string `json:"Cmd,omitempty"`
// Volumes is a set of directories which should be created as data volumes in a container running this image.
// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
Volumes map[string]struct{} `json:"Volumes,omitempty"`
// WorkingDir sets the current working directory of the entrypoint process in the container.

View File

@ -25,7 +25,7 @@ const (
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-rc6-dev"
VersionDev = ""
)
// Version is the specification version that the package types support.

vendor/github.com/ostreedev/ostree-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,17 @@
Portions of this code are derived from:
https://github.com/dradtke/gotk3
Copyright (c) 2013 Conformal Systems LLC.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/ostreedev/ostree-go/README.md generated vendored Normal file
View File

@ -0,0 +1,4 @@
OSTree-Go
=========
Go bindings for OSTree. Find out more about OSTree [here](https://github.com/ostreedev/ostree)

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
import (
"unsafe"
)
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
/*
* GBoolean
*/
// GBoolean is a Go representation of glib's gboolean
type GBoolean C.gboolean
func NewGBoolean() GBoolean {
return GBoolean(0)
}
func GBool(b bool) GBoolean {
if b {
return GBoolean(1)
}
return GBoolean(0)
}
func (b GBoolean) Ptr() unsafe.Pointer {
return unsafe.Pointer(&b)
}
func GoBool(b GBoolean) bool {
if b != 0 {
return true
}
return false
}

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"unsafe"
)
// GIO types
type GCancellable struct {
*GObject
}
func (self *GCancellable) native() *C.GCancellable {
return (*C.GCancellable)(unsafe.Pointer(self))
}
func (self *GCancellable) Ptr() unsafe.Pointer {
return unsafe.Pointer(self)
}
// At the moment, no cancellable API, just pass nil

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"errors"
"unsafe"
)
/*
* GError
*/
// GError is a representation of GLib's GError
type GError struct {
ptr unsafe.Pointer
}
func NewGError() GError {
return GError{nil}
}
func (e GError) Ptr() unsafe.Pointer {
if e.ptr == nil {
return nil
}
return e.ptr
}
// Nil clears the stored pointer; it needs a pointer receiver so the
// change is visible to the caller (a value receiver would only clear a copy).
func (e *GError) Nil() {
	e.ptr = nil
}
func (e *GError) native() *C.GError {
if e == nil || e.ptr == nil {
return nil
}
return (*C.GError)(e.ptr)
}
func ToGError(ptr unsafe.Pointer) GError {
return GError{ptr}
}
func ConvertGError(e GError) error {
	// Guard against a nil GError; _g_error_get_message asserts otherwise.
	if e.native() == nil {
		return errors.New("nil GError")
	}
	defer C.g_error_free(e.native())
	return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native()))))
}
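// Usage sketch of the intended flow (the failing C call is illustrative):
//
//	var cerr *C.GError
//	if C.some_call(&cerr) == 0 {
//		return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
//	}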

View File

@ -0,0 +1,52 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"unsafe"
)
/*
* GFile
*/
type GFile struct {
ptr unsafe.Pointer
}
func (f GFile) Ptr() unsafe.Pointer {
return f.ptr
}
func NewGFile() *GFile {
return &GFile{nil}
}
func ToGFile(ptr unsafe.Pointer) *GFile {
gf := NewGFile()
gf.ptr = ptr
return gf
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"unsafe"
)
/*
* GFileInfo
*/
type GFileInfo struct {
ptr unsafe.Pointer
}
func (fi GFileInfo) Ptr() unsafe.Pointer {
return fi.ptr
}
func NewGFileInfo() GFileInfo {
	return GFileInfo{nil}
}
func ToGFileInfo(p unsafe.Pointer) *GFileInfo {
	return &GFileInfo{ptr: p}
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
import (
"unsafe"
)
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
/*
* GHashTable
*/
type GHashTable struct {
ptr unsafe.Pointer
}
func (ht *GHashTable) Ptr() unsafe.Pointer {
return ht.ptr
}
func (ht *GHashTable) native() *C.GHashTable {
return (*C.GHashTable)(ht.ptr)
}
func ToGHashTable(ptr unsafe.Pointer) *GHashTable {
return &GHashTable{ptr}
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
import (
"unsafe"
)
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
/*
* GHashTableIter
*/
type GHashTableIter struct {
ptr unsafe.Pointer
}
func (ht *GHashTableIter) Ptr() unsafe.Pointer {
return ht.ptr
}
func (ht *GHashTableIter) native() *C.GHashTableIter {
return (*C.GHashTableIter)(ht.ptr)
}
func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter {
return &GHashTableIter{ptr}
}

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"

View File

@ -0,0 +1,17 @@
#include <glib.h>
static char *
_g_error_get_message (GError *error)
{
g_assert (error != NULL);
return error->message;
}
static const char *
_g_variant_lookup_string (GVariant *v, const char *key)
{
const char *r;
if (g_variant_lookup (v, key, "&s", &r))
return r;
return NULL;
}

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"unsafe"
)
/*
* GObject
*/
// IObject is an interface type implemented by Object and all types which embed
// an Object. It is meant to be used as a type for function arguments which
// require GObjects or any subclasses thereof.
type IObject interface {
toGObject() *C.GObject
ToObject() *GObject
}
// GObject is a representation of GLib's GObject.
type GObject struct {
ptr unsafe.Pointer
}
func (v *GObject) Ptr() unsafe.Pointer {
return v.ptr
}
func (v *GObject) native() *C.GObject {
if v == nil {
return nil
}
return (*C.GObject)(v.ptr)
}
func (v *GObject) Ref() {
C.g_object_ref(C.gpointer(v.Ptr()))
}
func (v *GObject) Unref() {
C.g_object_unref(C.gpointer(v.Ptr()))
}
func (v *GObject) RefSink() {
C.g_object_ref_sink(C.gpointer(v.native()))
}
func (v *GObject) IsFloating() bool {
c := C.g_object_is_floating(C.gpointer(v.native()))
return GoBool(GBoolean(c))
}
func (v *GObject) ForceFloating() {
C.g_object_force_floating(v.native())
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
import (
"unsafe"
)
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
/*
* GOptionContext
*/
type GOptionContext struct {
ptr unsafe.Pointer
}
func (oc *GOptionContext) Ptr() unsafe.Pointer {
return oc.ptr
}
func (oc *GOptionContext) native() *C.GOptionContext {
return (*C.GOptionContext)(oc.ptr)
}
func ToGOptionContext(ptr unsafe.Pointer) GOptionContext {
return GOptionContext{ptr}
}

View File

@ -0,0 +1,97 @@
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package glibobject
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include "glibobject.go.h"
// #include <stdlib.h>
import "C"
import (
"fmt"
"unsafe"
)
/*
* GVariant
*/
type GVariant struct {
ptr unsafe.Pointer
}
//func GVariantNew(p unsafe.Pointer) *GVariant {
//o := &GVariant{p}
//runtime.SetFinalizer(o, (*GVariant).Unref)
//return o;
//}
//func GVariantNewSink(p unsafe.Pointer) *GVariant {
//o := &GVariant{p}
//runtime.SetFinalizer(o, (*GVariant).Unref)
//o.RefSink()
//return o;
//}
func (v *GVariant) native() *C.GVariant {
return (*C.GVariant)(v.ptr)
}
func (v *GVariant) Ptr() unsafe.Pointer {
return v.ptr
}
func (v *GVariant) Ref() {
C.g_variant_ref(v.native())
}
func (v *GVariant) Unref() {
C.g_variant_unref(v.native())
}
func (v *GVariant) RefSink() {
C.g_variant_ref_sink(v.native())
}
func (v *GVariant) TypeString() string {
cs := (*C.char)(C.g_variant_get_type_string(v.native()))
return C.GoString(cs)
}
func (v *GVariant) GetChildValue(i int) *GVariant {
	cchild := C.g_variant_get_child_value(v.native(), C.gsize(i))
	// Wrap the returned C pointer; casting it directly to *GVariant would
	// misread the C struct as the Go wrapper.
	return ToGVariant(unsafe.Pointer(cchild))
}
func (v *GVariant) LookupString(key string) (string, error) {
ckey := C.CString(key)
defer C.free(unsafe.Pointer(ckey))
// TODO: Find a way to have constant C strings in golang
cstr := C._g_variant_lookup_string(v.native(), ckey)
if cstr == nil {
return "", fmt.Errorf("No such key: %s", key)
}
return C.GoString(cstr), nil
}
func ToGVariant(ptr unsafe.Pointer) *GVariant {
return &GVariant{ptr}
}
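// Usage sketch: wrap a *C.GVariant received from C and read a string
// member (the variable and key names are illustrative):
//
//	v := glib.ToGVariant(unsafe.Pointer(cvariant))
//	value, err := v.LookupString("some-key")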

View File

@ -0,0 +1,93 @@
// Package otbuiltin contains all of the basic commands for creating and
// interacting with an ostree repository
package otbuiltin
import (
"errors"
"fmt"
"runtime"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
type Repo struct {
//*glib.GObject
ptr unsafe.Pointer
}
// Converts an ostree repo struct to its C equivalent
func (r *Repo) native() *C.OstreeRepo {
//return (*C.OstreeRepo)(r.Ptr())
return (*C.OstreeRepo)(r.ptr)
}
// Takes a C ostree repo and converts it to a Go struct
func repoFromNative(p *C.OstreeRepo) *Repo {
if p == nil {
return nil
}
//o := (*glib.GObject)(unsafe.Pointer(p))
//r := &Repo{o}
r := &Repo{unsafe.Pointer(p)}
return r
}
// Checks if the repo has been initialized
func (r *Repo) isInitialized() bool {
if r.ptr != nil {
return true
}
return false
}
// Attempts to open the repo at the given path
func OpenRepo(path string) (*Repo, error) {
var cerr *C.GError = nil
	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))
	pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
crepo := C.ostree_repo_new(pathc)
repo := repoFromNative(crepo)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr)))
if !r {
return nil, generateError(cerr)
}
return repo, nil
}
// Enable support for tombstone commits, which allow the repo to distinguish between
// commits that were intentionally deleted and commits that were removed accidentally
func enableTombstoneCommits(repo *Repo) error {
var tombstoneCommits bool
var config *C.GKeyFile = C.ostree_repo_get_config(repo.native())
var cerr *C.GError
	ccore := C.CString("core")
	defer C.free(unsafe.Pointer(ccore))
	ckey := C.CString("tombstone-commits")
	defer C.free(unsafe.Pointer(ckey))
	tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(ccore), (*C.gchar)(ckey), nil)))
	// tombstoneCommits is false both when the key is explicitly false and when it is missing from the config file
	if !tombstoneCommits {
		C.g_key_file_set_boolean(config, (*C.gchar)(ccore), (*C.gchar)(ckey), C.TRUE)
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) {
return generateError(cerr)
}
}
return nil
}
func generateError(err *C.GError) error {
	goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err)))
	if _, file, line, ok := runtime.Caller(1); ok {
		return fmt.Errorf("%s:%d - %s", file, line, goErr)
	}
	return goErr
}
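// Usage sketch (minimal; the path is illustrative). The commands in the
// other files of this package start from the *Repo returned here:
//
//	repo, err := otbuiltin.OpenRepo("/srv/ostree/repo")
//	if err != nil {
//		return err
//	}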

View File

@ -0,0 +1,191 @@
#ifndef BUILTIN_GO_H
#define BUILTIN_GO_H
#include <glib.h>
#include <ostree.h>
#include <string.h>
#include <fcntl.h>
/* Signed, with a -1 sentinel, so the >= 0 checks in _commit_filter work. */
static gint owner_uid = -1;
static gint owner_gid = -1;
static void
_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) {
*flags |= flag;
}
struct CommitFilterData {
GHashTable *mode_adds;
GHashTable *skip_list;
};
typedef struct CommitFilterData CommitFilterData;
static char* _gptr_to_str(gpointer p)
{
return (char*)p;
}
// The following 3 functions are wrapper functions for macros since CGO can't parse macros
static OstreeRepoFile*
_ostree_repo_file(GFile *file)
{
return OSTREE_REPO_FILE (file);
}
static guint
_gpointer_to_uint (gpointer ptr)
{
return GPOINTER_TO_UINT (ptr);
}
static gpointer
_guint_to_pointer (guint u)
{
return GUINT_TO_POINTER (u);
}
static void
_g_clear_object (volatile GObject **object_ptr)
{
g_clear_object(object_ptr);
}
static const GVariantType*
_g_variant_type (char *type)
{
return G_VARIANT_TYPE (type);
}
static int
_at_fdcwd ()
{
return AT_FDCWD;
}
static guint64
_guint64_from_be (guint64 val)
{
return GUINT64_FROM_BE (val);
}
// These functions are wrappers for variadic functions since CGO can't parse variadic functions
static void
_g_printerr_onearg (char* msg,
char* arg)
{
g_printerr("%s %s\n", msg, arg);
}
static void
_g_set_error_onearg (GError **err,
                     char*    msg,
                     char*    arg)
{
  g_set_error(err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg);
}
static void
_g_variant_builder_add_twoargs (GVariantBuilder* builder,
const char *format_string,
char *arg1,
GVariant *arg2)
{
g_variant_builder_add(builder, format_string, arg1, arg2);
}
static GHashTable*
_g_hash_table_new_full ()
{
return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
}
static void
_g_variant_get_commit_dump (GVariant *variant,
const char *format,
char **subject,
char **body,
guint64 *timestamp)
{
  g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL);
}
static guint32
_binary_or (guint32 a, guint32 b)
{
return a | b;
}
static void
_cleanup (OstreeRepo *self,
OstreeRepoCommitModifier *modifier,
GCancellable *cancellable,
GError **out_error)
{
if (self)
ostree_repo_abort_transaction(self, cancellable, out_error);
if (modifier)
ostree_repo_commit_modifier_unref (modifier);
}
// The following functions make up a commit_filter function that gets passed into
// another C function (and thus can't be a go function) as well as its helpers
static OstreeRepoCommitFilterResult
_commit_filter (OstreeRepo *self,
const char *path,
GFileInfo *file_info,
gpointer user_data)
{
struct CommitFilterData *data = user_data;
GHashTable *mode_adds = data->mode_adds;
GHashTable *skip_list = data->skip_list;
gpointer value;
if (owner_uid >= 0)
g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid);
if (owner_gid >= 0)
g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid);
if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value))
{
guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode");
guint mode_add = GPOINTER_TO_UINT (value);
g_file_info_set_attribute_uint32 (file_info, "unix::mode",
current_mode | mode_add);
g_hash_table_remove (mode_adds, path);
}
if (skip_list && g_hash_table_contains (skip_list, path))
{
g_hash_table_remove (skip_list, path);
return OSTREE_REPO_COMMIT_FILTER_SKIP;
}
return OSTREE_REPO_COMMIT_FILTER_ALLOW;
}
static void
_set_owner_uid (gint uid)
{
  owner_uid = uid;
}
static void _set_owner_gid (gint gid)
{
  owner_gid = gid;
}
// Wrapper function for a function that takes a C function as a parameter.
// That translation doesn't work in go
static OstreeRepoCommitModifier*
_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags,
gpointer user_data,
GDestroyNotify destroy_notify)
{
return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify);
}
#endif

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1,102 @@
package otbuiltin
import (
"strings"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
// Global variable for options
var checkoutOpts checkoutOptions
// Contains all of the options for checking commits out of
// an ostree repo
type checkoutOptions struct {
UserMode bool // Do not change file ownership or initialize extended attributes
	Union bool // Keep existing directories and unchanged files, overwriting existing files
	AllowNoent bool // Do nothing if the specified filepath does not exist
	DisableCache bool // Do not update or use the internal repository uncompressed object cache
Whiteouts bool // Process 'whiteout' (docker style) entries
RequireHardlinks bool // Do not fall back to full copies if hard linking fails
Subpath string // Checkout sub-directory path
FromFile string // Process many checkouts from the given file
}
// Instantiates and returns a checkoutOptions struct with default values set
func NewCheckoutOptions() checkoutOptions {
return checkoutOptions{}
}
// Checkout checks out the commit with the given ref from the repository at repoPath to the destination directory. It returns an error if the checkout could not be processed
func Checkout(repoPath, destination, commit string, opts checkoutOptions) error {
checkoutOpts = opts
var cancellable *glib.GCancellable
ccommit := C.CString(commit)
defer C.free(unsafe.Pointer(ccommit))
	var gerr = glib.NewGError()
	// cerr starts out nil; on failure generateError consumes and frees it.
	cerr := (*C.GError)(gerr.Ptr())
	crepoPath := C.CString(repoPath)
	defer C.free(unsafe.Pointer(crepoPath))
	repoPathc := C.g_file_new_for_path(crepoPath)
defer C.g_object_unref(C.gpointer(repoPathc))
crepo := C.ostree_repo_new(repoPathc)
if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
return generateError(cerr)
}
if strings.Compare(checkoutOpts.FromFile, "") != 0 {
err := processManyCheckouts(crepo, destination, cancellable)
if err != nil {
return err
}
} else {
		var resolvedCommit *C.char
		// Use a closure so the pointer is evaluated at function exit,
		// after ostree_repo_resolve_rev has filled it in.
		defer func() { C.free(unsafe.Pointer(resolvedCommit)) }()
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) {
return generateError(cerr)
}
err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable)
if err != nil {
return err
}
}
return nil
}
// Processes one checkout from the repo
func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error {
cdest := C.CString(destination)
defer C.free(unsafe.Pointer(cdest))
	var gerr = glib.NewGError()
	cerr := (*C.GError)(gerr.Ptr())
var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions
if checkoutOpts.UserMode {
repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER
}
if checkoutOpts.Union {
repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES
}
checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr)))
if !checkedOut {
return generateError(cerr)
}
return nil
}
// Process many checkouts from a file; not yet implemented, so FromFile is currently ignored
func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error {
return nil
}
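// Usage sketch (minimal; paths and ref are illustrative):
//
//	opts := otbuiltin.NewCheckoutOptions()
//	opts.UserMode = true // do not require privileges to change ownership
//	if err := otbuiltin.Checkout("/srv/ostree/repo", "/var/tmp/rootfs", "exampleos/x86_64/stable", opts); err != nil {
//		return err
//	}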

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1,482 @@
package otbuiltin
import (
"bytes"
"errors"
"fmt"
"strings"
"time"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
// Declare global variable to store commitOptions
var options commitOptions
// handleLineFunc is the signature of per-line handlers passed to parseFileByLine
type handleLineFunc func(string, *glib.GHashTable) error
// Contains all of the options for committing to an ostree repo. Initialize
// with NewCommitOptions()
type commitOptions struct {
Subject string // One line subject
Body string // Full description
Parent string // Parent of the commit
Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree
AddMetadataString []string // Add a key/value pair to metadata
AddDetachedMetadataString []string // Add a key/value pair to detached metadata
OwnerUID int // Set file ownership to user id
OwnerGID int // Set file ownership to group id
NoXattrs bool // Do not import extended attributes
LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository
TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed
SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing
StatOverrideFile string // File containing list of modifications to make permissions
SkipListFile string // File containing list of file paths to skip
GenerateSizes bool // Generate size information along with commit metadata
GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy)
GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy)
Timestamp time.Time // Override the timestamp of the commit
Orphan bool // Commit does not belong to a branch
Fsync bool // Specify whether fsync should be used or not. Default to true
}
// Initializes a commitOptions struct and sets default values
func NewCommitOptions() commitOptions {
var co commitOptions
co.OwnerUID = -1
co.OwnerGID = -1
co.Fsync = true
return co
}
type OstreeRepoTransactionStats struct {
metadata_objects_total int32
metadata_objects_written int32
content_objects_total int32
content_objects_written int32
content_bytes_written uint64
}
func (repo *Repo) PrepareTransaction() (bool, error) {
var cerr *C.GError = nil
var resume C.gboolean
r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr)))
if !r {
return false, generateError(cerr)
}
return glib.GoBool(glib.GBoolean(resume)), nil
}
func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) {
var cerr *C.GError = nil
var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{}
statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats))
r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr)))
if !r {
return nil, generateError(cerr)
}
return &stats, nil
}
func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) {
var cRemote *C.char = nil
var cRef *C.char = nil
var cChecksum *C.char = nil
if remote != "" {
cRemote = C.CString(remote)
}
if ref != "" {
cRef = C.CString(ref)
}
if checksum != "" {
cChecksum = C.CString(checksum)
}
C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum)
}
func (repo *Repo) AbortTransaction() error {
var cerr *C.GError = nil
r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr)))
if !r {
return generateError(cerr)
}
return nil
}
func (repo *Repo) RegenerateSummary() error {
var cerr *C.GError = nil
r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr)))
if !r {
return generateError(cerr)
}
return nil
}
// Commits a directory, specified by commitPath, to an ostree repo as a given branch
func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) {
options = opts
var err error
var modeAdds *glib.GHashTable
var skipList *glib.GHashTable
var objectToCommit *glib.GFile
var skipCommit bool = false
	var ccommitChecksum *C.char
	// Use a closure so the checksum assigned below is the pointer freed.
	defer func() { C.free(unsafe.Pointer(ccommitChecksum)) }()
var flags C.OstreeRepoCommitModifierFlags = 0
var filter_data C.CommitFilterData
	var cerr *C.GError
	var metadata *C.GVariant = nil
	defer func() {
		if metadata != nil {
			C.g_variant_unref(metadata)
		}
	}()
	var detachedMetadata *C.GVariant = nil
	var mtree *C.OstreeMutableTree
	var root *C.GFile
	var modifier *C.OstreeRepoCommitModifier // unref'd on the error path below
	var cancellable *C.GCancellable
cpath := C.CString(commitPath)
defer C.free(unsafe.Pointer(cpath))
csubject := C.CString(options.Subject)
defer C.free(unsafe.Pointer(csubject))
cbody := C.CString(options.Body)
defer C.free(unsafe.Pointer(cbody))
cbranch := C.CString(branch)
defer C.free(unsafe.Pointer(cbranch))
cparent := C.CString(options.Parent)
defer C.free(unsafe.Pointer(cparent))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) {
goto out
}
// If the user provided a stat override file
if strings.Compare(options.StatOverrideFile, "") != 0 {
modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full()))
if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil {
goto out
}
}
// If the user provided a skiplist file
if strings.Compare(options.SkipListFile, "") != 0 {
skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full()))
if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil {
goto out
}
}
if options.AddMetadataString != nil {
metadata, err = parseKeyValueStrings(options.AddMetadataString)
if err != nil {
goto out
}
}
	if options.AddDetachedMetadataString != nil {
		// Keep the parsed variant so it can be written as detached
		// metadata after the commit below.
		detachedMetadata, err = parseKeyValueStrings(options.AddDetachedMetadataString)
		if err != nil {
			goto out
		}
	}
if strings.Compare(branch, "") == 0 && !options.Orphan {
err = errors.New("A branch must be specified or use commitOptions.Orphan")
goto out
}
if options.NoXattrs {
C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS)
}
if options.GenerateSizes {
C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES)
}
if !options.Fsync {
C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE)
}
	if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs {
		// Only dereference the hash-table wrappers that were actually created above.
		if modeAdds != nil {
			filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr())
		}
		if skipList != nil {
			filter_data.skip_list = (*C.GHashTable)(skipList.Ptr())
		}
		C._set_owner_uid((C.gint)(options.OwnerUID))
		C._set_owner_gid((C.gint)(options.OwnerGID))
		modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil)
	}
if strings.Compare(options.Parent, "") != 0 {
if strings.Compare(options.Parent, "none") == 0 {
options.Parent = ""
}
} else if !options.Orphan {
cerr = nil
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) {
goto out
}
}
if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) {
goto out
}
mtree = C.ostree_mutable_tree_new()
if len(commitPath) == 0 && (len(options.Tree) == 0 || len(options.Tree[0]) == 0) {
currentDir := (*C.char)(C.g_get_current_dir())
objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir)))
C.g_free(C.gpointer(currentDir))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
goto out
}
} else if len(options.Tree) != 0 {
var eq int = -1
cerr = nil
for tree := range options.Tree {
eq = strings.Index(options.Tree[tree], "=")
if eq == -1 {
				C._g_set_error_onearg(&cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree]))
goto out
}
treeType := options.Tree[tree][:eq]
treeVal := options.Tree[tree][eq+1:]
if strings.Compare(treeType, "dir") == 0 {
objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal))))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
goto out
}
} else if strings.Compare(treeType, "tar") == 0 {
objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal))))
			if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) {
				goto out
			}
} else if strings.Compare(treeType, "ref") == 0 {
if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) {
goto out
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
goto out
}
} else {
C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal))
goto out
}
}
} else {
objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath)))
cerr = nil
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) {
goto out
}
}
if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 {
		// Iterate with a stack-allocated GHashTableIter; GLib only needs
		// its address, and the keys and values stay owned by the table.
		var hashIter C.GHashTableIter
		var key, value C.gpointer
		C.g_hash_table_iter_init(&hashIter, (*C.GHashTable)(modeAdds.Ptr()))
		for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(&hashIter, &key, &value))) {
			C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key))
		}
		err = errors.New("Unmatched StatOverride paths")
		goto out
}
if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 {
		// Same stack-allocated iterator pattern as the StatOverride check above.
		var hashIter C.GHashTableIter
		var key, value C.gpointer
		C.g_hash_table_iter_init(&hashIter, (*C.GHashTable)(skipList.Ptr()))
		for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(&hashIter, &key, &value))) {
			C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key))
		}
		err = errors.New("Unmatched SkipList paths")
		goto out
}
cerr = nil
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) {
goto out
}
if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 {
var parentRoot *C.GFile
cerr = nil
if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) {
C.free(unsafe.Pointer(parentRoot))
goto out
}
if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) {
skipCommit = true
}
C.free(unsafe.Pointer(parentRoot))
}
if !skipCommit {
var timestamp C.guint64
if options.Timestamp.IsZero() {
var now *C.GDateTime = C.g_date_time_new_now_utc()
timestamp = (C.guint64)(C.g_date_time_to_unix(now))
C.g_date_time_unref(now)
cerr = nil
ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr)
if !glib.GoBool(glib.GBoolean(ret)) {
goto out
}
} else {
timestamp = (C.guint64)(options.Timestamp.Unix())
if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody,
metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) {
goto out
}
}
if detachedMetadata != nil {
C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr)
}
if len(options.GpgSign) != 0 {
for key := range options.GpgSign {
if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) {
goto out
}
}
}
if strings.Compare(branch, "") != 0 {
C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum)
} else if !options.Orphan {
goto out
} else {
// TODO: Looks like I forgot to implement this.
}
} else {
ccommitChecksum = C.CString(options.Parent)
}
return C.GoString(ccommitChecksum), nil
out:
if repo.native() != nil {
C.ostree_repo_abort_transaction(repo.native(), cancellable, nil)
//C.free(unsafe.Pointer(repo.native()))
}
if modifier != nil {
C.ostree_repo_commit_modifier_unref(modifier)
}
if err != nil {
return "", err
}
return "", generateError(cerr)
}
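// Usage sketch (minimal, with illustrative paths and branch name),
// showing how PrepareTransaction, Commit and CommitTransaction are meant
// to be sequenced:
//
//	repo, err := otbuiltin.OpenRepo("/srv/ostree/repo")
//	if err != nil {
//		return "", err
//	}
//	if _, err := repo.PrepareTransaction(); err != nil {
//		return "", err
//	}
//	opts := otbuiltin.NewCommitOptions()
//	opts.Subject = "initial import"
//	checksum, err := repo.Commit("/var/tmp/rootfs", "exampleos/x86_64/stable", opts)
//	if err != nil {
//		return "", err
//	}
//	if _, err := repo.CommitTransaction(); err != nil {
//		return "", err
//	}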
// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant
func parseKeyValueStrings(pairs []string) (*C.GVariant, error) {
builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}")))
defer C.g_variant_builder_unref(builder)
for iter := range pairs {
index := strings.Index(pairs[iter], "=")
		if index <= 0 {
			var buffer bytes.Buffer
			buffer.WriteString("Missing '=' in KEY=VALUE metadata '")
			buffer.WriteString(pairs[iter])
			buffer.WriteString("'")
			return nil, errors.New(buffer.String())
		}
key := C.CString(pairs[iter][:index])
value := C.CString(pairs[iter][index+1:])
valueVariant := C.g_variant_new_string((*C.gchar)(value))
C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant)
}
metadata := C.g_variant_builder_end(builder)
return C.g_variant_ref_sink(metadata), nil
}
// Parse a file line by line and handle each line with the given handleLineFunc
func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error {
var contents *C.char
var file *glib.GFile
var lines []string
var gerr = glib.NewGError()
cerr := (*C.GError)(gerr.Ptr())
file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path))))
if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) {
return generateError(cerr)
}
lines = strings.Split(C.GoString(contents), "\n")
for line := range lines {
if strings.Compare(lines[line], "") == 0 {
continue
}
		if err := fn(lines[line], table); err != nil {
			// Return the handler's error, not the unset C error.
			return err
		}
}
return nil
}
// Handle an individual line from a Statoverride file
func handleStatOverrideLine(line string, table *glib.GHashTable) error {
var space int
var modeAdd C.guint
if space = strings.IndexRune(line, ' '); space == -1 {
return errors.New("Malformed StatOverrideFile (no space found)")
}
modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil))
C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd))
return nil
}
// Handle an individual line from a Skiplist file
func handleSkipListline(line string, table *glib.GHashTable) error {
C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line)))))
return nil
}

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1,90 @@
package otbuiltin
import (
"errors"
"strings"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
// Declare variables for options
var initOpts initOptions
// Contains all of the options for initializing an ostree repo
type initOptions struct {
Mode string // either bare, archive-z2, or bare-user
repoMode C.OstreeRepoMode
}
// Instantiates and returns an initOptions struct with default values set
func NewInitOptions() initOptions {
io := initOptions{}
io.Mode = "bare"
io.repoMode = C.OSTREE_REPO_MODE_BARE
return io
}
// Initializes a new ostree repository at the given path. Returns true
// if the repo exists at the location, regardless of whether it was initialized
// by the function or if it already existed. Returns an error if the repo could
// not be initialized
func Init(path string, options initOptions) (bool, error) {
initOpts = options
err := parseMode()
if err != nil {
return false, err
}
// Create a repo struct from the path
	var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
crepo := C.ostree_repo_new(pathc)
// If the repo exists in the filesystem, return an error but set exists to true
/* var exists C.gboolean = 0
success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr)))
if exists != 0 {
err = errors.New("repository already exists")
return true, err
} else if !success {
return false, generateError(cerr)
}*/
cerr = nil
created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr)))
if !created {
errString := generateError(cerr).Error()
if strings.Contains(errString, "File exists") {
return true, generateError(cerr)
}
return false, generateError(cerr)
}
return true, nil
}
// Converts the mode string to a C.OSTREE_REPO_MODE enum value
func parseMode() error {
if strings.EqualFold(initOpts.Mode, "bare") {
initOpts.repoMode = C.OSTREE_REPO_MODE_BARE
} else if strings.EqualFold(initOpts.Mode, "bare-user") {
initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER
} else if strings.EqualFold(initOpts.Mode, "archive-z2") {
initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2
} else {
return errors.New("Invalid option for mode")
}
return nil
}
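// Usage sketch (minimal; the path is illustrative):
//
//	opts := otbuiltin.NewInitOptions()
//	opts.Mode = "archive-z2"
//	if _, err := otbuiltin.Init("/srv/ostree/repo", opts); err != nil {
//		return err
//	}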

View File

@ -0,0 +1,167 @@
package otbuiltin
import (
"fmt"
"strings"
"time"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
// Declare variables for options
var logOpts logOptions
// Layout (in Go reference-time notation) used for timestamps in the log
const formatString = "2006-01-02 03:04:05 -0700"
// Struct for the various pieces of data in a log entry
type LogEntry struct {
Checksum []byte
Variant []byte
Timestamp time.Time
Subject string
Body string
}
// Convert the log entry to a string
func (l LogEntry) String() string {
if len(l.Variant) == 0 {
return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body)
}
return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant)
}
type OstreeDumpFlags uint
const (
OSTREE_DUMP_NONE OstreeDumpFlags = 0
OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota
)
// Contains all of the options for displaying the log of an ostree repo
type logOptions struct {
Raw bool // Show raw variant data
}
// Instantiates and returns a logOptions struct with default values set
func NewLogOptions() logOptions {
return logOptions{}
}
// Show the logs of a branch starting with a given commit or ref. Returns a
// slice of log entries on success and an error otherwise
func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) {
	// Store the options in the package-level variable read below.
	logOpts = options
	// attempt to open the repository
	repo, err := OpenRepo(repoPath)
if err != nil {
return nil, err
}
cbranch := C.CString(branch)
defer C.free(unsafe.Pointer(cbranch))
	var checksum *C.char
	// Use a closure so the pointer is evaluated at function exit, after
	// ostree_repo_resolve_rev has filled it in.
	defer func() { C.free(unsafe.Pointer(checksum)) }()
	var flags OstreeDumpFlags = OSTREE_DUMP_NONE
	var cerr *C.GError
if logOpts.Raw {
flags |= OSTREE_DUMP_RAW
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) {
return nil, generateError(cerr)
}
return logCommit(repo, checksum, false, flags)
}
func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) {
var variant *C.GVariant
	var parent *C.char // freed by the deferred call once it is assigned below
	var gerr = glib.NewGError()
	var cerr = (*C.GError)(gerr.Ptr())
entries := make([]LogEntry, 0, 1)
var err error
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) {
if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) {
return nil, nil
}
return entries, generateError(cerr)
}
nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags)
// get the parent of this commit
parent = (*C.char)(C.ostree_commit_get_parent(variant))
defer C.free(unsafe.Pointer(parent))
if parent != nil {
entries, err = logCommit(repo, parent, true, flags)
if err != nil {
return nil, err
}
}
entries = append(entries, *nextLogEntry)
return entries, nil
}
func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry {
objLog := new(LogEntry)
objLog.Checksum = []byte(C.GoString(checksum))
if (flags & OSTREE_DUMP_RAW) != 0 {
dumpVariant(objLog, variant)
return objLog
}
switch objectType {
case C.OSTREE_OBJECT_TYPE_COMMIT:
dumpCommit(objLog, variant, flags)
return objLog
default:
return objLog
}
}
func dumpVariant(log *LogEntry, variant *C.GVariant) {
	if C.G_BYTE_ORDER != C.G_BIG_ENDIAN {
		// Print a byteswapped copy on little-endian hosts.
		byteswappedVariant := C.g_variant_byteswap(variant)
		log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE))))
	} else {
		// Big-endian hosts can print the variant directly.
		log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(variant, C.TRUE))))
	}
}
func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) {
var subject, body *C.char
defer C.free(unsafe.Pointer(subject))
defer C.free(unsafe.Pointer(body))
var timestamp C.guint64
C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, &timestamp)
// Timestamp is now a Unix formatted timestamp as a guint64
timestamp = C._guint64_from_be(timestamp)
log.Timestamp = time.Unix((int64)(timestamp), 0)
if strings.Compare(C.GoString(subject), "") != 0 {
log.Subject = C.GoString(subject)
}
if strings.Compare(C.GoString(body), "") != 0 {
log.Body = C.GoString(body)
}
}
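// Usage sketch (minimal; repo path and branch are illustrative):
//
//	entries, err := otbuiltin.Log("/srv/ostree/repo", "exampleos/x86_64/stable", otbuiltin.NewLogOptions())
//	if err != nil {
//		return err
//	}
//	for _, e := range entries {
//		fmt.Print(e.String())
//	}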

View File

@ -0,0 +1 @@
package otbuiltin

View File

@ -0,0 +1,217 @@
package otbuiltin
import (
"bytes"
"errors"
"strconv"
"strings"
"time"
"unsafe"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
)
// #cgo pkg-config: ostree-1
// #include <stdlib.h>
// #include <glib.h>
// #include <ostree.h>
// #include "builtin.go.h"
import "C"
// Declare global variable for options
var pruneOpts pruneOptions
// Contains all of the options for pruning an ostree repo. Use
// NewPruneOptions() to initialize
type pruneOptions struct {
NoPrune bool // Only display unreachable objects; don't delete
RefsOnly bool // Only compute reachability via refs
DeleteCommit string // Specify a commit to delete
KeepYoungerThan time.Time // All commits older than this date will be pruned
	Depth int // Only traverse DEPTH parents for each commit (default: -1 = infinite)
	StaticDeltasOnly int // Change the behavior of KeepYoungerThan and DeleteCommit to prune only the static delta files
}
// Instantiates and returns a pruneOptions struct with default values set
func NewPruneOptions() pruneOptions {
po := new(pruneOptions)
po.Depth = -1
return *po
}
// Search for unreachable objects in the repository given by repoPath. Removes the
// objects unless pruneOptions.NoPrune is specified
func Prune(repoPath string, options pruneOptions) (string, error) {
pruneOpts = options
// attempt to open the repository
repo, err := OpenRepo(repoPath)
if err != nil {
return "", err
}
var pruneFlags C.OstreeRepoPruneFlags
var numObjectsTotal int
var numObjectsPruned int
var objSizeTotal uint64
var gerr = glib.NewGError()
	var cerr = (*C.GError)(gerr.Ptr())
var cancellable *glib.GCancellable
if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) {
return "", generateError(cerr)
}
cerr = nil
if strings.Compare(pruneOpts.DeleteCommit, "") != 0 {
if pruneOpts.NoPrune {
return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune")
}
if pruneOpts.StaticDeltasOnly > 0 {
			// Note the negation: a FALSE return from libostree means failure.
			if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
return "", generateError(cerr)
}
} else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil {
return "", err
}
}
if !pruneOpts.KeepYoungerThan.IsZero() {
if pruneOpts.NoPrune {
return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune")
}
if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil {
return "", err
}
}
if pruneOpts.RefsOnly {
pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY
}
	if pruneOpts.NoPrune {
		pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE
	}
	// Run the prune itself; libostree fills in the totals reported below.
	var cnumObjectsTotal, cnumObjectsPruned C.gint
	var cobjSizeTotal C.guint64
	cerr = nil
	if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune(repo.native(), pruneFlags, (C.gint)(pruneOpts.Depth), &cnumObjectsTotal, &cnumObjectsPruned, &cobjSizeTotal, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
		return "", generateError(cerr)
	}
	numObjectsTotal = int(cnumObjectsTotal)
	numObjectsPruned = int(cnumObjectsPruned)
	objSizeTotal = uint64(cobjSizeTotal)
formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0)))
var buffer bytes.Buffer
buffer.WriteString("Total objects: ")
buffer.WriteString(strconv.Itoa(numObjectsTotal))
if numObjectsPruned == 0 {
buffer.WriteString("\nNo unreachable objects")
} else if pruneOpts.NoPrune {
buffer.WriteString("\nWould delete: ")
buffer.WriteString(strconv.Itoa(numObjectsPruned))
buffer.WriteString(" objects, freeing ")
buffer.WriteString(formattedFreedSize)
} else {
buffer.WriteString("\nDeleted ")
buffer.WriteString(strconv.Itoa(numObjectsPruned))
buffer.WriteString(" objects, ")
buffer.WriteString(formattedFreedSize)
buffer.WriteString(" freed")
}
return buffer.String(), nil
}
// Delete an unreachable commit from the repo
func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error {
	// ostree_repo_list_refs hands back a new GHashTable through its out
	// parameter, so give it the address of a C-level pointer and iterate
	// with a stack-allocated GHashTableIter.
	var crefs *C.GHashTable
	var hashIter C.GHashTableIter
	var hashkey, hashvalue C.gpointer
	var gerr = glib.NewGError()
	var cerr = (*C.GError)(gerr.Ptr())
	if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, &crefs, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
		return generateError(cerr)
	}
	C.g_hash_table_iter_init(&hashIter, crefs)
	for C.g_hash_table_iter_next(&hashIter, &hashkey, &hashvalue) != 0 {
var ref string = C.GoString((*C.char)(hashkey))
var commit string = C.GoString((*C.char)(hashvalue))
if strings.Compare(commitToDelete, commit) == 0 {
var buffer bytes.Buffer
buffer.WriteString("Commit ")
buffer.WriteString(commitToDelete)
buffer.WriteString(" is referenced by ")
buffer.WriteString(ref)
return errors.New(buffer.String())
}
}
if err := enableTombstoneCommits(repo); err != nil {
return err
}
if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
return generateError(cerr)
}
return nil
}
// Prune commits but keep any younger than the given date regardless of whether they
// are reachable
func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *glib.GCancellable) error {
	// ostree_repo_list_objects returns a new GHashTable through its out
	// parameter, so pass the address of a C-level pointer and iterate with
	// a stack-allocated GHashTableIter.
	var cobjects *C.GHashTable
	var hashIter C.GHashTableIter
	var key, value C.gpointer
	var gerr = glib.NewGError()
	var cerr = (*C.GError)(gerr.Ptr())
	if err := enableTombstoneCommits(repo); err != nil {
		return err
	}
	if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, &cobjects, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
		return generateError(cerr)
	}
	C.g_hash_table_iter_init(&hashIter, cobjects)
	for C.g_hash_table_iter_next(&hashIter, &key, &value) != 0 {
		var checksum *C.char
		var objType C.OstreeObjectType
		var commitTimestamp uint64
		var commit *C.GVariant
		// Each hash-table key is a serialized object name (a GVariant).
		C.ostree_object_name_deserialize((*C.GVariant)(key), &checksum, &objType)
		if objType != C.OSTREE_OBJECT_TYPE_COMMIT {
			continue
		}
		cerr = nil
		if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &commit, &cerr))) {
			return generateError(cerr)
		}
		commitTimestamp = (uint64)(C.ostree_commit_get_timestamp(commit))
if commitTimestamp < (uint64)(date.Unix()) {
cerr = nil
if pruneOpts.StaticDeltasOnly != 0 {
if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
return generateError(cerr)
}
} else {
if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) {
return generateError(cerr)
}
}
}
}
return nil
}
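// Usage sketch (minimal; the repo path and retention window are
// illustrative):
//
//	opts := otbuiltin.NewPruneOptions()
//	opts.KeepYoungerThan = time.Now().AddDate(0, 0, -30) // keep the last 30 days
//	summary, err := otbuiltin.Prune("/srv/ostree/repo", opts)
//	if err != nil {
//		return err
//	}
//	fmt.Println(summary)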

View File

@ -0,0 +1 @@
package otbuiltin

Some files were not shown because too many files have changed in this diff.