2016-11-02 01:43:15 +00:00
|
|
|
package content
|
|
|
|
|
|
|
|
import (
|
|
|
|
"io"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/docker/distribution/digest"
|
2016-11-16 03:46:24 +00:00
|
|
|
"github.com/nightlyone/lockfile"
|
2016-11-02 01:43:15 +00:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// ErrBlobNotFound is returned by GetPath when no blob with the
	// requested digest exists in the store.
	ErrBlobNotFound = errors.New("blob not found")

	// bufPool supplies reusable 32 KiB scratch buffers, presumably for
	// copy operations elsewhere in this package (consumer not visible
	// here — TODO confirm).
	bufPool = sync.Pool{
		New: func() interface{} {
			return make([]byte, 32<<10)
		},
	}
)
|
|
|
|
|
|
|
|
// ContentStore is digest-keyed store for content. All data written into the
// store is stored under a verifiable digest.
//
// ContentStore can generally support multi-reader, single-writer ingest of
// data, including resumable ingest.
type ContentStore struct {
	// root is the base directory; blobs live under root/blobs and
	// in-progress transactions under root/ingest.
	root string
}
|
|
|
|
|
|
|
|
func OpenContentStore(root string) (*ContentStore, error) {
|
|
|
|
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &ContentStore{
|
|
|
|
root: root,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
// TODO(stevvooe): Work out how we can export the status of an ongoing download.
|
|
|
|
// TODO(stevvooe): Allow querying the set of blobs in the blob store.
|
|
|
|
|
2016-11-02 01:43:15 +00:00
|
|
|
func (cs *ContentStore) GetPath(dgst digest.Digest) (string, error) {
|
|
|
|
p := filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
|
|
|
|
if _, err := os.Stat(p); err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return "", ErrBlobNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Begin starts a new write transaction against the blob store.
|
|
|
|
//
|
|
|
|
// The argument `ref` is used to identify the transaction. It must be a valid
|
|
|
|
// path component, meaning it has no `/` characters and no `:` (we'll ban
|
|
|
|
// others fs characters, as needed).
|
|
|
|
//
|
|
|
|
// TODO(stevvooe): Figure out minimum common set of characters, basically common
|
|
|
|
func (cs *ContentStore) Begin(ref string) (*ContentWriter, error) {
|
2016-11-16 03:46:24 +00:00
|
|
|
path, data, lock, err := cs.ingestPaths(ref)
|
2016-11-02 01:43:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// use single path mkdir for this to ensure ref is only base path, in
|
|
|
|
// addition to validation above.
|
|
|
|
if err := os.Mkdir(path, 0755); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
if err := tryLock(lock); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-02 01:43:15 +00:00
|
|
|
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "failed to open data file")
|
|
|
|
}
|
|
|
|
defer fp.Close()
|
|
|
|
|
|
|
|
// re-open the file in append mode
|
|
|
|
fp, err = os.OpenFile(data, os.O_WRONLY|os.O_APPEND, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "error opening for append")
|
|
|
|
}
|
|
|
|
|
|
|
|
return &ContentWriter{
|
|
|
|
cs: cs,
|
|
|
|
fp: fp,
|
2016-11-16 03:46:24 +00:00
|
|
|
lock: lock,
|
2016-11-02 01:43:15 +00:00
|
|
|
path: path,
|
|
|
|
digester: digest.Canonical.New(),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cs *ContentStore) Resume(ref string) (*ContentWriter, error) {
|
2016-11-16 03:46:24 +00:00
|
|
|
path, data, lock, err := cs.ingestPaths(ref)
|
2016-11-02 01:43:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
if err := tryLock(lock); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-02 01:43:15 +00:00
|
|
|
digester := digest.Canonical.New()
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
// slow slow slow!!, send to goroutine or use resumable hashes
|
2016-11-02 01:43:15 +00:00
|
|
|
fp, err := os.Open(data)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer fp.Close()
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
offset, err := io.Copy(digester.Hash(), fp)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-02 01:43:15 +00:00
|
|
|
fp1, err := os.OpenFile(data, os.O_WRONLY|os.O_APPEND, 0666)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil, errors.Wrap(err, "ingest does not exist")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, errors.Wrap(err, "error opening for append")
|
|
|
|
}
|
|
|
|
|
|
|
|
return &ContentWriter{
|
|
|
|
cs: cs,
|
|
|
|
fp: fp1,
|
2016-11-16 03:46:24 +00:00
|
|
|
lock: lock,
|
2016-11-02 01:43:15 +00:00
|
|
|
path: path,
|
|
|
|
offset: offset,
|
|
|
|
digester: digester,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
func (cs *ContentStore) ingestPaths(ref string) (string, string, lockfile.Lockfile, error) {
|
2016-11-02 01:43:15 +00:00
|
|
|
cref := filepath.Clean(ref)
|
|
|
|
if cref != ref {
|
2016-11-16 03:46:24 +00:00
|
|
|
return "", "", "", errors.Errorf("invalid path after clean")
|
2016-11-02 01:43:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fp := filepath.Join(cs.root, "ingest", ref)
|
|
|
|
|
|
|
|
// ensure we don't escape root
|
|
|
|
if !strings.HasPrefix(fp, cs.root) {
|
2016-11-16 03:46:24 +00:00
|
|
|
return "", "", "", errors.Errorf("path %q escapes root", ref)
|
2016-11-02 01:43:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ensure we are just a single path component
|
|
|
|
if ref != filepath.Base(fp) {
|
2016-11-16 03:46:24 +00:00
|
|
|
return "", "", "", errors.Errorf("ref must be a single path component")
|
|
|
|
}
|
|
|
|
|
|
|
|
lock, err := lockfile.New(filepath.Join(fp, "lock"))
|
|
|
|
if err != nil {
|
|
|
|
return "", "", "", errors.Wrap(err, "error creating lockfile")
|
2016-11-02 01:43:15 +00:00
|
|
|
}
|
|
|
|
|
2016-11-16 03:46:24 +00:00
|
|
|
return fp, filepath.Join(fp, "data"), lock, nil
|
2016-11-02 01:43:15 +00:00
|
|
|
}
|