Merge pull request #844 from BrianBland/ng-storagedriver-tests
Enforces a path format for storage drivers
commit 47324d0398

4 changed files with 157 additions and 13 deletions
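The format being enforced is the PathRegexp pattern introduced in the storagedriver package further down in this diff: paths must be absolute, slash-separated, and built from lowercase alphanumeric components of at least two characters, optionally broken up by single periods, dashes or underscores. As a standalone illustration (not part of the change itself), the following Go sketch compiles that pattern and runs it against a few of the sample paths exercised by the new TestValidPaths and TestInvalidPaths cases; the program and the lowercased variable name are mine, but the regular expression and the paths are copied verbatim from the diff.

package main

import (
	"fmt"
	"regexp"
)

// pathRegexp copies the PathRegexp pattern added to the storagedriver package
// by this change.
var pathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`)

func main() {
	// The first group of samples comes from TestValidPaths, the second from
	// TestInvalidPaths.
	samples := []string{
		"/aa", "/a.a", "/abc/1.2.3.4.5-6_zyx/123.z", "/docker/docker-registry",
		"/", "abc", "/.abc", "/a--b", "/a/bcd", "/Docker/docker-registry",
	}
	for _, p := range samples {
		fmt.Printf("%-30q matches: %v\n", p, pathRegexp.MatchString(p))
	}
}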
@@ -56,6 +56,10 @@ func New(rootDirectory string) *Driver {
 
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *Driver) GetContent(path string) ([]byte, error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	rc, err := d.ReadStream(path, 0)
 	if err != nil {
 		return nil, err
@@ -72,6 +76,10 @@ func (d *Driver) GetContent(path string) ([]byte, error) {
 
 // PutContent stores the []byte content at a location designated by "path".
 func (d *Driver) PutContent(subPath string, contents []byte) error {
+	if !storagedriver.PathRegexp.MatchString(subPath) {
+		return storagedriver.InvalidPathError{Path: subPath}
+	}
+
 	if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil {
 		return err
 	}
@@ -82,6 +90,10 @@ func (d *Driver) PutContent(subPath string, contents []byte) error {
 // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	if offset < 0 {
 		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
 	}
@@ -110,6 +122,10 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
 // WriteStream stores the contents of the provided io.Reader at a location
 // designated by the given path.
 func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) {
+	if !storagedriver.PathRegexp.MatchString(subPath) {
+		return 0, storagedriver.InvalidPathError{Path: subPath}
+	}
+
 	if offset < 0 {
 		return 0, storagedriver.InvalidOffsetError{Path: subPath, Offset: offset}
 	}
@@ -150,6 +166,10 @@ func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn
 // Stat retrieves the FileInfo for the given path, including the current size
 // in bytes and the creation time.
 func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) {
+	if !storagedriver.PathRegexp.MatchString(subPath) {
+		return nil, storagedriver.InvalidPathError{Path: subPath}
+	}
+
 	fullPath := d.fullPath(subPath)
 
 	fi, err := os.Stat(fullPath)
@@ -170,6 +190,10 @@ func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) {
 // List returns a list of the objects that are direct descendants of the given
 // path.
 func (d *Driver) List(subPath string) ([]string, error) {
+	if !storagedriver.PathRegexp.MatchString(subPath) && subPath != "/" {
+		return nil, storagedriver.InvalidPathError{Path: subPath}
+	}
+
 	if subPath[len(subPath)-1] != '/' {
 		subPath += "/"
 	}
@@ -196,6 +220,12 @@ func (d *Driver) List(subPath string) ([]string, error) {
 // Move moves an object stored at sourcePath to destPath, removing the original
 // object.
 func (d *Driver) Move(sourcePath string, destPath string) error {
+	if !storagedriver.PathRegexp.MatchString(sourcePath) {
+		return storagedriver.InvalidPathError{Path: sourcePath}
+	} else if !storagedriver.PathRegexp.MatchString(destPath) {
+		return storagedriver.InvalidPathError{Path: destPath}
+	}
+
 	source := d.fullPath(sourcePath)
 	dest := d.fullPath(destPath)
 
@@ -213,6 +243,10 @@ func (d *Driver) Move(sourcePath string, destPath string) error {
 
 // Delete recursively deletes all objects stored at "path" and its subpaths.
 func (d *Driver) Delete(subPath string) error {
+	if !storagedriver.PathRegexp.MatchString(subPath) {
+		return storagedriver.InvalidPathError{Path: subPath}
+	}
+
 	fullPath := d.fullPath(subPath)
 
 	_, err := os.Stat(fullPath)
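With these guards in place, every filesystem driver method rejects a malformed path up front with storagedriver.InvalidPathError before touching the backend, so a caller can tell a badly formed path apart from one that is merely missing. The sketch below is a hypothetical caller, not part of this change; in particular the import path is an assumption, since the diff does not show the repository layout.

package client // hypothetical caller package, not part of this change

import (
	"fmt"

	// Assumed import path -- adjust to wherever the storagedriver package
	// actually lives in the repository.
	"github.com/docker/docker-registry/storagedriver"
)

// describeGet distinguishes the new InvalidPathError from the existing
// PathNotFoundError when reading through any StorageDriver.
func describeGet(d storagedriver.StorageDriver, path string) {
	content, err := d.GetContent(path)
	switch err.(type) {
	case nil:
		fmt.Printf("read %d bytes from %s\n", len(content), path)
	case storagedriver.InvalidPathError:
		fmt.Printf("%q is not a well-formed path: %v\n", path, err)
	case storagedriver.PathNotFoundError:
		fmt.Printf("%q is well-formed but does not exist: %v\n", path, err)
	default:
		fmt.Printf("unexpected error reading %q: %v\n", path, err)
	}
}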
@@ -46,6 +46,10 @@ func New() *Driver {
 
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *Driver) GetContent(path string) ([]byte, error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 
@@ -60,6 +64,10 @@ func (d *Driver) GetContent(path string) ([]byte, error) {
 
 // PutContent stores the []byte content at a location designated by "path".
 func (d *Driver) PutContent(p string, contents []byte) error {
+	if !storagedriver.PathRegexp.MatchString(p) {
+		return storagedriver.InvalidPathError{Path: p}
+	}
+
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
@@ -79,6 +87,10 @@ func (d *Driver) PutContent(p string, contents []byte) error {
 // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 
@@ -103,6 +115,10 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
 // WriteStream stores the contents of the provided io.ReadCloser at a location
 // designated by the given path.
 func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return 0, storagedriver.InvalidPathError{Path: path}
+	}
+
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
@@ -135,6 +151,10 @@ func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn in
 
 // Stat returns info about the provided path.
 func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 
@@ -161,6 +181,10 @@ func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) {
 // List returns a list of the objects that are direct descendants of the given
 // path.
 func (d *Driver) List(path string) ([]string, error) {
+	if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
+		return nil, storagedriver.InvalidPathError{Path: path}
+	}
+
 	normalized := normalize(path)
 
 	found := d.root.find(normalized)
@@ -188,6 +212,12 @@ func (d *Driver) List(path string) ([]string, error) {
 // Move moves an object stored at sourcePath to destPath, removing the original
 // object.
 func (d *Driver) Move(sourcePath string, destPath string) error {
+	if !storagedriver.PathRegexp.MatchString(sourcePath) {
+		return storagedriver.InvalidPathError{Path: sourcePath}
+	} else if !storagedriver.PathRegexp.MatchString(destPath) {
+		return storagedriver.InvalidPathError{Path: destPath}
+	}
+
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
@@ -204,6 +234,10 @@ func (d *Driver) Move(sourcePath string, destPath string) error {
 
 // Delete recursively deletes all objects stored at "path" and its subpaths.
 func (d *Driver) Delete(path string) error {
+	if !storagedriver.PathRegexp.MatchString(path) {
+		return storagedriver.InvalidPathError{Path: path}
+	}
+
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
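One detail worth noting in both drivers: List is the only method that carves out the root, via the extra path != "/" check, because "/" itself cannot match PathRegexp (the pattern requires at least one component and each component needs at least two characters). The snippet below illustrates the asymmetry; as with the previous sketch, the caller package and the import path are hypothetical.

package client // hypothetical caller package, as in the previous sketch

import "github.com/docker/docker-registry/storagedriver" // assumed import path

// rootIsListableOnly illustrates the "/" carve-out: List accepts the root,
// while every other method reports it as an invalid path.
func rootIsListableOnly(d storagedriver.StorageDriver) (listErr, getErr error) {
	_, listErr = d.List("/")      // never storagedriver.InvalidPathError
	_, getErr = d.GetContent("/") // always storagedriver.InvalidPathError
	return listErr, getErr
}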
@@ -3,6 +3,7 @@ package storagedriver
 import (
 	"fmt"
 	"io"
+	"regexp"
 	"strconv"
 	"strings"
 )
@@ -72,6 +73,17 @@ type StorageDriver interface {
 	Delete(path string) error
 }
 
+// PathComponentRegexp is the regular expression which each repository path
+// component must match.
+// A component of a repository path must be at least two characters, optionally
+// separated by periods, dashes or underscores.
+var PathComponentRegexp = regexp.MustCompile(`[a-z0-9]+([._-]?[a-z0-9])+`)
+
+// PathRegexp is the regular expression which each repository path must match.
+// A repository path is absolute, beginning with a slash and containing a
+// positive number of path components separated by slashes.
+var PathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`)
+
 // PathNotFoundError is returned when operating on a nonexistent path.
 type PathNotFoundError struct {
 	Path string
@@ -81,6 +93,15 @@ func (err PathNotFoundError) Error() string {
 	return fmt.Sprintf("Path not found: %s", err.Path)
 }
 
+// InvalidPathError is returned when the provided path is malformed.
+type InvalidPathError struct {
+	Path string
+}
+
+func (err InvalidPathError) Error() string {
+	return fmt.Sprintf("Invalid path: %s", err.Path)
+}
+
 // InvalidOffsetError is returned when attempting to read or write from an
 // invalid offset.
 type InvalidOffsetError struct {
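Two properties of these patterns are easy to miss when skimming: every path component must be at least two characters long, and separators may not appear consecutively. The drivers in this diff validate whole paths with the anchored PathRegexp. A small standalone check (mine, not part of the change, with the pattern copied from the hunk above) makes those rules concrete:

package main

import (
	"fmt"
	"regexp"
)

// Copied from the storagedriver hunk above; lowercased because this sketch
// lives outside that package.
var pathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`)

func main() {
	fmt.Println(pathRegexp.MatchString("/ab/cd"))   // true
	fmt.Println(pathRegexp.MatchString("/a/bcd"))   // false: component "a" is too short
	fmt.Println(pathRegexp.MatchString("/ab/c-d"))  // true: single separators are allowed
	fmt.Println(pathRegexp.MatchString("/ab/c--d")) // false: consecutive separators are rejected
}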
@@ -108,6 +108,51 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) {
 	}
 }
 
+// TearDownTest tears down the gocheck test.
+// This causes the suite to abort if any files are left around in the storage
+// driver.
+func (suite *DriverSuite) TearDownTest(c *check.C) {
+	files, _ := suite.StorageDriver.List("/")
+	if len(files) > 0 {
+		c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files)
+	}
+}
+
+// TestValidPaths checks that various valid file paths are accepted by the
+// storage driver.
+func (suite *DriverSuite) TestValidPaths(c *check.C) {
+	contents := randomContents(64)
+	validFiles := []string{"/aa", "/a.a", "/0-9/abcdefg", "/abcdefg/z.75", "/abc/1.2.3.4.5-6_zyx/123.z", "/docker/docker-registry"}
+
+	for _, filename := range validFiles {
+		err := suite.StorageDriver.PutContent(filename, contents)
+		defer suite.StorageDriver.Delete(firstPart(filename))
+		c.Assert(err, check.IsNil)
+
+		received, err := suite.StorageDriver.GetContent(filename)
+		c.Assert(err, check.IsNil)
+		c.Assert(received, check.DeepEquals, contents)
+	}
+}
+
+// TestInvalidPaths checks that various invalid file paths are rejected by the
+// storage driver.
+func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
+	contents := randomContents(64)
+	invalidFiles := []string{"/", "abc", "/abc./abc", "/.abc", "/a--b", "/a-.b", "/_.abc", "/a/bcd", "/abc_123/d", "/Docker/docker-registry"}
+
+	for _, filename := range invalidFiles {
+		err := suite.StorageDriver.PutContent(filename, contents)
+		defer suite.StorageDriver.Delete(firstPart(filename))
+		c.Assert(err, check.NotNil)
+		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
+
+		_, err = suite.StorageDriver.GetContent(filename)
+		c.Assert(err, check.NotNil)
+		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
+	}
+}
+
 // TestWriteRead1 tests a simple write-read workflow.
 func (suite *DriverSuite) TestWriteRead1(c *check.C) {
 	filename := randomPath(32)
@@ -337,7 +382,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) {
 	c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1)))
 
 	if fi.Size() > chunkSize {
-		c.Fatalf("Offset too large, %d > %d", fi.Size(), chunkSize)
+		c.Errorf("Offset too large, %d > %d", fi.Size(), chunkSize)
 	}
 	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2))
 	c.Assert(err, check.IsNil)
@@ -349,7 +394,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) {
 	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
 
 	if fi.Size() > 2*chunkSize {
-		c.Fatalf("Offset too large, %d > %d", fi.Size(), 2*chunkSize)
+		c.Errorf("Offset too large, %d > %d", fi.Size(), 2*chunkSize)
 	}
 
 	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
@@ -409,7 +454,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) {
 // TestList checks the returned list of keys after populating a directory tree.
 func (suite *DriverSuite) TestList(c *check.C) {
 	rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8)))
-	defer suite.StorageDriver.Delete("/")
+	defer suite.StorageDriver.Delete(rootDirectory)
 
 	parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
 	childFiles := make([]string, 50)
@@ -625,11 +670,11 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	c.Assert(fi.IsDir(), check.Equals, false)
 
 	if start.After(fi.ModTime()) {
-		c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start)
+		c.Errorf("modtime %s before file created (%v)", fi.ModTime(), start)
 	}
 
 	if fi.ModTime().After(expectedModTime) {
-		c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
+		c.Errorf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
 	}
 
 	// Call on directory
@@ -643,11 +688,11 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	c.Assert(fi.IsDir(), check.Equals, true)
 
 	if start.After(fi.ModTime()) {
-		c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start)
+		c.Errorf("modtime %s before file created (%v)", fi.ModTime(), start)
 	}
 
 	if fi.ModTime().After(expectedModTime) {
-		c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
+		c.Errorf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
 	}
 }
 
@@ -779,16 +824,19 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c
 }
 
 var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
+var separatorChars = []byte("._-")
 
 func randomPath(length int64) string {
-	path := ""
+	path := "/"
 	for int64(len(path)) < length {
-		chunkLength := rand.Int63n(length-int64(len(path))) + 1
+		chunkLength := rand.Int63n(length-int64(len(path)+1)) + 2
 		chunk := randomFilename(chunkLength)
 		path += chunk
 		if length-int64(len(path)) == 1 {
 			path += randomFilename(1)
-		} else if length-int64(len(path)) > 1 {
+		} else if length-int64(len(path)) == 2 {
+			path += randomFilename(2)
+		} else if length-int64(len(path)) > 2 {
 			path += "/"
 		}
 	}
@@ -797,8 +845,15 @@ func randomPath(length int64) string {
 
 func randomFilename(length int64) string {
 	b := make([]byte, length)
+	wasSeparator := true
 	for i := range b {
-		b[i] = filenameChars[rand.Intn(len(filenameChars))]
+		if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 {
+			b[i] = separatorChars[rand.Intn(len(separatorChars))]
+			wasSeparator = true
+		} else {
+			b[i] = filenameChars[rand.Intn(len(filenameChars))]
+			wasSeparator = false
+		}
 	}
 	return string(b)
 }
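The reworked helpers above generate the paths used throughout the suite, and they are shaped to satisfy the new format: randomPath now starts from "/" and keeps every component at least two characters long, while randomFilename never emits a leading, trailing, or doubled separator. The standalone sketch below copies the two helpers and the PathRegexp pattern from this diff and adds a small main (mine) that spot-checks the generated paths against the pattern.

package main

import (
	"fmt"
	"math/rand"
	"regexp"
)

// The pattern and the two helpers below are copied from the diff above; only
// main and the lowercased pathRegexp name are additions for this sketch.
var pathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`)

var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
var separatorChars = []byte("._-")

func randomPath(length int64) string {
	path := "/"
	for int64(len(path)) < length {
		chunkLength := rand.Int63n(length-int64(len(path)+1)) + 2
		chunk := randomFilename(chunkLength)
		path += chunk
		if length-int64(len(path)) == 1 {
			path += randomFilename(1)
		} else if length-int64(len(path)) == 2 {
			path += randomFilename(2)
		} else if length-int64(len(path)) > 2 {
			path += "/"
		}
	}
	return path
}

func randomFilename(length int64) string {
	b := make([]byte, length)
	wasSeparator := true
	for i := range b {
		if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 {
			b[i] = separatorChars[rand.Intn(len(separatorChars))]
			wasSeparator = true
		} else {
			b[i] = filenameChars[rand.Intn(len(filenameChars))]
			wasSeparator = false
		}
	}
	return string(b)
}

func main() {
	// Generate a few 32-character paths, as TestWriteRead1 does, and confirm
	// they satisfy the enforced format.
	for i := 0; i < 5; i++ {
		p := randomPath(32)
		fmt.Printf("%s matches: %v\n", p, pathRegexp.MatchString(p))
	}
}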
@@ -821,8 +876,8 @@ func firstPart(filePath string) string {
 	if dir == "" && file == "" {
 		return "/"
 	}
-	if dir == "" {
-		return file
+	if dir == "/" || dir == "" {
+		return "/" + file
 	}
 	if file == "" {
 		return dir