Merge pull request #699 from BrianBland/storagedriver-api-rename

Renames ResumeWritePosition to CurrentSize in the storage driver API
Olivier Gambier 2014-11-07 14:51:50 -08:00
commit 57ad37163e
8 changed files with 24 additions and 24 deletions


@@ -98,7 +98,7 @@ func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser
 func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error {
 	defer reader.Close()
-	resumableOffset, err := d.ResumeWritePosition(subPath)
+	resumableOffset, err := d.CurrentSize(subPath)
 	if _, pathNotFound := err.(storagedriver.PathNotFoundError); err != nil && !pathNotFound {
 		return err
 	}
@@ -154,7 +154,7 @@ func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, read
 	return err
 }
-func (d *FilesystemDriver) ResumeWritePosition(subPath string) (uint64, error) {
+func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) {
 	fullPath := d.subPath(subPath)
 	fileInfo, err := os.Stat(fullPath)
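
The hunk ends mid-function. For orientation, here is a minimal sketch of how a filesystem-backed CurrentSize can be completed from the os.Stat call above; the PathNotFoundError fields and the exact error handling are assumptions, not lines from this commit.

    func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) {
        fullPath := d.subPath(subPath)

        fileInfo, err := os.Stat(fullPath)
        if err != nil {
            if os.IsNotExist(err) {
                // Assumption: a missing file surfaces as PathNotFoundError, the
                // error type checked by WriteStream in the hunk above; its exact
                // fields are not shown in this diff.
                return 0, storagedriver.PathNotFoundError{Path: subPath}
            }
            return 0, err
        }
        // os.FileInfo.Size returns int64; the StorageDriver interface uses uint64.
        return uint64(fileInfo.Size()), nil
    }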


@@ -78,7 +78,7 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
-	resumableOffset, err := d.ResumeWritePosition(path)
+	resumableOffset, err := d.CurrentSize(path)
 	if err != nil {
 		return err
 	}
@@ -100,7 +100,7 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io
 	return nil
 }
-func (d *InMemoryDriver) ResumeWritePosition(path string) (uint64, error) {
+func (d *InMemoryDriver) CurrentSize(path string) (uint64, error) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 	contents, ok := d.storage[path]
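
For the in-memory driver the new method reduces to the length of the stored byte slice. A hedged sketch of the remainder, assuming d.storage maps a path to its []byte contents (suggested by the lookup above) and that a missing key is reported as PathNotFoundError:

    func (d *InMemoryDriver) CurrentSize(path string) (uint64, error) {
        d.mutex.RLock()
        defer d.mutex.RUnlock()

        contents, ok := d.storage[path]
        if !ok {
            // Assumption: a missing key maps to PathNotFoundError, as in the
            // filesystem driver; field names are not shown in this diff.
            return 0, storagedriver.PathNotFoundError{Path: path}
        }
        return uint64(len(contents)), nil
    }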


@@ -216,16 +216,16 @@ func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64,
 	return nil
 }
-func (driver *StorageDriverClient) ResumeWritePosition(path string) (uint64, error) {
+func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) {
 	receiver, remoteSender := libchan.Pipe()
 	params := map[string]interface{}{"Path": path}
-	err := driver.sender.Send(&Request{Type: "ResumeWritePosition", Parameters: params, ResponseChannel: remoteSender})
+	err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender})
 	if err != nil {
 		return 0, err
 	}
-	var response ResumeWritePositionResponse
+	var response CurrentSizeResponse
 	err = receiver.Receive(&response)
 	if err != nil {
 		return 0, err
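
The client hunk above stops after the Receive error check. The remainder presumably unwraps the CurrentSizeResponse defined in the next file; a sketch using only the Position and Error fields shown in this diff:

    // ...continuation of StorageDriverClient.CurrentSize after the Receive above.
    if response.Error != nil {
        // Assumption: *responseError satisfies the error interface (or is
        // otherwise converted before being returned to the caller).
        return 0, response.Error
    }
    return response.Position, nil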


@@ -49,8 +49,8 @@ type WriteStreamResponse struct {
 	Error *responseError
 }
-// ResumeWritePositionResponse is a response for a ResumeWritePosition request
-type ResumeWritePositionResponse struct {
+// CurrentSizeResponse is a response for a CurrentSize request
+type CurrentSizeResponse struct {
 	Position uint64
 	Error    *responseError
 }


@@ -119,10 +119,10 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		if err != nil {
 			panic(err)
 		}
-	case "ResumeWritePosition":
+	case "CurrentSize":
 		path, _ := request.Parameters["Path"].(string)
-		position, err := driver.ResumeWritePosition(path)
-		response := ResumeWritePositionResponse{
+		position, err := driver.CurrentSize(path)
+		response := CurrentSizeResponse{
 			Position: position,
 			Error:    ResponseError(err),
 		}
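
This hunk stops at the response literal. Presumably the response is then sent back over the request's ResponseChannel (the field the client populates above), mirroring the other cases in handleRequest; a sketch under that assumption, using only libchan's Sender.Send:

    // ...continuation of the "CurrentSize" case after building the response.
    err = request.ResponseChannel.Send(&response)
    if err != nil {
        panic(err)
    }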


@@ -177,7 +177,7 @@ func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadC
 	return nil
 }
-func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) {
+func (d *S3Driver) CurrentSize(path string) (uint64, error) {
 	_, parts, err := d.getAllParts(path)
 	if err != nil {
 		return 0, err
@@ -190,11 +190,11 @@ func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) {
 	return (((uint64(len(parts)) - 1) * uint64(parts[0].Size)) + uint64(parts[len(parts)-1].Size)), nil
 }
-func (d *S3Driver) List(prefix string) ([]string, error) {
-	if prefix[len(prefix)-1] != '/' {
-		prefix = prefix + "/"
+func (d *S3Driver) List(path string) ([]string, error) {
+	if path[len(path)-1] != '/' {
+		path = path + "/"
 	}
-	listResponse, err := d.Bucket.List(prefix, "/", "", listPartsMax)
+	listResponse, err := d.Bucket.List(path, "/", "", listPartsMax)
 	if err != nil {
 		return nil, err
 	}
@@ -212,7 +212,7 @@ func (d *S3Driver) List(prefix string) ([]string, error) {
 	}
 	if listResponse.IsTruncated {
-		listResponse, err = d.Bucket.List(prefix, "/", listResponse.NextMarker, listPartsMax)
+		listResponse, err = d.Bucket.List(path, "/", listResponse.NextMarker, listPartsMax)
 		if err != nil {
 			return nil, err
 		}
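
The size computation in the first S3 hunk above exploits the multipart-upload invariant that every part except the last has the same size. A small illustration of the arithmetic (partSizes is a stand-in for the parts[i].Size values returned by getAllParts):

    // Worked example of the size formula above, with part sizes in bytes:
    // three parts of 5 MB, 5 MB and 2 MB have been uploaded so far.
    partSizes := []uint64{5 << 20, 5 << 20, 2 << 20}
    size := (uint64(len(partSizes))-1)*partSizes[0] + partSizes[len(partSizes)-1]
    // size == 12 << 20 (12 MB): every part except the last shares the first
    // part's size, which is the invariant the driver relies on.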


@@ -25,12 +25,12 @@ type StorageDriver interface {
 	// the given path
 	// The driver will know it has received the full contents when it has read "size" bytes
 	// May be used to resume writing a stream by providing a nonzero offset
-	// The offset must be no larger than the ResumeWritePosition for this path
+	// The offset must be no larger than the CurrentSize for this path
 	WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error
-	// ResumeWritePosition retrieves the byte offset at which it is safe to continue writing at the
-	// given path
-	ResumeWritePosition(path string) (uint64, error)
+	// CurrentSize retrieves the current size in bytes of the object at the given path
+	// It should be safe to read or write anywhere up to this point
+	CurrentSize(path string) (uint64, error)
 	// List returns a list of the objects that are direct descendants of the given path
 	List(path string) ([]string, error)
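
Taken together, the renamed interface reads as: ask CurrentSize how many bytes are already stored, then resume WriteStream from that offset, as the updated doc comment describes. A hypothetical caller-side sketch (resumeUpload and contents are illustrative names, and the bytes, io/ioutil and storagedriver imports used elsewhere in this diff are assumed):

    // resumeUpload is a hypothetical helper, not part of this commit, showing
    // the intended call pattern for the renamed method.
    func resumeUpload(driver storagedriver.StorageDriver, path string, contents []byte) error {
        totalSize := uint64(len(contents))

        offset, err := driver.CurrentSize(path)
        if err != nil {
            if _, ok := err.(storagedriver.PathNotFoundError); !ok {
                return err
            }
            // Nothing has been written yet: start from the beginning.
            offset = 0
        }

        remaining := ioutil.NopCloser(bytes.NewReader(contents[offset:]))
        return driver.WriteStream(path, offset, totalSize, remaining)
    }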


@@ -160,7 +160,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) {
 	err := suite.StorageDriver.WriteStream(filename, 0, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(contentsChunk1)))
 	c.Assert(err, IsNil)
-	offset, err := suite.StorageDriver.ResumeWritePosition(filename)
+	offset, err := suite.StorageDriver.CurrentSize(filename)
 	c.Assert(err, IsNil)
 	if offset > chunkSize {
 		c.Fatalf("Offset too large, %d > %d", offset, chunkSize)
@@ -168,7 +168,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) {
 	err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize])))
 	c.Assert(err, IsNil)
-	offset, err = suite.StorageDriver.ResumeWritePosition(filename)
+	offset, err = suite.StorageDriver.CurrentSize(filename)
 	c.Assert(err, IsNil)
 	if offset > 2*chunkSize {
 		c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize)