From a4a227e351ed3d421d2ea80f089d709ba3b6b60e Mon Sep 17 00:00:00 2001 From: "yaoyao.xyy" Date: Tue, 8 Nov 2016 12:14:13 +0800 Subject: [PATCH] oss native large file copy consumes too much time which will eventually lead to client timeout because of no data transmit throughout native copy. change maxCopySize to 128MB, ensure only small/medium size files use oss native copy to avoid connection reset by peer. And fix Move function with CopyLargeFileInParallel to optimize oss upload copy Signed-off-by: yaoyao.xyy --- Godeps/Godeps.json | 6 +++--- registry/storage/driver/oss/oss.go | 8 +++++--- vendor/github.com/denverdino/aliyungo/oss/client.go | 8 ++++---- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d76c7aa6..24672531 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -167,15 +167,15 @@ }, { "ImportPath": "github.com/denverdino/aliyungo/common", - "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c" + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c" + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "ce70ed03a598bb3ba258ff9c90a83a257959067c" + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" }, { "ImportPath": "github.com/docker/goamz/aws", diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 39f0ceb4..4d215928 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -389,15 +389,17 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { return append(files, directories...), nil } +const maxConcurrency = 10 + // Move moves an object stored at sourcePath to destPath, removing the original // object. 
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) - - err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), + err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), d.getContentType(), getPermissions(), - oss.Options{}) + oss.Options{}, + maxConcurrency) if err != nil { logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) return parseError(sourcePath, err) diff --git a/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/denverdino/aliyungo/oss/client.go index bc1bd6b0..c5e13e51 100644 --- a/vendor/github.com/denverdino/aliyungo/oss/client.go +++ b/vendor/github.com/denverdino/aliyungo/oss/client.go @@ -1313,7 +1313,7 @@ func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType s } const defaultChunkSize = int64(128 * 1024 * 1024) //128MB -const maxCopytSize = int64(1024 * 1024 * 1024) //1G +const maxCopytSize = int64(128 * 1024 * 1024) //128MB // Copy large file in the same bucket func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, contentType string, perm ACL, options Options, maxConcurrency int) error { @@ -1322,10 +1322,10 @@ func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, con maxConcurrency = 1 } - log.Printf("Copy large file from %s to %s\n", sourcePath, destPath) - currentLength, err := b.GetContentLength(sourcePath) - + + log.Printf("Parallel Copy large file[size: %d] from %s to %s\n",currentLength, sourcePath, destPath) + if err != nil { return err }