format code with gofumpt

gofumpt (https://github.com/mvdan/gofumpt) provides a superset of `gofmt` / `go fmt`,
and addresses various formatting issues that linters may be checking for.

We can consider enabling the `gofumpt` linter to verify the formatting in CI, but since
not every developer may have it installed, for now this change only runs gofumpt once to get
the formatting in shape.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
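
To illustrate the kind of rewrites gofumpt makes on top of `gofmt` (the file `example.go` and its contents below are a made-up sketch, not code from this repository): single-element `var` groups are collapsed, `var x = ...` inside a function becomes `x := ...`, octal literals use the `0o` prefix, and non-directive comments get a space after `//`.

// example.go (hypothetical file, for illustration only)
// before: valid gofmt output, but not gofumpt-clean
package example

import "os"

var (
	enabled = true
)

//open the file read-only
func open(path string) (*os.File, error) {
	var mode = os.FileMode(0600)
	return os.OpenFile(path, os.O_RDONLY, mode)
}

// after running `gofumpt -w example.go`
package example

import "os"

var enabled = true

// open the file read-only
func open(path string) (*os.File, error) {
	mode := os.FileMode(0o600)
	return os.OpenFile(path, os.O_RDONLY, mode)
}

Running `gofumpt -l -w .` from the repository root applies these rewrites in place, which is presumably how the bulk of the changes below were produced; a CI check could fail the build whenever `gofumpt -l .` prints any file names.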
Sebastiaan van Stijn committed on 2022-11-02 22:05:45 +01:00
parent 7f9f86c411
commit e0281dc609
No known key found for this signature in database
GPG key ID: 76698F39D527CE8C
107 changed files with 404 additions and 483 deletions

View file

@ -62,7 +62,6 @@ func main() {
if flag.NArg() > 0 {
for _, path := range flag.Args() {
fp, err := os.Open(path)
if err != nil {
log.Printf("%s: %v", path, err)
fail = true

View file

@ -27,7 +27,6 @@ import (
var spaceRegex = regexp.MustCompile(`\n\s*`)
func main() {
if len(os.Args) != 2 {
log.Fatalln("please specify a template to execute.")
}
@ -127,5 +126,4 @@ end:
}
return output
}

View file

@ -589,7 +589,7 @@ type Events struct {
IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
}
//Ignore configures mediaTypes and actions of the event, that it won't be propagated
// Ignore configures mediaTypes and actions of the event, that it won't be propagated
type Ignore struct {
MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
Actions []string `yaml:"actions"` // ignore action types

View file

@ -360,7 +360,6 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
c.Assert(err, NotNil)
}
// TestParseWithDifferentEnvReporting validates that environment variables

View file

@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz
package configuration

View file

@ -15,7 +15,7 @@
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
// # Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
@ -65,7 +65,7 @@
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
// # HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the

View file

@ -20,9 +20,7 @@ import (
"github.com/sirupsen/logrus"
)
var (
enforceRepoClass bool
)
var enforceRepoClass bool
func main() {
var (
@ -110,7 +108,6 @@ func main() {
if err != nil {
logrus.Infof("Error serving: %v", err)
}
}
// handlerWithContext wraps the given context-aware handler by setting up the

View file

@ -5,11 +5,10 @@ import (
"crypto/rsa"
"encoding/base64"
"errors"
"strings"
"testing"
"time"
"strings"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/docker/libtrust"
)
@ -49,7 +48,6 @@ func TestCreateJWTSuccessWithEmptyACL(t *testing.T) {
if !strings.Contains(json, "test") {
t.Fatal("Valid token was not generated.")
}
}
func decodeJWT(rawToken string) (string, error) {
@ -74,7 +72,7 @@ func joseBase64Decode(s string) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return "", err //errors.New("Error in Decoding base64 String")
return "", err // errors.New("Error in Decoding base64 String")
}
return string(data), nil
}

View file

@ -187,7 +187,6 @@ func TestAll(t *testing.T) {
t.Fatalf("Missing element at position %d: %s", i, dgst)
}
}
}
func assertEqualShort(t *testing.T, actual, expected string) {
@ -363,9 +362,11 @@ func BenchmarkLookup1000(b *testing.B) {
func BenchmarkShortCode10(b *testing.B) {
benchShortCodeNTable(b, 10, 12)
}
func BenchmarkShortCode100(b *testing.B) {
benchShortCodeNTable(b, 100, 12)
}
func BenchmarkShortCode1000(b *testing.B) {
benchShortCodeNTable(b, 1000, 12)
}

View file

@ -7,9 +7,7 @@ import (
"github.com/distribution/distribution/v3/health"
)
var (
updater = health.NewStatusUpdater()
)
var updater = health.NewStatusUpdater()
// DownHandler registers a manual_http_status that always returns an Error
func DownHandler(w http.ResponseWriter, r *http.Request) {

View file

@ -13,7 +13,7 @@
// particularly useful for checks that verify upstream connectivity or
// database status, since they might take a long time to return/timeout.
//
// Installing
// # Installing
//
// To install health, just import it in your application:
//
@ -35,7 +35,7 @@
// After importing these packages to your main application, you can start
// registering checks.
//
// Registering Checks
// # Registering Checks
//
// The recommended way of registering checks is using a periodic Check.
// PeriodicChecks run on a certain schedule and asynchronously update the
@ -84,7 +84,7 @@
// return Errors.new("This is an error!")
// }))
//
// Examples
// # Examples
//
// You could also use the health checker mechanism to ensure your application
// only comes up if certain conditions are met, or to allow the developer to

View file

@ -11,14 +11,12 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
MediaType: v1.MediaTypeImageManifest,
}
)
}
func init() {
ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {

View file

@ -20,13 +20,11 @@ const (
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
}
func init() {
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@ -149,7 +147,6 @@ func (sm SignedManifest) References() []distribution.Descriptor {
}
return dependencies
}
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner

View file

@ -42,7 +42,6 @@ func TestManifestUnmarshaling(t *testing.T) {
if !reflect.DeepEqual(&signed, env.signed) {
t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed)
}
}
func TestManifestVerification(t *testing.T) {

View file

@ -65,7 +65,6 @@ func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable)
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
return nil
}
// References returns the current references added to this builder

View file

@ -33,14 +33,12 @@ const (
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
}
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@ -119,7 +117,6 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
if manifest.MediaType != MediaTypeManifest {
return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
MediaTypeManifest, manifest.MediaType)
}
m.Manifest = manifest

View file

@ -233,7 +233,6 @@ func checkCommon(t *testing.T, event events.Event) {
if event.(Event).Target.Repository != repo {
t.Fatalf("unexpected repository: %q != %q", event.(Event).Target.Repository, repo)
}
}
type testSinkFn func(event events.Event) error

View file

@ -143,9 +143,7 @@ type SourceRecord struct {
InstanceID string `json:"instanceID,omitempty"`
}
var (
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
ErrSinkClosed = fmt.Errorf("sink: closed")
)
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
var ErrSinkClosed = fmt.Errorf("sink: closed")

View file

@ -13,7 +13,7 @@ import (
// envelope has changed. If this code fails, the revision of the protocol may
// need to be incremented.
func TestEventEnvelopeJSONFormat(t *testing.T) {
var expected = strings.TrimSpace(`
expected := strings.TrimSpace(`
{
"events": [
{
@ -114,7 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
prototype.Request.UserAgent = "test/0.1"
prototype.Source.Addr = "hostname.local:port"
var manifestPush = prototype
manifestPush := prototype
manifestPush.ID = "asdf-asdf-asdf-asdf-0"
manifestPush.Target.Digest = "sha256:0123456789abcdef0"
manifestPush.Target.Length = 1
@ -123,7 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
manifestPush.Target.Repository = "library/test"
manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"
var layerPush0 = prototype
layerPush0 := prototype
layerPush0.ID = "asdf-asdf-asdf-asdf-1"
layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
layerPush0.Target.Length = 2
@ -132,7 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
layerPush0.Target.Repository = "library/test"
layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"
var layerPush1 = prototype
layerPush1 := prototype
layerPush1.ID = "asdf-asdf-asdf-asdf-2"
layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
layerPush1.Target.Length = 3

View file

@ -135,7 +135,7 @@ type headerRoundTripper struct {
}
func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
var nreq = *req
nreq := *req
nreq.Header = make(http.Header)
merge := func(headers http.Header) {

View file

@ -197,7 +197,6 @@ func TestHTTPSink(t *testing.T) {
if err := sink.Close(); err == nil {
t.Fatalf("second close should have returned error: %v", err)
}
}
func createTestEvent(action, repo, typ string) Event {

View file

@ -3,13 +3,12 @@ package notifications
import (
"reflect"
"sync"
"testing"
"time"
events "github.com/docker/go-events"
"github.com/sirupsen/logrus"
"testing"
)
func TestEventQueue(t *testing.T) {

View file

@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz
package reference

View file

@ -84,7 +84,7 @@ func TestValidateRemoteName(t *testing.T) {
// Allow multiple hyphens as well.
"docker---rules/docker",
//Username doc and image name docker being tested.
// Username doc and image name docker being tested.
"doc/docker",
// single character names are now allowed.
@ -129,7 +129,7 @@ func TestValidateRemoteName(t *testing.T) {
// No repository.
"docker/",
//namespace too long
// namespace too long
"this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker",
}
for _, repositoryName := range invalidRepositoryNames {

View file

@ -525,7 +525,6 @@ func TestReferenceRegexp(t *testing.T) {
for i := range testcases {
checkRegexp(t, ReferenceRegexp, testcases[i])
}
}
func TestIdentifierRegexp(t *testing.T) {

View file

@ -86,7 +86,6 @@ func TestErrorCodes(t *testing.T) {
t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
}
}
}
func TestErrorsManagement(t *testing.T) {
@ -99,7 +98,6 @@ func TestErrorsManagement(t *testing.T) {
errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))
p, err := json.Marshal(errs)
if err != nil {
t.Fatalf("error marashaling errors: %v", err)
}
@ -181,5 +179,4 @@ func TestErrorsManagement(t *testing.T) {
if e2.Detail != `stuff2` {
t.Fatalf("e2 had wrong detail: %q", e2.Detail)
}
}

View file

@ -75,8 +75,10 @@ var (
})
)
var nextCode = 1000
var registerLock sync.Mutex
var (
nextCode = 1000
registerLock sync.Mutex
)
// Register will make the passed-in error known to the environment and
// return a new ErrorCode

View file

@ -262,7 +262,6 @@ type RouteDescriptor struct {
// MethodDescriptor provides a description of the requests that may be
// conducted with the target method.
type MethodDescriptor struct {
// Method is an HTTP method, such as GET, PUT or POST.
Method string

View file

@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz
package v2

View file

@ -265,7 +265,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
u := server.URL + testcase.RequestURI
resp, err := http.Get(u)
if err != nil {
t.Fatalf("error issuing get request: %v", err)
}
@ -316,7 +315,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
resp.Body.Close()
}
}
// -------------- START LICENSED CODE --------------

View file

@ -29,7 +29,6 @@
// }
// }
// }
//
package auth
import (

View file

@ -128,10 +128,10 @@ func createHtpasswdFile(path string) error {
return err
}
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
if err != nil {
return fmt.Errorf("failed to open htpasswd path %s", err)
}

View file

@ -42,7 +42,7 @@ func TestBasicAccessController(t *testing.T) {
tempFile.Close()
var userNumber = 0
userNumber := 0
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithRequest(ctx, r)
@ -76,7 +76,6 @@ func TestBasicAccessController(t *testing.T) {
req, _ := http.NewRequest("GET", server.URL, nil)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("unexpected error during GET: %v", err)
}
@ -120,7 +119,6 @@ func TestBasicAccessController(t *testing.T) {
}
}
}
}
func TestCreateHtpasswdFile(t *testing.T) {

View file

@ -8,7 +8,6 @@ import (
)
func TestParseHTPasswd(t *testing.T) {
for _, tc := range []struct {
desc string
input string
@ -81,5 +80,4 @@ asdf
t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
}
}
}

View file

@ -70,7 +70,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut
ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey))
return ctx, nil
}
type challenge struct {

View file

@ -185,6 +185,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
// VerifySigningKey attempts to get the key which was used to sign this token.
// The token header should contain either of these 3 fields:
//
// `x5c` - The x509 certificate chain for the signing key. Needs to be
// verified.
// `jwk` - The JSON Web Key representation of the signing key.
@ -192,6 +193,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
// `kid` - The unique identifier for the key. This library interprets it
// as a libtrust fingerprint. The key itself can be looked up in
// the trustedKeys field of the given verify options.
//
// Each of these methods are tried in that order of preference until the
// signing key is found or an error is returned.
func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {

View file

@ -493,7 +493,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) {
defer os.Remove(rootCertBundleFilename)
// Add something other than a certificate to the rootcertbundle
file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666)
file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
t.Fatal(err)
}

View file

@ -38,7 +38,6 @@ func TestAuthChallengeParse(t *testing.T) {
if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
}
}
func TestAuthChallengeNormalization(t *testing.T) {
@ -49,7 +48,6 @@ func TestAuthChallengeNormalization(t *testing.T) {
}
func testAuthChallengeNormalization(t *testing.T, host string) {
scm := NewSimpleManager()
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
@ -85,7 +83,6 @@ func testAuthChallengeNormalization(t *testing.T, host string) {
}
func testAuthChallengeConcurrent(t *testing.T, host string) {
scm := NewSimpleManager()
url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))

View file

@ -50,7 +50,6 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re
func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) {
h := testutil.NewHandler(rrm)
wrapper := &testAuthenticationWrapper{
headers: http.Header(map[string][]string{
"X-API-Version": {"registry/2.0"},
"X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"},

View file

@ -296,7 +296,6 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
desc.Size = length
return desc, nil
}
// Get issues a HEAD request for a Manifest against its named endpoint in order
@ -529,7 +528,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
}
mt := resp.Header.Get("Content-Type")
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
@ -667,7 +665,6 @@ func sanitizeLocation(location, base string) (string, error) {
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return bs.statter.Stat(ctx, dgst)
}
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {

View file

@ -319,7 +319,6 @@ func TestBlobDelete(t *testing.T) {
if err != nil {
t.Errorf("Error deleting blob: %s", err.Error())
}
}
func TestBlobFetch(t *testing.T) {
@ -399,7 +398,6 @@ func TestBlobExistsNoContentLength(t *testing.T) {
if !strings.Contains(err.Error(), "missing content-length heade") {
t.Fatalf("Expected missing content-length error message")
}
}
func TestBlobExists(t *testing.T) {
@ -986,7 +984,6 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b
"Content-Type": {schema1.MediaTypeSignedManifest},
}),
}
}
*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
}
@ -1535,6 +1532,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) {
t.Fatalf("Unexpected digest")
}
}
func TestManifestTagsPaginated(t *testing.T) {
s := httptest.NewServer(http.NotFoundHandler())
defer s.Close()

View file

@ -87,7 +87,8 @@ func TestCatalogAPI(t *testing.T) {
values := url.Values{
"last": []string{""},
"n": []string{strconv.Itoa(chunkLen)}}
"n": []string{strconv.Itoa(chunkLen)},
}
catalogURL, err := env.builder.BuildCatalogURL(values)
if err != nil {
@ -453,7 +454,6 @@ func TestBlobAPI(t *testing.T) {
defer env2.Shutdown()
args = makeBlobArgs(t)
testBlobAPI(t, env2, args)
}
func TestBlobDelete(t *testing.T) {
@ -1110,7 +1110,7 @@ const (
func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
// Initialize the mock driver
var errGenericStorage = errors.New("generic storage error")
errGenericStorage := errors.New("generic storage error")
return &mockErrorDriver{
returnErrs: []mockErrorMapping{
{
@ -1346,7 +1346,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
for i := range unsignedManifest.FSLayers {
rs, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random layer %d: %v", i, err)
}
@ -1450,7 +1449,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk)
if err != nil {
t.Fatal(err)
}
// Re-push with a few different Content-Types. The official schema1
@ -1684,7 +1682,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
for i := range manifest.Layers {
rs, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random layer %d: %v", i, err)
}
@ -2279,7 +2276,6 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) {
if len(tagsResponse.Tags) != 0 {
t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags)
}
}
type testEnv struct {
@ -2308,7 +2304,6 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
config.Compatibility.Schema1.Enabled = true
return newTestEnvWithConfig(t, &config)
}
func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
@ -2334,7 +2329,6 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te
app := NewApp(ctx, config)
server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false)
if err != nil {
t.Fatalf("error creating url builder: %v", err)
}
@ -2832,7 +2826,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
blobURL, _ := env.builder.BuildBlobURL(ref)
resp, _ = httpDelete(blobURL)
checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
}
func TestProxyManifestGetByTag(t *testing.T) {

View file

@ -703,7 +703,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
return
}
repository, err := app.registry.Repository(context, nameRef)
if err != nil {
dcontext.GetLogger(context).Errorf("error resolving repository: %v", err)
@ -983,7 +982,6 @@ func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespac
registry = rmw
}
return registry, nil
}
// applyRepoMiddleware wraps a repository with the configured middlewares

View file

@ -120,13 +120,11 @@ func TestAppDispatcher(t *testing.T) {
app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
u, err := route.URL(testcase.vars...)
if err != nil {
t.Fatal(err)
}
resp, err := http.Get(u.String())
if err != nil {
t.Fatal(err)
}
@ -275,5 +273,4 @@ func TestAppendAccessRecords(t *testing.T) {
if ok := reflect.DeepEqual(result, expectedResult); !ok {
t.Fatalf("Actual access record differs from expected")
}
}

View file

@ -79,7 +79,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req
blobs := buh.Repository.Blobs(buh)
upload, err := blobs.Create(buh, options...)
if err != nil {
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
@ -219,7 +218,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
// really set the mediatype. For now, we can let the backend take care
// of this.
})
if err != nil {
switch err := err.(type) {
case distribution.ErrBlobInvalidDigest:

View file

@ -34,7 +34,7 @@ type catalogAPIResponse struct {
}
func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
var moreEntries = true
moreEntries := true
q := r.URL.Query()
lastEntry := q.Get("last")

View file

@ -31,7 +31,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
// Get a channel that tells us if the client disconnects
clientClosed := r.Context().Done()
var body = r.Body
body := r.Body
if limit > 0 {
body = http.MaxBytesReader(responseWriter, body, limit)
}

View file

@ -479,7 +479,6 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest)
}
return nil
}
// DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.

View file

@ -12,8 +12,10 @@ import (
// used to register the constructor for different RegistryMiddleware backends.
type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)
var middlewares map[string]InitFunc
var registryoptions []storage.RegistryOption
var (
middlewares map[string]InitFunc
registryoptions []storage.RegistryOption
)
// Register is used to register an InitFunc for
// a RegistryMiddleware backend with the given name.

View file

@ -221,6 +221,7 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
te.inRemote = inRemote
te.numUnique = numUnique
}
func TestProxyStoreGet(t *testing.T) {
te := makeTestEnv(t, "foo/bar")
@ -253,7 +254,6 @@ func TestProxyStoreGet(t *testing.T) {
if (*remoteStats)["get"] != 1 {
t.Errorf("Unexpected remote get count")
}
}
func TestProxyStoreStat(t *testing.T) {
@ -284,7 +284,6 @@ func TestProxyStoreStat(t *testing.T) {
if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) {
t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger)
}
}
func TestProxyStoreServeHighConcurrency(t *testing.T) {

View file

@ -79,7 +79,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
pms.scheduler.AddManifest(repoBlob, repositoryTTL)
// Ensure the manifest blob is cleaned up
//pms.scheduler.AddBlob(blobRef, repositoryTTL)
// pms.scheduler.AddBlob(blobRef, repositoryTTL)
}

View file

@ -271,5 +271,4 @@ func TestProxyManifests(t *testing.T) {
if env.manifests.authChallenger.(*mockChallenger).count != 2 {
t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
}
}

View file

@ -70,5 +70,4 @@ func init() {
pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
return proxyMetrics.manifestMetrics
}))
}

View file

@ -69,7 +69,6 @@ func TestSchedule(t *testing.T) {
s.Lock()
s.add(ref3, 1*timeUnit, entryTypeBlob)
s.Unlock()
}()
// Ensure all repos are deleted
@ -195,7 +194,6 @@ func TestStopRestore(t *testing.T) {
if len(remainingRepos) != 0 {
t.Fatalf("Repositories remaining: %#v", remainingRepos)
}
}
func TestDoubleStart(t *testing.T) {

View file

@ -73,12 +73,14 @@ var defaultCipherSuites = []uint16{
}
// maps tls version strings to constants
var defaultTLSVersionStr = "tls1.2"
var tlsVersions = map[string]uint16{
var (
defaultTLSVersionStr = "tls1.2"
tlsVersions = map[string]uint16{
// user specified values
"tls1.2": tls.VersionTLS12,
"tls1.3": tls.VersionTLS13,
}
}
)
// this channel gets notified when process receives signal. It is global to ease unit testing
var quit = make(chan os.Signal, 1)
@ -89,7 +91,6 @@ var ServeCmd = &cobra.Command{
Short: "`serve` stores and distributes Docker images",
Long: "`serve` stores and distributes Docker images.",
Run: func(cmd *cobra.Command, args []string) {
// setup context
ctx := dcontext.WithVersion(dcontext.Background(), version.Version)

View file

@ -152,7 +152,7 @@ func TestGetCipherSuite(t *testing.T) {
t.Error("did not return expected error about unknown cipher suite")
}
var insecureCipherSuites = []string{
insecureCipherSuites := []string{
"TLS_RSA_WITH_RC4_128_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA256",
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
@ -234,7 +234,7 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis
}
keyPath := path.Join(os.TempDir(), name+".key")
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return nil, fmt.Errorf("failed to open %s for writing: %v", keyPath, err)
}

View file

@ -36,8 +36,10 @@ var RootCmd = &cobra.Command{
},
}
var dryRun bool
var removeUntagged bool
var (
dryRun bool
removeUntagged bool
)
// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
var GCCmd = &cobra.Command{

View file

@ -36,7 +36,6 @@ func TestWriteSeek(t *testing.T) {
bs := repository.Blobs(ctx)
blobUpload, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
@ -47,7 +46,6 @@ func TestWriteSeek(t *testing.T) {
if offset != int64(len(contents)) {
t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents))
}
}
// TestSimpleBlobUpload covers the blob upload process, exercising common
@ -75,7 +73,6 @@ func TestSimpleBlobUpload(t *testing.T) {
rd := io.TeeReader(randomDataReader, h)
blobUpload, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
@ -385,7 +382,6 @@ func TestBlobMount(t *testing.T) {
sbs := sourceRepository.Blobs(ctx)
blobUpload, err := sbs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}

View file

@ -121,7 +121,6 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return "", err
}
@ -165,7 +164,6 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return distribution.Descriptor{}, err
}

View file

@ -15,9 +15,7 @@ import (
"github.com/sirupsen/logrus"
)
var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
var errResumableDigestNotAvailable = errors.New("resumable digest not available")
const (
// digestSha256Empty is the canonical sha256 digest of empty data
@ -296,7 +294,6 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
@ -355,7 +352,6 @@ func (bw *blobWriter) removeResources(ctx context.Context) error {
name: bw.blobStore.repository.Named().Name(),
id: bw.id,
})
if err != nil {
return err
}

View file

@ -85,7 +85,6 @@ func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
@ -136,7 +135,6 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
alg: bw.digester.Digest().Algorithm(),
offset: bw.written,
})
if err != nil {
return err
}

View file

@ -39,14 +39,16 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T,
if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
Digest: "sha384:abc",
Size: 10,
MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
MediaType: "application/octet-stream",
}); err != digest.ErrDigestInvalidFormat {
t.Fatalf("expected error with invalid digest: %v", err)
}
if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{
Digest: "",
Size: 10,
MediaType: "application/octet-stream"}); err == nil {
MediaType: "application/octet-stream",
}); err == nil {
t.Fatalf("expected error setting value on invalid descriptor")
}
@ -68,7 +70,8 @@ func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provi
expected := distribution.Descriptor{
Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
Size: 10,
MediaType: "application/octet-stream"}
MediaType: "application/octet-stream",
}
cache, err := provider.RepositoryScoped("foo/bar")
if err != nil {
@ -152,7 +155,8 @@ func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider c
expected := distribution.Descriptor{
Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111",
Size: 10,
MediaType: "application/octet-stream"}
MediaType: "application/octet-stream",
}
cache, err := provider.RepositoryScoped("foo/bar")
if err != nil {

View file

@ -14,10 +14,8 @@ type cachedBlobStatter struct {
backend distribution.BlobDescriptorService
}
var (
// cacheCount is the number of total cache request received/hits/misses
cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
)
// cacheCount is the number of total cache request received/hits/misses
var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.

View file

@ -102,7 +102,6 @@ func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.N
if err != nil {
t.Fatalf("manifest upload failed: %v", err)
}
}
func TestCatalog(t *testing.T) {
@ -289,8 +288,10 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) {
}
}
var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
var separatorChars = []byte("._-")
var (
filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
separatorChars = []byte("._-")
)
func randomPath(length int64) string {
path := "/"

View file

@ -93,7 +93,8 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {
d := &driver{
client: blobClient,
container: container}
container: container,
}
return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
}
@ -412,7 +413,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
Marker: marker,
Prefix: virtPath,
})
if err != nil {
return out, err
}

View file

@ -48,10 +48,8 @@ import (
"github.com/docker/go-metrics"
)
var (
// storageAction is the metrics of blob related operations
storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")
)
// storageAction is the metrics of blob related operations
var storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")
func init() {
metrics.Register(prometheus.StorageNamespace)

View file

@ -145,7 +145,7 @@ func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileIn
}
// List returns a list of the objects that are direct descendants of the
//given path.
// given path.
func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
r.enter()
defer r.exit()

View file

@ -52,8 +52,10 @@ type FileInfoInternal struct {
FileInfoFields
}
var _ FileInfo = FileInfoInternal{}
var _ FileInfo = &FileInfoInternal{}
var (
_ FileInfo = FileInfoInternal{}
_ FileInfo = &FileInfoInternal{}
)
// Path provides the full path of the target of this file info.
func (fi FileInfoInternal) Path() string {

View file

@ -149,7 +149,7 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644)
if err != nil {
if os.IsNotExist(err) {
return nil, storagedriver.PathNotFoundError{Path: path}
@ -173,11 +173,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) {
fullPath := d.fullPath(subPath)
parentDir := path.Dir(fullPath)
if err := os.MkdirAll(parentDir, 0777); err != nil {
if err := os.MkdirAll(parentDir, 0o777); err != nil {
return nil, err
}
fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666)
fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}
@ -260,7 +260,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
return storagedriver.PathNotFoundError{Path: sourcePath}
}
if err := os.MkdirAll(path.Dir(dest), 0777); err != nil {
if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil {
return err
}

View file

@ -34,7 +34,6 @@ func init() {
}
func TestFromParametersImpl(t *testing.T) {
tests := []struct {
params map[string]interface{} // technically the yaml can contain anything
expected DriverParameters
@ -109,5 +108,4 @@ func TestFromParametersImpl(t *testing.T) {
t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params)
}
}
}

View file

@ -2,7 +2,7 @@
// store blobs in Google cloud storage.
//
// This package leverages the google.golang.org/cloud/storage client library
//for interfacing with gcs.
// for interfacing with gcs.
//
// Because gcs is a key, value store the Stat call does not support last modification
// time for directories (directories are an abstraction for key, value stores)
@ -445,7 +445,6 @@ func putContentsClose(wc *storage.Writer, contents []byte) error {
// available for future calls to StorageDriver.GetContent and
// StorageDriver.Reader.
func (w *writer) Commit() error {
if err := w.checkClosed(); err != nil {
return err
}
@ -597,7 +596,7 @@ func retry(req request) error {
// size in bytes and the creation time.
func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) {
var fi storagedriver.FileInfoFields
//try to get as file
// try to get as file
gcsContext := d.context(context)
obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path))
if err == nil {
@ -612,7 +611,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
//try to get as folder
// try to get as folder
dirpath := d.pathToDirKey(path)
var query *storage.Query
@ -640,7 +639,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI
}
// List returns a list of the objects that are direct descendants of the
//given path.
// given path.
func (d *driver) List(context context.Context, path string) ([]string, error) {
var query *storage.Query
query = &storage.Query{}

View file

@ -22,8 +22,10 @@ import (
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
var skipGCS func() string
var (
gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
skipGCS func() string
)
func init() {
bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET")

View file

@ -190,7 +190,6 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
}
entries, err := found.(*dir).list(normalized)
if err != nil {
switch err {
case errNotExists:

View file

@ -163,7 +163,6 @@ func (d *dir) mkdirs(p string) (*dir, error) {
components := strings.Split(relative, "/")
for _, component := range components {
d, err := dd.mkdir(component)
if err != nil {
// This should actually never happen, since there are no children.
return nil, err

View file

@ -98,7 +98,6 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio
// URLFor attempts to find a url which may be used to retrieve the file at the given path.
func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
if ac.StorageDriver.Name() != "oss" {
dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver")
return ac.StorageDriver.URLFor(ctx, path, options)
@ -112,5 +111,5 @@ func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, opti
// init registers the alicdn layerHandler backend.
func init() {
storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCDNStorageMiddleware))
storagemiddleware.Register("alicdn", newAliCDNStorageMiddleware)
}

View file

@ -1,6 +1,5 @@
// Package middleware - cloudfront wrapper for storage libs
// N.B. currently only works with S3, not arbitrary sites
//
package middleware
import (
@ -34,12 +33,21 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
// newCloudFrontLayerHandler constructs and returns a new CloudFront
// LayerHandler implementation.
// Required options: baseurl, privatekey, keypairid
// Optional options: ipFilteredBy, awsregion
// ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP, default value. "aws", only aws IP goes
// to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly
// awsregion: a comma separated string of AWS regions.
//
// Required options:
//
// - baseurl
// - privatekey
// - keypairid
//
// Optional options:
//
// - ipFilteredBy
// - awsregion
// - ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP,
// default value. "aws", only aws IP goes to S3 directly. "awsregion", only
// regions listed in awsregion options goes to S3 directly
// - awsregion: a comma separated string of AWS regions.
func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
// parse baseurl
base, ok := options["baseurl"]
@ -211,5 +219,5 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string,
// init registers the cloudfront layerHandler backend.
func init() {
storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware))
storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware)
}

View file

@ -21,11 +21,10 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
}
func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) {
options := make(map[string]interface{})
options["baseurl"] = "example.com"
var privk = `-----BEGIN RSA PRIVATE KEY-----
privk := `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV
VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n
F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB

View file

@ -113,7 +113,6 @@ func (s *awsIPs) tryUpdate() error {
if regionAllowed {
*output = append(*output, *network)
}
}
for _, prefix := range response.Prefixes {

View file

@ -35,7 +35,6 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
w.Write(bytes)
}
func newTestHandler(data awsIPResponse) *httptest.Server {
@ -68,7 +67,6 @@ func TestS3TryUpdate(t *testing.T) {
assertEqual(t, 1, len(ips.ipv4))
assertEqual(t, 0, len(ips.ipv6))
}
func TestMatchIPV6(t *testing.T) {
@ -215,7 +213,7 @@ func TestInvalidNetworkType(t *testing.T) {
}
func TestParsing(t *testing.T) {
var data = `{
data := `{
"prefixes": [{
"ip_prefix": "192.168.0.0",
"region": "someregion",

View file

@ -46,5 +46,5 @@ func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, opt
}
func init() {
storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware))
storagemiddleware.Register("redirect", newRedirectStorageMiddleware)
}

View file

@ -37,13 +37,15 @@ const driverName = "oss"
// OSS API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 << 20
const defaultChunkSize = 2 * minChunkSize
const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
const (
defaultChunkSize = 2 * minChunkSize
defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
)
// listMax is the largest amount of objects you can request from OSS in a list call
const listMax = 1000
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
// DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
AccessKeyID string
AccessKeySecret string
@ -202,7 +204,6 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
client.SetEndpoint(params.Endpoint)
bucket := client.Bucket(params.Bucket)

View file

@ -24,15 +24,18 @@ var ossDriverConstructor func(rootDirectory string) (*Driver, error)
var skipCheck func() string
func init() {
accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID")
secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
bucket := os.Getenv("OSS_BUCKET")
region := os.Getenv("OSS_REGION")
internal := os.Getenv("OSS_INTERNAL")
encrypt := os.Getenv("OSS_ENCRYPT")
secure := os.Getenv("OSS_SECURE")
endpoint := os.Getenv("OSS_ENDPOINT")
encryptionKeyID := os.Getenv("OSS_ENCRYPTIONKEYID")
var (
accessKey = os.Getenv("ALIYUN_ACCESS_KEY_ID")
secretKey = os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
bucket = os.Getenv("OSS_BUCKET")
region = os.Getenv("OSS_REGION")
internal = os.Getenv("OSS_INTERNAL")
encrypt = os.Getenv("OSS_ENCRYPT")
secure = os.Getenv("OSS_SECURE")
endpoint = os.Getenv("OSS_ENDPOINT")
encryptionKeyID = os.Getenv("OSS_ENCRYPTIONKEYID")
)
root, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)

View file

@ -93,7 +93,7 @@ var validRegions = map[string]struct{}{}
// validObjectACLs contains known s3 object Acls
var validObjectACLs = map[string]struct{}{}
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
// DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
AccessKey string
SecretKey string
@ -632,7 +632,6 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
Key: aws.String(d.s3Path(path)),
Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
})
if err != nil {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" {
return ioutil.NopCloser(bytes.NewReader(nil)), nil
@ -1166,16 +1165,22 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre
// directoryDiff finds all directories that are not in common between
// the previous and current paths in sorted order.
//
// Eg 1 directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
// => [ "/path/to/folder/folder" ],
// Eg 2 directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
// => [ "/path/to/folder/folder2" ]
// Eg 3 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
// => [ "/path/to/folder/folder2" ]
// Eg 4 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
// => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
// Eg 5 directoryDiff("/", "/path/to/folder/folder/file")
// => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ],
// # Examples
//
// directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
// // => [ "/path/to/folder/folder" ]
//
// directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
// // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
// // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
// // => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
//
// directoryDiff("/", "/path/to/folder/folder/file")
// // => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ]
func directoryDiff(prev, current string) []string {
var paths []string

View file

@ -27,27 +27,32 @@ import (
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
var skipS3 func() string
var (
s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
skipS3 func() string
)
func init() {
accessKey := os.Getenv("AWS_ACCESS_KEY")
secretKey := os.Getenv("AWS_SECRET_KEY")
bucket := os.Getenv("S3_BUCKET")
encrypt := os.Getenv("S3_ENCRYPT")
keyID := os.Getenv("S3_KEY_ID")
secure := os.Getenv("S3_SECURE")
skipVerify := os.Getenv("S3_SKIP_VERIFY")
v4Auth := os.Getenv("S3_V4_AUTH")
region := os.Getenv("AWS_REGION")
objectACL := os.Getenv("S3_OBJECT_ACL")
var (
accessKey = os.Getenv("AWS_ACCESS_KEY")
secretKey = os.Getenv("AWS_SECRET_KEY")
bucket = os.Getenv("S3_BUCKET")
encrypt = os.Getenv("S3_ENCRYPT")
keyID = os.Getenv("S3_KEY_ID")
secure = os.Getenv("S3_SECURE")
skipVerify = os.Getenv("S3_SKIP_VERIFY")
v4Auth = os.Getenv("S3_V4_AUTH")
region = os.Getenv("AWS_REGION")
objectACL = os.Getenv("S3_OBJECT_ACL")
regionEndpoint = os.Getenv("REGION_ENDPOINT")
forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken = os.Getenv("AWS_SESSION_TOKEN")
useDualStack = os.Getenv("S3_USE_DUALSTACK")
combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate = os.Getenv("S3_ACCELERATE")
)
root, err := ioutil.TempDir("", "driver-")
regionEndpoint := os.Getenv("REGION_ENDPOINT")
forcePathStyle := os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken := os.Getenv("AWS_SESSION_TOKEN")
useDualStack := os.Getenv("S3_USE_DUALSTACK")
combineSmallPart := os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate := os.Getenv("S3_ACCELERATE")
if err != nil {
panic(err)
}
@ -343,7 +348,7 @@ func TestDelete(t *testing.T) {
return false
}
var objs = []string{
objs := []string{
"/file1",
"/file1-2",
"/file1/2",
@ -411,7 +416,7 @@ func TestDelete(t *testing.T) {
}
// objects to skip auto-created test case
var skipCase = map[string]bool{
skipCase := map[string]bool{
// special case where deleting "/file1" also deletes "/file1/2" is tested explicitly
"/file1": true,
}
@ -536,7 +541,7 @@ func TestWalk(t *testing.T) {
t.Fatalf("unexpected error creating driver with standard storage: %v", err)
}
var fileset = []string{
fileset := []string{
"/file1",
"/folder1/file1",
"/folder2/file1",

View file

@ -66,7 +66,7 @@ type StorageDriver interface {
Stat(ctx context.Context, path string) (FileInfo, error)
// List returns a list of the objects that are direct descendants of the
//given path.
// given path.
List(ctx context.Context, path string) ([]string, error)
// Move moves an object stored at sourcePath to destPath, removing the

View file

@ -341,8 +341,8 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
return file, err
}
//if this is a DLO and it is clear that segments are still missing,
//wait until they show up
// if this is a DLO and it is clear that segments are still missing,
// wait until they show up
_, isDLO := headers["X-Object-Manifest"]
size, err := file.Length()
if err != nil {
@ -357,7 +357,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
continue
}
//if not, then this reader will be fine
// if not, then this reader will be fine
return file, nil
}
}
@ -436,9 +436,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
}
}
//Don't trust an empty `objects` slice. A container listing can be
//outdated. For files, we can make a HEAD request on the object which
//reports existence (at least) much more reliably.
// Don't trust an empty `objects` slice. A container listing can be
// outdated. For files, we can make a HEAD request on the object which
// reports existence (at least) much more reliably.
waitingTime := readAfterWriteWait
endTime := time.Now().Add(readAfterWriteTimeout)
@ -451,8 +451,8 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
return nil, err
}
//if this is a DLO and it is clear that segments are still missing,
//wait until they show up
// if this is a DLO and it is clear that segments are still missing,
// wait until they show up
_, isDLO := headers["X-Object-Manifest"]
if isDLO && info.Bytes == 0 {
if time.Now().Add(waitingTime).After(endTime) {
@ -463,7 +463,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
continue
}
//otherwise, accept the result
// otherwise, accept the result
fi.IsDir = false
fi.Size = info.Bytes
fi.ModTime = info.LastModified
@ -681,7 +681,7 @@ func (d *driver) swiftSegmentPath(path string) (string, error) {
}
func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
//a simple container listing works 99.9% of the time
// a simple container listing works 99.9% of the time
segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path})
if err != nil {
if err == swift.ContainerNotFound {
@ -690,15 +690,15 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
return nil, err
}
//build a lookup table by object name
// build a lookup table by object name
hasObjectName := make(map[string]struct{})
for _, segment := range segments {
hasObjectName[segment.Name] = struct{}{}
}
//The container listing might be outdated (i.e. not contain all existing
//segment objects yet) because of temporary inconsistency (Swift is only
//eventually consistent!). Check its completeness.
// The container listing might be outdated (i.e. not contain all existing
// segment objects yet) because of temporary inconsistency (Swift is only
// eventually consistent!). Check its completeness.
segmentNumber := 0
for {
segmentNumber++
@ -708,23 +708,23 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
continue
}
//This segment is missing in the container listing. Use a more reliable
//request to check its existence. (HEAD requests on segments are
//guaranteed to return the correct metadata, except for the pathological
//case of an outage of large parts of the Swift cluster or its network,
//since every segment is only written once.)
// This segment is missing in the container listing. Use a more reliable
// request to check its existence. (HEAD requests on segments are
// guaranteed to return the correct metadata, except for the pathological
// case of an outage of large parts of the Swift cluster or its network,
// since every segment is only written once.)
segment, _, err := d.Conn.Object(d.Container, segmentPath)
switch err {
case nil:
//found new segment -> keep going, more might be missing
// found new segment -> keep going, more might be missing
segments = append(segments, segment)
continue
case swift.ObjectNotFound:
//This segment is missing. Since we upload segments sequentially,
//there won't be any more segments after it.
// This segment is missing. Since we upload segments sequentially,
// there won't be any more segments after it.
return segments, nil
default:
return nil, err //unexpected error
return nil, err // unexpected error
}
}
}

View file

@ -24,29 +24,6 @@ var swiftDriverConstructor func(prefix string) (*Driver, error)
func init() {
var (
username string
password string
authURL string
tenant string
tenantID string
domain string
domainID string
tenantDomain string
tenantDomainID string
trustID string
container string
region string
AuthVersion int
endpointType string
insecureSkipVerify bool
secretKey string
accessKey string
containerKey bool
tempURLMethods []string
swiftServer *swifttest.SwiftServer
err error
)
username = os.Getenv("SWIFT_USERNAME")
password = os.Getenv("SWIFT_PASSWORD")
authURL = os.Getenv("SWIFT_AUTH_URL")
@ -67,6 +44,10 @@ func init() {
containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY"))
tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",")
swiftServer *swifttest.SwiftServer
err error
)
if username == "" || password == "" || authURL == "" || container == "" {
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
panic(err)

View file

@ -116,7 +116,8 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) {
"/a-.b",
"/_.abc",
"/Docker/docker-registry",
"/Abc/Cba"}
"/Abc/Cba",
}
for _, filename := range validFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -154,7 +155,8 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
"abc",
"123.abc",
"//bcd",
"/abc_123/"}
"/abc_123/",
}
for _, filename := range invalidFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -1175,8 +1177,10 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c
c.Assert(readContents, check.DeepEquals, contents)
}
var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
var separatorChars = []byte("._-")
var (
filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
separatorChars = []byte("._-")
)
func randomPath(length int64) string {
path := "/"

View file

@ -16,6 +16,7 @@ type changingFileSystem struct {
func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) {
return cfs.fileset, nil
}
func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
kept, ok := cfs.keptFiles[path]
if ok && kept {
@ -48,6 +49,7 @@ func (cfs *fileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
},
}, nil
}
func (cfs *fileSystem) isDir(path string) bool {
_, isDir := cfs.fileset[path]
return isDir
@ -167,7 +169,6 @@ func TestWalkFallback(t *testing.T) {
compareWalked(t, tc.expected, walked)
})
}
}
func compareWalked(t *testing.T, expected, walked []string) {

View file

@ -61,7 +61,6 @@ func TestFileReaderSeek(t *testing.T) {
}
fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil {
t.Fatalf("unexpected error creating file reader: %v", err)
}

View file

@ -109,7 +109,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return err
})
if err != nil {
return fmt.Errorf("failed to mark: %v", err)
}

View file

@ -173,7 +173,8 @@ func TestNoDeletionNoEffect(t *testing.T) {
// construct manifestlist for fun.
blobstatter := registry.BlobStatter()
manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{
image1.manifestDigest, image2.manifestDigest})
image1.manifestDigest, image2.manifestDigest,
})
if err != nil {
t.Fatalf("Failed to make manifest list: %v", err)
}

View file

@ -150,7 +150,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
@ -159,7 +158,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
@ -179,7 +177,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}
@ -203,7 +200,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}

View file

@ -540,7 +540,6 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
if payloadMediaType != v1.MediaTypeImageIndex {
t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType)
}
}
// TestLinkPathFuncs ensures that the link path functions' behavior is locked

View file

@ -12,7 +12,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
//ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
// ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
type ocischemaManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore

View file

@ -24,24 +24,32 @@ const (
// The path layout in the storage backend is roughly as follows:
//
// <root>/v2
// -> repositories/
// -><name>/
// -> _manifests/
// revisions
// -> <manifest digest path>
// -> link
// tags/<tag>
// -> current/link
// -> index
// -> <algorithm>/<hex digest>/link
// -> _layers/
// <layer links to blob store>
// -> _uploads/<id>
// data
// startedat
// hashstates/<algorithm>/<offset>
// -> blob/<algorithm>
// <split directory content addressable storage>
// ├── blob
// │ └── <algorithm>
// │ └── <split directory content addressable storage>
// └── repositories
// └── <name>
// ├── _layers
// │ └── <layer links to blob store>
// ├── _manifests
// │ ├── revisions
// │ │ └── <manifest digest path>
// │ │ └── link
// │ └── tags
// │ └── <tag>
// │ ├── current
// │ │ └── link
// │ └── index
// │ └── <algorithm>
// │ └── <hex digest>
// │ └── link
// └── _uploads
// └── <id>
// ├── data
// ├── hashstates
// │ └── <algorithm>
// │ └── <offset>
// └── startedat
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
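As a small illustration of the tree above, a sketch of how one of those paths, the "current" link for a tag, could be composed; tagCurrentLinkPath is a hypothetical helper for this example only, not the package's pathFor:

```go
package pathsketch

import "path"

// tagCurrentLinkPath is a hypothetical helper (not the registry's pathFor)
// showing how the "current" tag link in the tree above is composed:
//
//	<root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
func tagCurrentLinkPath(root, name, tag string) string {
	return path.Join(root, "v2", "repositories", name, "_manifests", "tags", tag, "current", "link")
}

// For example, tagCurrentLinkPath("/docker/registry", "library/alpine", "latest")
// yields "/docker/registry/v2/repositories/library/alpine/_manifests/tags/latest/current/link".
```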
@ -105,7 +113,6 @@ const (
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to
// accomplish this. By keep the formatting separate from the pathSpec, we
@ -135,7 +142,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec(v))
if err != nil {
return "", err
}
@ -147,7 +153,6 @@ func pathFor(spec pathSpec) (string, error) {
root, err := pathFor(manifestTagsPathSpec{
name: v.name,
})
if err != nil {
return "", err
}
@ -155,7 +160,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec(v))
if err != nil {
return "", err
}
@ -163,7 +167,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec(v))
if err != nil {
return "", err
}
@ -171,7 +174,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec(v))
if err != nil {
return "", err
}
@ -182,7 +184,6 @@ func pathFor(spec pathSpec) (string, error) {
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
@ -431,8 +432,7 @@ type uploadHashStatePathSpec struct {
func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct {
}
type repositoriesRootPathSpec struct{}
func (repositoriesRootPathSpec) pathSpec() {}
@ -445,7 +445,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
// groups of digest folder. It will be as follows:
//
// <algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil {
return nil, err
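For the multilevel layout described in the comment above, a minimal sketch (an illustrative stand-in, not the package's digestPathComponents) using github.com/opencontainers/go-digest to split a digest into <algorithm>/<first two hex characters>/<full hex digest>:

```go
package pathsketch

import "github.com/opencontainers/go-digest"

// multilevelComponents splits a validated digest into the components of the
// multilevel layout described above: algorithm, first two characters of the
// hex digest, and the full hex digest.
func multilevelComponents(dgst digest.Digest) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}
	hex := dgst.Encoded()
	return []string{string(dgst.Algorithm()), hex[:2], hex}, nil
}
```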
@ -468,7 +467,6 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
// Reconstructs a digest from a path
func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath)
dir = path.Dir(dir)

View file

@ -108,7 +108,6 @@ func TestPathMapper(t *testing.T) {
if err == nil {
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
}
}
func TestDigestFromPath(t *testing.T) {
@ -132,7 +131,6 @@ func TestDigestFromPath(t *testing.T) {
if result != testcase.expected {
t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected)
}
}
}

View file

@ -98,7 +98,6 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
} else {
errors = pushError(errors, filePath, err)
}
}
uploads[uuid] = ud

View file

@ -38,7 +38,6 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file")
}
}
func TestPurgeGather(t *testing.T) {

View file

@ -18,7 +18,7 @@ var (
errInvalidURL = errors.New("invalid URL on layer")
)
//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore

Some files were not shown because too many files have changed in this diff.