feat(linter): enable errcheck linter in golangci-lint

Also, bump the linter version to the latest available version.

Signed-off-by: Milos Gajdos <milosthegajdos@gmail.com>
This commit is contained in:
Milos Gajdos 2023-11-18 06:50:40 +00:00
parent 9610a1e618
commit 7ce129d63b
No known key found for this signature in database
36 changed files with 243 additions and 66 deletions

View File

@ -11,7 +11,6 @@ linters:
- misspell
- bodyclose
- prealloc
disable:
- errcheck
linters-settings:

View File

@ -18,5 +18,9 @@ import (
)
func main() {
// NOTE(milosgajdos): if the only two commands registered
// with registry.RootCmd fail they will halt the program
// execution and exit the program with non-zero exit code.
// nolint:errcheck
registry.RootCmd.Execute()
}

View File

@ -2,7 +2,7 @@
ARG GO_VERSION=1.20.10
ARG ALPINE_VERSION=3.18
ARG GOLANGCI_LINT_VERSION=v1.54.2
ARG GOLANGCI_LINT_VERSION=v1.55.2
ARG BUILDTAGS="include_gcs"
FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint

View File

@ -265,6 +265,9 @@ func Handler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
checks := CheckStatus()
if len(checks) != 0 {
// NOTE(milosgajdos): disable errcheck as the error is
// accessible via /debug/health
// nolint:errcheck
errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.
WithDetail("health check failed: please see /debug/health"))
return

View File

@ -20,6 +20,7 @@ import (
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/sirupsen/logrus"
)
type accessController struct {
@ -151,5 +152,7 @@ func createHtpasswdFile(path string) error {
}
func init() {
auth.Register("htpasswd", auth.InitFunc(newAccessController))
if err := auth.Register("htpasswd", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register htpasswd auth: %v", err)
}
}

View File

@ -33,7 +33,9 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
credentials, ok := htpasswd.entries[username]
if !ok {
// timing attack paranoia
bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
if err := bcrypt.CompareHashAndPassword([]byte{}, []byte(password)); err != nil {
return auth.ErrAuthenticationFailure
}
return auth.ErrAuthenticationFailure
}

View File

@ -13,6 +13,7 @@ import (
"strings"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/sirupsen/logrus"
)
// accessController provides a simple implementation of auth.AccessController
@ -87,5 +88,7 @@ func (ch challenge) Error() string {
// init registers the silly auth backend.
func init() {
auth.Register("silly", auth.InitFunc(newAccessController))
if err := auth.Register("silly", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register silly auth: %v", err)
}
}

View File

@ -14,6 +14,7 @@ import (
"github.com/distribution/distribution/v3/registry/auth"
"github.com/go-jose/go-jose/v3"
"github.com/sirupsen/logrus"
)
// accessSet maps a typed, named resource to
@ -339,5 +340,7 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac
// init handles registering the token auth backend.
func init() {
auth.Register("token", auth.InitFunc(newAccessController))
if err := auth.Register("token", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register token auth: %v", err)
}
}

View File

@ -22,7 +22,10 @@ func FuzzToken1(f *testing.F) {
if err != nil {
return
}
token.Verify(verifyOps)
_, err = token.Verify(verifyOps)
if err != nil {
return
}
_, _ = token.VerifySigningKey(verifyOps)
})
}
@ -40,7 +43,10 @@ func FuzzToken2(f *testing.F) {
if err != nil {
return
}
token.Verify(verifyOps)
_, err = token.Verify(verifyOps)
if err != nil {
return
}
_, _ = token.VerifySigningKey(verifyOps)
})
}

View File

@ -339,6 +339,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// -----------------------------------
@ -360,6 +361,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// -----------------------------------
@ -409,6 +411,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// -----------------------------------
@ -584,6 +587,7 @@ func TestTagsAPI(t *testing.T) {
}
if test.expectedBodyErr != nil {
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, *test.expectedBodyErr)
} else {
var body tagsAPIResponse
@ -900,6 +904,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
defer resp.Body.Close()
checkResponse(t, "bad layer push", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "bad layer push", resp, errcode.ErrorCodeDigestInvalid)
// -----------------------------------------
@ -927,15 +932,22 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// ------------------------------------------
// Now, actually do successful upload.
layerLength, _ := layerFile.Seek(0, io.SeekEnd)
layerFile.Seek(0, io.SeekStart)
layerLength, err := layerFile.Seek(0, io.SeekEnd)
if err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
// ------------------------------------------
// Now, push just a chunk
layerFile.Seek(0, 0)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
canonicalDigester := digest.Canonical.Digester()
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
@ -943,7 +955,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
}
canonicalDigest := canonicalDigester.Digest()
layerFile.Seek(0, 0)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName)
uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength)
@ -961,7 +975,10 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// -----------------------------------------
// Do layer push with invalid content range
layerFile.Seek(0, io.SeekStart)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName)
sizeInvalid := chunkOptions{
contentRange: "0-20",
@ -973,7 +990,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
defer resp.Body.Close()
checkResponse(t, "putting size invalid chunk", resp, http.StatusBadRequest)
layerFile.Seek(0, io.SeekStart)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName)
outOfOrder := chunkOptions{
contentRange: "3-22",
@ -1013,7 +1032,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// Verify the body
verifier := layerDigest.Verifier()
io.Copy(verifier, resp.Body)
if _, err := io.Copy(verifier, resp.Body); err != nil {
t.Fatalf("unexpected error reading response body: %v", err)
}
if !verifier.Verified() {
t.Fatalf("response body did not pass verification")
@ -1134,12 +1155,16 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
// ----------------
// Reupload previously deleted blob
layerFile.Seek(0, io.SeekStart)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ := startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
layerFile.Seek(0, io.SeekStart)
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
canonicalDigester := digest.Canonical.Digester()
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
t.Fatalf("error copying to digest: %v", err)
@ -1342,6 +1367,7 @@ func TestManifestAPI_DeleteTag_Unknown(t *testing.T) {
defer resp.Body.Close()
checkResponse(t, msg, resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, msg, resp, errcode.ErrorCodeManifestUnknown)
}
@ -1504,6 +1530,7 @@ func testManifestWithStorageError(t *testing.T, env *testEnv, imageName referenc
}
defer resp.Body.Close()
checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode)
}
@ -1528,6 +1555,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
defer resp.Body.Close()
checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, errcode.ErrorCodeManifestUnknown)
tagsURL, err := env.builder.BuildTagsURL(imageName)
@ -1543,6 +1571,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
// Check that we get an unknown repository error when asking for tags
checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, errcode.ErrorCodeNameUnknown)
// --------------------------------

View File

@ -212,7 +212,9 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request)
w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
w.Write(p)
if _, err := w.Write(p); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
}
func etagMatch(r *http.Request, etag string) bool {

View File

@ -24,8 +24,14 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
err = tc.SetKeepAlive(true)
if err != nil {
return
}
err = tc.SetKeepAlivePeriod(3 * time.Minute)
if err != nil {
return
}
return tc, nil
}

View File

@ -138,7 +138,10 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
}
if pbs.scheduler != nil && pbs.ttl != nil {
pbs.scheduler.AddBlob(blobRef, *pbs.ttl)
if err := pbs.scheduler.AddBlob(blobRef, *pbs.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding blob: %s", err)
return err
}
}
return nil

View File

@ -77,7 +77,10 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
}
if pms.scheduler != nil && pms.ttl != nil {
pms.scheduler.AddManifest(repoBlob, *pms.ttl)
if err := pms.scheduler.AddManifest(repoBlob, *pms.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding manifest: %s", err)
return nil, err
}
}
// Ensure the manifest blob is cleaned up

View File

@ -102,6 +102,7 @@ var ServeCmd = &cobra.Command{
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
// nolint:errcheck
cmd.Usage()
os.Exit(1)
}

View File

@ -219,7 +219,9 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis
return nil, fmt.Errorf("failed to create certificate: %v", err)
}
if _, err := os.Stat(os.TempDir()); os.IsNotExist(err) {
os.Mkdir(os.TempDir(), 1777)
if err := os.Mkdir(os.TempDir(), 1777); err != nil {
return nil, fmt.Errorf("failed to create temp dir: %v", err)
}
}
certPath := path.Join(os.TempDir(), name+".pem")

View File

@ -31,6 +31,7 @@ var RootCmd = &cobra.Command{
version.PrintVersion()
return
}
// nolint:errcheck
cmd.Usage()
},
}
@ -49,6 +50,7 @@ var GCCmd = &cobra.Command{
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
// nolint:errcheck
cmd.Usage()
os.Exit(1)
}

View File

@ -39,7 +39,9 @@ func TestWriteSeek(t *testing.T) {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
contents := []byte{1, 2, 3}
blobUpload.Write(contents)
if _, err := blobUpload.Write(contents); err != nil {
t.Fatalf("unexpected error writing contents: %v", err)
}
blobUpload.Close()
offset := blobUpload.Size()
if offset != int64(len(contents)) {
@ -596,6 +598,7 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio
if err != nil {
return distribution.Descriptor{}, err
}
// nolint:errcheck
defer wr.Cancel(ctx)
if nn, err := io.Copy(wr, rd); err != nil {

View File

@ -319,7 +319,9 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
copyStatus := *resp.CopyStatus
if d.copyStatusPollMaxRetry == -1 && copyStatus == blob.CopyStatusTypePending {
destBlobRef.AbortCopyFromURL(ctx, *resp.CopyID, nil)
if _, err := destBlobRef.AbortCopyFromURL(ctx, *resp.CopyID, nil); err != nil {
return err
}
return nil
}
@ -331,7 +333,9 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
}
if retryCount >= d.copyStatusPollMaxRetry {
destBlobRef.AbortCopyFromURL(ctx, *props.CopyID, nil)
if _, err := destBlobRef.AbortCopyFromURL(ctx, *props.CopyID, nil); err != nil {
return err
}
return fmt.Errorf("max retries for copy polling reached, aborting copy")
}

View File

@ -107,7 +107,9 @@ func TestCommitAfterMove(t *testing.T) {
destPath := "/dest/file"
ctx := context.Background()
// nolint:errcheck
defer driver.Delete(ctx, sourcePath)
// nolint:errcheck
defer driver.Delete(ctx, destPath)
writer, err := driver.Writer(ctx, sourcePath, false)

View File

@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
@ -140,7 +141,9 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte
defer writer.Close()
_, err = io.Copy(writer, bytes.NewReader(contents))
if err != nil {
writer.Cancel(ctx)
if cErr := writer.Cancel(ctx); cErr != nil {
return errors.Join(err, cErr)
}
return err
}
return writer.Commit(ctx)

View File

@ -119,6 +119,7 @@ func TestCommitEmpty(t *testing.T) {
ctx := dcontext.Background()
writer, err := driver.Writer(ctx, filename, false)
// nolint:errcheck
defer driver.Delete(ctx, filename)
if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err)
@ -162,6 +163,7 @@ func TestCommit(t *testing.T) {
contents := make([]byte, defaultChunkSize)
writer, err := driver.Writer(ctx, filename, false)
// nolint:errcheck
defer driver.Delete(ctx, filename)
if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err)

View File

@ -97,7 +97,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro
}
f.truncate()
f.WriteAt(contents, 0)
if _, err := f.WriteAt(contents, 0); err != nil {
return err
}
return nil
}
@ -301,7 +303,10 @@ func (w *writer) Close() error {
return fmt.Errorf("already closed")
}
w.closed = true
w.flush()
if err := w.flush(); err != nil {
return err
}
return nil
}
@ -329,16 +334,23 @@ func (w *writer) Commit(ctx context.Context) error {
return fmt.Errorf("already cancelled")
}
w.committed = true
w.flush()
if err := w.flush(); err != nil {
return err
}
return nil
}
func (w *writer) flush() {
func (w *writer) flush() error {
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
w.f.WriteAt(w.buffer, int64(len(w.f.data)))
if _, err := w.f.WriteAt(w.buffer, int64(len(w.f.data))); err != nil {
return err
}
w.buffer = []byte{}
w.buffSize = 0
return nil
}

View File

@ -17,6 +17,7 @@ import (
"github.com/distribution/distribution/v3/internal/dcontext"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware"
"github.com/sirupsen/logrus"
)
// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
@ -226,5 +227,7 @@ func (lh *cloudFrontStorageMiddleware) RedirectURL(r *http.Request, path string)
// init registers the cloudfront layerHandler backend.
func init() {
storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware)
if err := storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware); err != nil {
logrus.Errorf("failed to register cloudfront middleware: %v", err)
}
}

View File

@ -45,7 +45,9 @@ pZeMRablbPQdp8/1NyIwimq1VlG0ohQ4P6qhW7E09ZMC
if err != nil {
t.Fatal("File cannot be created")
}
file.WriteString(privk)
if _, err := file.WriteString(privk); err != nil {
t.Fatal(err)
}
defer os.Remove(file.Name())
options["privatekey"] = file.Name()
options["keypairid"] = "test"

View File

@ -9,6 +9,7 @@ import (
"net/http"
"net/http/httptest"
"reflect" // used as a replacement for testify
"sync"
"testing"
"time"
)
@ -31,7 +32,10 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
return
}
w.Write(bytes)
if _, err := w.Write(bytes); err != nil {
w.WriteHeader(500)
return
}
}
func newTestHandler(data awsIPResponse) *httptest.Server {
@ -77,7 +81,8 @@ func TestMatchIPV6(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("ff00::")))
assertEqual(t, 1, len(ips.ipv6))
assertEqual(t, 0, len(ips.ipv4))
@ -94,7 +99,8 @@ func TestMatchIPV4(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -114,7 +120,8 @@ func TestMatchIPV4_2(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -134,7 +141,8 @@ func TestMatchIPV4WithRegionMatched(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-east-1"})
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -154,7 +162,8 @@ func TestMatchIPV4WithRegionMatch_2(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2", "us-east-1"})
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -174,7 +183,8 @@ func TestMatchIPV4WithRegionNotMatched(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2"})
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -193,7 +203,8 @@ func TestInvalidData(t *testing.T) {
ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx)
err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, 1, len(ips.ipv4))
}
@ -227,7 +238,12 @@ func TestParsing(t *testing.T) {
"region": "anotherregion",
"service": "ec2"}]
}`
rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(data)) })
rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(data)); err != nil {
w.WriteHeader(500)
return
}
})
t.Parallel()
server := httptest.NewServer(rawMockHandler)
defer server.Close()
@ -251,15 +267,25 @@ func TestParsing(t *testing.T) {
func TestUpdateCalledRegularly(t *testing.T) {
t.Parallel()
mu := &sync.RWMutex{}
updateCount := 0
server := httptest.NewServer(http.HandlerFunc(
func(rw http.ResponseWriter, req *http.Request) {
mu.Lock()
updateCount++
rw.Write([]byte("ok"))
mu.Unlock()
if _, err := rw.Write([]byte("ok")); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
}
}))
defer server.Close()
newAWSIPs(context.Background(), fmt.Sprintf("%s/", server.URL), time.Second, nil)
if _, err := newAWSIPs(context.Background(), fmt.Sprintf("%s/", server.URL), time.Second, nil); err != nil {
t.Fatalf("failed creating AWS IP filter: %v", err)
}
time.Sleep(time.Second*4 + time.Millisecond*500)
mu.RLock()
defer mu.RUnlock()
if updateCount < 4 {
t.Errorf("Update should have been called at least 4 times, actual=%d", updateCount)
}
@ -364,8 +390,14 @@ func BenchmarkContainsRandom(b *testing.B) {
for i := 0; i < b.N; i++ {
ipv4[i] = make([]byte, 4)
ipv6[i] = make([]byte, 16)
rand.Read(ipv4[i])
rand.Read(ipv6[i])
_, err := rand.Read(ipv4[i])
if err != nil {
b.Fatal(err)
}
_, err = rand.Read(ipv6[i])
if err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -381,8 +413,14 @@ func BenchmarkContainsProd(b *testing.B) {
for i := 0; i < b.N; i++ {
ipv4[i] = make([]byte, 4)
ipv6[i] = make([]byte, 16)
rand.Read(ipv4[i])
rand.Read(ipv6[i])
_, err := rand.Read(ipv4[i])
if err != nil {
b.Fatal(err)
}
_, err = rand.Read(ipv6[i])
if err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@ -9,6 +9,7 @@ import (
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware"
"github.com/sirupsen/logrus"
)
type redirectStorageMiddleware struct {
@ -52,5 +53,7 @@ func (r *redirectStorageMiddleware) RedirectURL(_ *http.Request, urlPath string)
}
func init() {
storagemiddleware.Register("redirect", newRedirectStorageMiddleware)
if err := storagemiddleware.Register("redirect", newRedirectStorageMiddleware); err != nil {
logrus.Errorf("failed to register redirect storage middleware: %v", err)
}
}

View File

@ -186,6 +186,7 @@ func TestEmptyRootList(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
// nolint:errcheck
defer rootedDriver.Delete(ctx, filename)
keys, _ := emptyRootDriver.List(ctx, "/")
@ -230,6 +231,7 @@ func TestStorageClass(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error creating content with storage class %v: %v", storageClass, err)
}
// nolint:errcheck
defer s3Driver.Delete(ctx, filename)
driverUnwrapped := s3Driver.Base.StorageDriver.(*driver)
@ -751,17 +753,21 @@ func TestMoveWithMultipartCopy(t *testing.T) {
sourcePath := "/source"
destPath := "/dest"
// nolint:errcheck
defer d.Delete(ctx, sourcePath)
// nolint:errcheck
defer d.Delete(ctx, destPath)
// An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy.
multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize
contents := make([]byte, 2*multipartCopyThresholdSize)
rand.Read(contents)
if _, err := rand.Read(contents); err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
err = d.PutContent(ctx, sourcePath, contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
t.Fatalf("unexpected error writing content: %v", err)
}
err = d.Move(ctx, sourcePath, destPath)

View File

@ -105,6 +105,9 @@ func Sign(req *request.Request) {
Time: req.Time,
Credentials: req.Config.Credentials,
}
// TODO(milosgajdos): figure this out; if Sign returns error which we should check,
// we should modify the codepath related to svc.Handlers.Sign.PushBack etc.
// nolint:errcheck
v2.Sign()
}

View File

@ -302,7 +302,9 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
defer reader.Close()
writtenChecksum := sha256.New()
io.Copy(writtenChecksum, reader)
if _, err := io.Copy(writtenChecksum, reader); err != nil {
c.Assert(err, check.IsNil)
}
c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
}
@ -1108,6 +1110,7 @@ func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
parentDir := randomPath(8)
defer func() {
c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}()
@ -1146,6 +1149,7 @@ func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
parentDir := randomPath(8)
defer func() {
c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}()
@ -1182,6 +1186,7 @@ func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) {
parentDir := randomPath(8)
defer func() {
c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}()
@ -1240,8 +1245,10 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
_, err = tf.Write(contents)
c.Assert(err, check.IsNil)
tf.Sync()
tf.Seek(0, io.SeekStart)
err = tf.Sync()
c.Assert(err, check.IsNil)
_, err = tf.Seek(0, io.SeekStart)
c.Assert(err, check.IsNil)
writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
c.Assert(err, check.IsNil)

View File

@ -42,7 +42,9 @@ func TestSimpleRead(t *testing.T) {
}
verifier := dgst.Verifier()
io.Copy(verifier, fr)
if _, err := io.Copy(verifier, fr); err != nil {
t.Fatalf("failed writing verification data: %v", err)
}
if !verifier.Verified() {
t.Fatalf("unable to verify read data")

View File

@ -224,10 +224,12 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) {
}
manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator)
manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst})
return nil
err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
return repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst})
})
if err != nil {
t.Fatalf("manifest enumeration failed: %v", err)
}
before1 := allBlobs(t, registry)
before2 := allManifests(t, manifestService)
@ -314,8 +316,13 @@ func TestDeletionHasEffect(t *testing.T) {
image2 := uploadRandomSchema2Image(t, repo)
image3 := uploadRandomSchema2Image(t, repo)
manifests.Delete(ctx, image2.manifestDigest)
manifests.Delete(ctx, image3.manifestDigest)
if err := manifests.Delete(ctx, image2.manifestDigest); err != nil {
t.Fatalf("failed deleting manifest digest: %v", err)
}
if err := manifests.Delete(ctx, image3.manifestDigest); err != nil {
t.Fatalf("failed deleting manifest digest: %v", err)
}
// Run GC
err := MarkAndSweep(dcontext.Background(), inmemoryDriver, registry, GCOpts{

View File

@ -137,7 +137,9 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil {
t.Fatalf("unexpected error finishing upload: %v", err)
}
builder.AppendReference(distribution.Descriptor{Digest: dgst})
if err := builder.AppendReference(distribution.Descriptor{Digest: dgst, MediaType: schema2.MediaTypeLayer}); err != nil {
t.Fatalf("unexpected error appending references: %v", err)
}
}
sm, err := builder.Build(ctx)
@ -349,7 +351,9 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
t.Fatalf("%s: unexpected error finishing upload: %v", testname, err)
}
builder.AppendReference(distribution.Descriptor{Digest: dgst})
if err := builder.AppendReference(distribution.Descriptor{Digest: dgst, MediaType: v1.MediaTypeImageLayer}); err != nil {
t.Fatalf("%s unexpected error appending references: %v", testname, err)
}
}
mfst, err := builder.Build(ctx)

View File

@ -35,7 +35,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
t.Fatalf("Unable to resolve path")
}
if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file")
}
}

View File

@ -143,5 +143,8 @@ func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(response.StatusCode)
io.Copy(w, bytes.NewReader(response.Body))
if _, err := io.Copy(w, bytes.NewReader(response.Body)); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
}

View File

@ -50,7 +50,9 @@ func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Di
}
builder := schema2.NewManifestBuilder(d, configJSON)
for _, digest := range digests {
builder.AppendReference(distribution.Descriptor{Digest: digest})
if err := builder.AppendReference(distribution.Descriptor{Digest: digest}); err != nil {
return nil, fmt.Errorf("unexpected error building manifest: %v", err)
}
}
mfst, err := builder.Build(ctx)