1
0
Fork 1
mirror of https://github.com/vbatts/tar-split.git synced 2024-12-18 19:46:29 +00:00

*.go: linting project specific code

the pointer to the pool may be useful, but I'm holding off on that until I get
benchmarks of memory use that show the benefit.

Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
This commit is contained in:
Vincent Batts 2023-03-25 19:20:31 -04:00
parent 19fa6f3d1e
commit 516158dbfb
Signed by: vbatts
GPG key ID: E30EFAA812C6E5ED
4 changed files with 28 additions and 16 deletions

View file

@ -29,9 +29,14 @@ func BenchmarkUpstreamTar(b *testing.B) {
fh.Close() fh.Close()
b.Fatal(err) b.Fatal(err)
} }
io.Copy(ioutil.Discard, tr) _, err = io.Copy(ioutil.Discard, tr)
if err != nil {
b.Fatal(err)
}
}
if err := fh.Close(); err != nil {
b.Fatal(err)
} }
fh.Close()
} }
} }
@ -52,9 +57,14 @@ func BenchmarkOurTarNoAccounting(b *testing.B) {
fh.Close() fh.Close()
b.Fatal(err) b.Fatal(err)
} }
io.Copy(ioutil.Discard, tr) _, err = io.Copy(ioutil.Discard, tr)
if err != nil {
b.Fatal(err)
}
}
if err := fh.Close(); err != nil {
b.Fatal(err)
} }
fh.Close()
} }
} }
func BenchmarkOurTarYesAccounting(b *testing.B) { func BenchmarkOurTarYesAccounting(b *testing.B) {
@ -76,9 +86,14 @@ func BenchmarkOurTarYesAccounting(b *testing.B) {
fh.Close() fh.Close()
b.Fatal(err) b.Fatal(err)
} }
io.Copy(ioutil.Discard, tr) _, err = io.Copy(ioutil.Discard, tr)
if err != nil {
b.Fatal(err)
}
_ = tr.RawBytes() _ = tr.RawBytes()
} }
fh.Close() if err := fh.Close(); err != nil {
b.Fatal(err)
}
} }
} }

View file

@ -71,6 +71,8 @@ func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Write
crcSum = make([]byte, 8) crcSum = make([]byte, 8)
multiWriter = io.MultiWriter(w, crcHash) multiWriter = io.MultiWriter(w, crcHash)
copyBuffer = byteBufferPool.Get().([]byte) copyBuffer = byteBufferPool.Get().([]byte)
// TODO once we have some benchmark or memory profile then we can experiment with using *bytes.Buffer
//nolint:staticcheck // SA6002 not going to do a pointer here
defer byteBufferPool.Put(copyBuffer) defer byteBufferPool.Put(copyBuffer)
} else { } else {
crcHash.Reset() crcHash.Reset()

View file

@ -18,13 +18,11 @@ func TestLargeJunkPadding(t *testing.T) {
// Write a normal tar file into the pipe and then load it full of junk // Write a normal tar file into the pipe and then load it full of junk
// bytes as padding. We have to do this in a goroutine because we can't // bytes as padding. We have to do this in a goroutine because we can't
// store 20GB of junk in-memory. // store 20GB of junk in-memory.
var err error go func() {
go func(e error) {
// Empty archive. // Empty archive.
tw := tar.NewWriter(pW) tw := tar.NewWriter(pW)
if err := tw.Close(); err != nil { if err := tw.Close(); err != nil {
pW.CloseWithError(err) pW.CloseWithError(err)
e = err
return return
} }
@ -36,7 +34,6 @@ func TestLargeJunkPadding(t *testing.T) {
devZero, err := os.Open("/dev/zero") devZero, err := os.Open("/dev/zero")
if err != nil { if err != nil {
pW.CloseWithError(err) pW.CloseWithError(err)
e = err
return return
} }
defer devZero.Close() defer devZero.Close()
@ -46,17 +43,13 @@ func TestLargeJunkPadding(t *testing.T) {
} }
if _, err := io.CopyN(pW, devZero, junkChunkSize); err != nil { if _, err := io.CopyN(pW, devZero, junkChunkSize); err != nil {
pW.CloseWithError(err) pW.CloseWithError(err)
e = err
return return
} }
} }
fmt.Fprintln(os.Stderr, "[TestLargeJunkPadding] junk chunk finished") fmt.Fprintln(os.Stderr, "[TestLargeJunkPadding] junk chunk finished")
pW.Close() pW.Close()
}(err) }()
if err != nil {
t.Fatal(err)
}
// Disassemble our junk file. // Disassemble our junk file.
nilPacker := storage.NewJSONPacker(ioutil.Discard) nilPacker := storage.NewJSONPacker(ioutil.Discard)

View file

@ -199,7 +199,9 @@ func BenchmarkGetPut(b *testing.B) {
b.Fatal(err) b.Fatal(err)
} }
} }
fh.Sync() if err := fh.Sync(); err != nil {
b.Fatal(err)
}
up := NewJSONUnpacker(fh) up := NewJSONUnpacker(fh)
for { for {