
Merge pull request #60 from vbatts/workflows

Workflows
Vincent Batts 2023-03-25 20:56:13 -04:00 committed by GitHub
commit da105eb683
9 changed files with 105 additions and 39 deletions

.github/workflows/go.yml (new file)

@@ -0,0 +1,35 @@
name: build and vet
on:
  pull_request:
    branches_ignore: []
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go: ['1.15', '1.16', '1.17', '1.18', '1.19', '1.20']
    name: build and vet
    steps:
      - uses: actions/checkout@v2
        with:
          path: go/src/github.com/vbatts/tar-split
      - uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
      - name: vet and build
        env:
          GOPATH: /home/runner/work/tar-split/tar-split/go
        run: |
          set -x
          export PATH=$GOPATH/bin:$PATH
          cd go/src/github.com/vbatts/tar-split
          go test -v ./...
          go vet -v ./...
          go build -v ./...
          #go run mage.go -v vet build test

.github/workflows/lint.yml (new file)

@@ -0,0 +1,35 @@
name: lint
on:
  pull_request:
    branches_ignore: []
jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go: ['1.20']
    name: Linting
    steps:
      - uses: actions/checkout@v2
        with:
          path: go/src/github.com/vbatts/tar-split
      - uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
      - name: lint
        env:
          GOPATH: /home/runner/work/tar-split/tar-split/go
        run: |
          set -x
          #curl -sSL https://github.com/magefile/mage/releases/download/v1.14.0/mage_1.14.0_Linux-64bit.tar.gz | tar -xzv mage && mv mage $GOPATH/bin/
          go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
          export PATH=$GOPATH/bin:$PATH
          cd go/src/github.com/vbatts/tar-split
          golangci-lint run
          #go run mage.go -v lint


@@ -1,22 +0,0 @@
language: go
arch:
- amd64
- ppc64le
go:
- tip
- 1.15.x
- 1.14.x
- 1.13.x
- 1.12.x
- 1.11.x
- 1.10.x
# let us have pretty, fast Docker-based Travis workers!
sudo: false
install:
- go get -d ./...
script:
- go test -v ./...
- go vet ./...


@@ -41,7 +41,7 @@ type fileReader interface {
 // RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
 // This includes the header and padding.
 //
-// This call resets the current rawbytes buffer
+// # This call resets the current rawbytes buffer
 //
 // Only when RawAccounting is enabled, otherwise this returns nil
 func (tr *Reader) RawBytes() []byte {
@@ -126,7 +126,9 @@ func (tr *Reader) next() (*Header, error) {
 				return nil, err
 			}
 			if hdr.Typeflag == TypeXGlobalHeader {
-				mergePAX(hdr, paxHdrs)
+				if err = mergePAX(hdr, paxHdrs); err != nil {
+					return nil, err
+				}
 				return &Header{
 					Name:     hdr.Name,
 					Typeflag: hdr.Typeflag,
@@ -381,9 +383,9 @@ func parsePAX(r io.Reader) (map[string]string, error) {
 // header in case further processing is required.
 //
 // The err will be set to io.EOF only when one of the following occurs:
-// * Exactly 0 bytes are read and EOF is hit.
-// * Exactly 1 block of zeros is read and EOF is hit.
-// * At least 2 blocks of zeros are read.
+// - Exactly 0 bytes are read and EOF is hit.
+// - Exactly 1 block of zeros is read and EOF is hit.
+// - At least 2 blocks of zeros are read.
 func (tr *Reader) readHeader() (*Header, *block, error) {
 	// Two blocks of zero bytes marks the end of the archive.
 	n, err := io.ReadFull(tr.r, tr.blk[:])
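For readers new to this fork's extension, the doc comments touched above describe the RawAccounting/RawBytes feature. Below is a minimal usage sketch, not part of this commit; the archive path is a hypothetical placeholder. It shows how the raw header and padding bytes can be collected per entry when RawAccounting is enabled.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	fh, err := os.Open("example.tar") // hypothetical archive path
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()

	tr := tar.NewReader(fh)
	tr.RawAccounting = true // retain raw header/padding bytes as the stream is read
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// RawBytes returns the raw bytes accumulated since the last call
		// (header and padding, not the payload) and, per the doc comment
		// above, resets the buffer, so use or copy it before the next Next.
		raw := tr.RawBytes()
		fmt.Printf("%s: %d raw bytes\n", hdr.Name, len(raw))
	}
}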


@@ -833,8 +833,8 @@ func Benchmark(b *testing.B) {
 		// Write the archive to a byte buffer.
 		tw := NewWriter(&buf)
 		for _, file := range v.files {
-			tw.WriteHeader(file.hdr)
-			tw.Write(file.body)
+			_ = tw.WriteHeader(file.hdr)
+			_, _ = tw.Write(file.body)
 		}
 		tw.Close()
 		b.Run(v.label, func(b *testing.B) {


@@ -29,9 +29,14 @@ func BenchmarkUpstreamTar(b *testing.B) {
 				fh.Close()
 				b.Fatal(err)
 			}
-			io.Copy(ioutil.Discard, tr)
+			_, err = io.Copy(ioutil.Discard, tr)
+			if err != nil {
+				b.Fatal(err)
+			}
 		}
-		fh.Close()
+		if err := fh.Close(); err != nil {
+			b.Fatal(err)
+		}
 	}
 }
@@ -52,9 +57,14 @@ func BenchmarkOurTarNoAccounting(b *testing.B) {
 				fh.Close()
 				b.Fatal(err)
 			}
-			io.Copy(ioutil.Discard, tr)
+			_, err = io.Copy(ioutil.Discard, tr)
+			if err != nil {
+				b.Fatal(err)
+			}
 		}
-		fh.Close()
+		if err := fh.Close(); err != nil {
+			b.Fatal(err)
+		}
 	}
 }
@@ -76,9 +86,14 @@ func BenchmarkOurTarYesAccounting(b *testing.B) {
 				fh.Close()
 				b.Fatal(err)
 			}
-			io.Copy(ioutil.Discard, tr)
+			_, err = io.Copy(ioutil.Discard, tr)
+			if err != nil {
+				b.Fatal(err)
+			}
 			_ = tr.RawBytes()
 		}
-		fh.Close()
+		if err := fh.Close(); err != nil {
+			b.Fatal(err)
+		}
 	}
 }


@@ -71,6 +71,8 @@ func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer
 				crcSum = make([]byte, 8)
 				multiWriter = io.MultiWriter(w, crcHash)
 				copyBuffer = byteBufferPool.Get().([]byte)
+				// TODO once we have some benchmark or memory profile then we can experiment with using *bytes.Buffer
+				//nolint:staticcheck // SA6002 not going to do a pointer here
 				defer byteBufferPool.Put(copyBuffer)
 			} else {
 				crcHash.Reset()
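Some context for the nolint line added above: staticcheck's SA6002 warns when a plain []byte is stored in a sync.Pool, because the slice header is boxed into an interface{} and allocates on every Put; a pointer-shaped value avoids that. The TODO mentions *bytes.Buffer as one option; the sketch below (illustrative only, not code from this repository) shows the closely related pattern of pooling *[]byte, and a *bytes.Buffer pool works the same way since the pooled value is a pointer.

package poolsketch

import (
	"io"
	"sync"
)

// copyBufPool stores pointers to slices, so Get/Put move only a pointer
// through the interface value and avoid SA6002's per-Put allocation.
var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024) // 32 KiB, io.Copy's default buffer size
		return &b
	},
}

// copyWithPooledBuffer streams src to dst using a pooled scratch buffer.
func copyWithPooledBuffer(dst io.Writer, src io.Reader) (int64, error) {
	bufp := copyBufPool.Get().(*[]byte)
	defer copyBufPool.Put(bufp)
	return io.CopyBuffer(dst, src, *bufp)
}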


@@ -23,7 +23,6 @@ func TestLargeJunkPadding(t *testing.T) {
 		tw := tar.NewWriter(pW)
 		if err := tw.Close(); err != nil {
 			pW.CloseWithError(err)
-			t.Fatal(err)
 			return
 		}
@@ -35,7 +34,6 @@ func TestLargeJunkPadding(t *testing.T) {
 		devZero, err := os.Open("/dev/zero")
 		if err != nil {
 			pW.CloseWithError(err)
-			t.Fatal(err)
 			return
 		}
 		defer devZero.Close()
@@ -45,7 +43,6 @@ func TestLargeJunkPadding(t *testing.T) {
 			}
 			if _, err := io.CopyN(pW, devZero, junkChunkSize); err != nil {
 				pW.CloseWithError(err)
-				t.Fatal(err)
 				return
 			}
 		}
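Background on why the t.Fatal calls are dropped in this test: they appear to sit inside the goroutine that feeds the write end of the pipe, and the testing package only permits Fatal/FailNow on the goroutine running the test (go vet's testinggoroutine check flags this), so the error is surfaced through CloseWithError and observed on the reading side instead. A minimal illustration of that pattern, not code from this repository:

package pipesketch

import (
	"io"
	"testing"
)

// TestPipeWorker is illustrative only. The helper goroutine must not call
// t.Fatal (FailNow would exit the wrong goroutine), so it hands any error
// to the reader via CloseWithError and the test goroutine reports it.
func TestPipeWorker(t *testing.T) {
	pR, pW := io.Pipe()
	go func() {
		_, err := pW.Write([]byte("payload"))
		// A nil err closes the pipe cleanly; readers then see io.EOF.
		pW.CloseWithError(err)
	}()
	if _, err := io.ReadAll(pR); err != nil {
		t.Fatal(err) // safe: this runs on the test goroutine
	}
}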


@@ -199,7 +199,9 @@ func BenchmarkGetPut(b *testing.B) {
 				b.Fatal(err)
 			}
 		}
-		fh.Sync()
+		if err := fh.Sync(); err != nil {
+			b.Fatal(err)
+		}
 		up := NewJSONUnpacker(fh)
 		for {