Build and install from GOPATH
* Rename 'vendor/src' -> 'vendor'
* Ignore vendor/ instead of vendor/src/ for lint
* Rename 'cmd/client' -> 'cmd/ocic' to make it 'go install'able
* Rename 'cmd/server' -> 'cmd/ocid' to make it 'go install'able
* Update Makefile to build and install from GOPATH
* Update tests to locate ocid/ocic in GOPATH/bin
* Search for binaries in GOPATH/bin instead of PATH
* Install tools using `go get -u`, so they are updated on each run

Signed-off-by: Jonathan Yu <jawnsy@redhat.com>
This commit is contained in:
parent 9da2882d49
commit 6c9628cdb1
1111 changed files with 70 additions and 61 deletions
vendor/github.com/containers/storage/pkg/ioutils/buffer.go (51 additions, generated, vendored, normal file)
@@ -0,0 +1,51 @@
package ioutils

import (
    "errors"
    "io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
    buf []byte
    pos int
    lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
    n := copy(b.buf[b.pos:cap(b.buf)], p)
    b.pos += n

    if n < len(p) {
        if b.pos == cap(b.buf) {
            return n, errBufferFull
        }
        return n, io.ErrShortWrite
    }
    return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
    n := copy(p, b.buf[b.lastRead:b.pos])
    b.lastRead += n
    return n, nil
}

func (b *fixedBuffer) Len() int {
    return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
    return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
    b.pos = 0
    b.lastRead = 0
    b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
    return string(b.buf[b.lastRead:b.pos])
}
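Note (not part of the commit): a minimal sketch of how this fixed-capacity buffer behaves. Because fixedBuffer is unexported it only compiles inside the ioutils package itself, and the function name exampleFixedBuffer is hypothetical.

package ioutils

// exampleFixedBuffer is illustrative only; it assumes it lives in the
// ioutils package, since fixedBuffer is unexported.
func exampleFixedBuffer() (string, error) {
    // Backing slice with zero length but a fixed 8-byte capacity.
    b := &fixedBuffer{buf: make([]byte, 0, 8)}

    if _, err := b.Write([]byte("hello")); err != nil {
        return "", err
    }

    // Only 3 of the 5 bytes fit; the buffer is then exactly at capacity,
    // so Write returns errBufferFull alongside the partial count and the
    // trailing "ld" is dropped.
    if _, err := b.Write([]byte("world")); err != nil {
        _ = err // expected: errBufferFull
    }

    return b.String(), nil // "hellowor"
}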
vendor/github.com/containers/storage/pkg/ioutils/fmt.go (22 additions, generated, vendored, normal file)
@@ -0,0 +1,22 @@
package ioutils

import (
    "fmt"
    "io"
)

// FprintfIfNotEmpty prints the string value if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
    if value != "" {
        return fmt.Fprintf(w, format, value)
    }
    return 0, nil
}

// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
    if ok {
        return fmt.Fprintf(w, format, ok)
    }
    return 0, nil
}
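Note (not part of the commit): a small usage sketch for the two helpers above, assuming the vendored package is imported at its upstream path github.com/containers/storage/pkg/ioutils.

package main

import (
    "os"

    "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // Prints "name: ocid"; the second call prints nothing because the value is empty.
    ioutils.FprintfIfNotEmpty(os.Stdout, "name: %s\n", "ocid")
    ioutils.FprintfIfNotEmpty(os.Stdout, "label: %s\n", "")

    // Prints "privileged: true" only when the flag is true.
    ioutils.FprintfIfTrue(os.Stdout, "privileged: %v\n", true)
    ioutils.FprintfIfTrue(os.Stdout, "privileged: %v\n", false)
}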
vendor/github.com/containers/storage/pkg/ioutils/fswriters.go (82 additions, generated, vendored, normal file)
@@ -0,0 +1,82 @@
package ioutils

import (
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
)

// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
    f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
    if err != nil {
        return nil, err
    }

    abspath, err := filepath.Abs(filename)
    if err != nil {
        return nil, err
    }
    return &atomicFileWriter{
        f: f,
        fn: abspath,
        perm: perm,
    }, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
    f, err := NewAtomicFileWriter(filename, perm)
    if err != nil {
        return err
    }
    n, err := f.Write(data)
    if err == nil && n < len(data) {
        err = io.ErrShortWrite
        f.(*atomicFileWriter).writeErr = err
    }
    if err1 := f.Close(); err == nil {
        err = err1
    }
    return err
}

type atomicFileWriter struct {
    f *os.File
    fn string
    writeErr error
    perm os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
    n, err := w.f.Write(dt)
    if err != nil {
        w.writeErr = err
    }
    return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
    defer func() {
        if retErr != nil || w.writeErr != nil {
            os.Remove(w.f.Name())
        }
    }()
    if err := w.f.Sync(); err != nil {
        w.f.Close()
        return err
    }
    if err := w.f.Close(); err != nil {
        return err
    }
    if err := os.Chmod(w.f.Name(), w.perm); err != nil {
        return err
    }
    if w.writeErr == nil {
        return os.Rename(w.f.Name(), w.fn)
    }
    return nil
}
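Note (not part of the commit): a minimal usage sketch for AtomicWriteFile, assuming the vendored import path above; the file name config.json is only an example.

package main

import (
    "log"

    "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // The data goes to a hidden ".tmp-config.json*" file in the same directory
    // and is renamed over config.json only after Sync and Close succeed, so
    // readers never observe a partially written file.
    if err := ioutils.AtomicWriteFile("config.json", []byte(`{"ok":true}`), 0644); err != nil {
        log.Fatal(err)
    }
}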
vendor/github.com/containers/storage/pkg/ioutils/multireader.go (226 additions, generated, vendored, normal file)
@@ -0,0 +1,226 @@
package ioutils

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

type pos struct {
    idx int
    offset int64
}

type multiReadSeeker struct {
    readers []io.ReadSeeker
    pos *pos
    posIdx map[io.ReadSeeker]int
}

func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
    var tmpOffset int64
    switch whence {
    case os.SEEK_SET:
        for i, rdr := range r.readers {
            // get size of the current reader
            s, err := rdr.Seek(0, os.SEEK_END)
            if err != nil {
                return -1, err
            }

            if offset > tmpOffset+s {
                if i == len(r.readers)-1 {
                    rdrOffset := s + (offset - tmpOffset)
                    if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
                        return -1, err
                    }
                    r.pos = &pos{i, rdrOffset}
                    return offset, nil
                }

                tmpOffset += s
                continue
            }

            rdrOffset := offset - tmpOffset
            idx := i

            rdr.Seek(rdrOffset, os.SEEK_SET)
            // make sure all following readers are at 0
            for _, rdr := range r.readers[i+1:] {
                rdr.Seek(0, os.SEEK_SET)
            }

            if rdrOffset == s && i != len(r.readers)-1 {
                idx++
                rdrOffset = 0
            }
            r.pos = &pos{idx, rdrOffset}
            return offset, nil
        }
    case os.SEEK_END:
        for _, rdr := range r.readers {
            s, err := rdr.Seek(0, os.SEEK_END)
            if err != nil {
                return -1, err
            }
            tmpOffset += s
        }
        r.Seek(tmpOffset+offset, os.SEEK_SET)
        return tmpOffset + offset, nil
    case os.SEEK_CUR:
        if r.pos == nil {
            return r.Seek(offset, os.SEEK_SET)
        }
        // Just return the current offset
        if offset == 0 {
            return r.getCurOffset()
        }

        curOffset, err := r.getCurOffset()
        if err != nil {
            return -1, err
        }
        rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
        if err != nil {
            return -1, err
        }

        r.pos = &pos{r.posIdx[rdr], rdrOffset}
        return curOffset + offset, nil
    default:
        return -1, fmt.Errorf("Invalid whence: %d", whence)
    }

    return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}

func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
    var rdr io.ReadSeeker
    var rdrOffset int64

    for i, rdr := range r.readers {
        offsetTo, err := r.getOffsetToReader(rdr)
        if err != nil {
            return nil, -1, err
        }
        if offsetTo > offset {
            rdr = r.readers[i-1]
            rdrOffset = offsetTo - offset
            break
        }

        if rdr == r.readers[len(r.readers)-1] {
            rdrOffset = offsetTo + offset
            break
        }
    }

    return rdr, rdrOffset, nil
}

func (r *multiReadSeeker) getCurOffset() (int64, error) {
    var totalSize int64
    for _, rdr := range r.readers[:r.pos.idx+1] {
        if r.posIdx[rdr] == r.pos.idx {
            totalSize += r.pos.offset
            break
        }

        size, err := getReadSeekerSize(rdr)
        if err != nil {
            return -1, fmt.Errorf("error getting seeker size: %v", err)
        }
        totalSize += size
    }
    return totalSize, nil
}

func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
    var offset int64
    for _, r := range r.readers {
        if r == rdr {
            break
        }

        size, err := getReadSeekerSize(rdr)
        if err != nil {
            return -1, err
        }
        offset += size
    }
    return offset, nil
}

func (r *multiReadSeeker) Read(b []byte) (int, error) {
    if r.pos == nil {
        r.pos = &pos{0, 0}
    }

    bCap := int64(cap(b))
    buf := bytes.NewBuffer(nil)
    var rdr io.ReadSeeker

    for _, rdr = range r.readers[r.pos.idx:] {
        readBytes, err := io.CopyN(buf, rdr, bCap)
        if err != nil && err != io.EOF {
            return -1, err
        }
        bCap -= readBytes

        if bCap == 0 {
            break
        }
    }

    rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
    if err != nil {
        return -1, err
    }
    r.pos = &pos{r.posIdx[rdr], rdrPos}
    return buf.Read(b)
}

func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
    // save the current position
    pos, err := rdr.Seek(0, os.SEEK_CUR)
    if err != nil {
        return -1, err
    }

    // get the size
    size, err := rdr.Seek(0, os.SEEK_END)
    if err != nil {
        return -1, err
    }

    // reset the position
    if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
        return -1, err
    }
    return size, nil
}

// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
    if len(readers) == 1 {
        return readers[0]
    }
    idx := make(map[io.ReadSeeker]int)
    for i, rdr := range readers {
        idx[rdr] = i
    }
    return &multiReadSeeker{
        readers: readers,
        posIdx: idx,
    }
}
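Note (not part of the commit): a minimal usage sketch for MultiReadSeeker, assuming the vendored import path; it sticks with the pre-io.SeekStart os.SEEK_SET constant only because the vendored code itself does.

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "strings"

    pkgioutils "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // Two in-memory readers behave as one 11-byte stream.
    rs := pkgioutils.MultiReadSeeker(
        strings.NewReader("hello "),
        strings.NewReader("world"),
    )

    // Seek past the first reader (6 bytes); reads continue in the second one.
    if _, err := rs.Seek(6, os.SEEK_SET); err != nil {
        fmt.Println("seek:", err)
        return
    }

    rest, err := ioutil.ReadAll(rs)
    if err != nil {
        fmt.Println("read:", err)
        return
    }
    fmt.Printf("%s\n", rest) // world
}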
vendor/github.com/containers/storage/pkg/ioutils/readers.go (154 additions, generated, vendored, normal file)
@@ -0,0 +1,154 @@
package ioutils

import (
    "crypto/sha256"
    "encoding/hex"
    "io"

    "golang.org/x/net/context"
)

type readCloserWrapper struct {
    io.Reader
    closer func() error
}

func (r *readCloserWrapper) Close() error {
    return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
    return &readCloserWrapper{
        Reader: r,
        closer: closer,
    }
}

type readerErrWrapper struct {
    reader io.Reader
    closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
    n, err := r.reader.Read(p)
    if err != nil {
        r.closer()
    }
    return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
    return &readerErrWrapper{
        reader: r,
        closer: closer,
    }
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
    h := sha256.New()
    if _, err := io.Copy(h, src); err != nil {
        return "", err
    }
    return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {
    Rc io.ReadCloser
    Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
    n, err = r.Rc.Read(p)
    if err == io.EOF {
        r.runFunc()
    }
    return
}

// Close closes the file and run the function.
func (r *OnEOFReader) Close() error {
    err := r.Rc.Close()
    r.runFunc()
    return err
}

func (r *OnEOFReader) runFunc() {
    if fn := r.Fn; fn != nil {
        fn()
        r.Fn = nil
    }
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
    cancel func()
    pR *io.PipeReader // Stream to read from
    pW *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
    pR, pW := io.Pipe()

    // Create a context used to signal when the pipe is closed
    doneCtx, cancel := context.WithCancel(context.Background())

    p := &cancelReadCloser{
        cancel: cancel,
        pR: pR,
        pW: pW,
    }

    go func() {
        _, err := io.Copy(pW, in)
        select {
        case <-ctx.Done():
            // If the context was closed, p.closeWithError
            // was already called. Calling it again would
            // change the error that Read returns.
        default:
            p.closeWithError(err)
        }
        in.Close()
    }()
    go func() {
        for {
            select {
            case <-ctx.Done():
                p.closeWithError(ctx.Err())
            case <-doneCtx.Done():
                return
            }
        }
    }()

    return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
    return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
    p.pW.CloseWithError(err)
    p.cancel()
}

// Close closes the wrapper its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
    p.closeWithError(io.EOF)
    return nil
}
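Note (not part of the commit): a small usage sketch for two of the helpers above, HashData and NewReadCloserWrapper, assuming the vendored import path.

package main

import (
    "fmt"
    "strings"

    "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // HashData digests everything read from src and prefixes it with "sha256:".
    digest, err := ioutils.HashData(strings.NewReader("layer contents"))
    if err != nil {
        fmt.Println("hash:", err)
        return
    }
    fmt.Println(digest)

    // NewReadCloserWrapper bolts a custom Close onto a plain io.Reader, e.g.
    // to release a lock or remove a temp file once the caller is done.
    rc := ioutils.NewReadCloserWrapper(strings.NewReader("data"), func() error {
        fmt.Println("cleanup ran")
        return nil
    })
    defer rc.Close()
}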
vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go (10 additions, generated, vendored, normal file)
@@ -0,0 +1,10 @@
// +build !windows

package ioutils

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
    return ioutil.TempDir(dir, prefix)
}
vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go (18 additions, generated, vendored, normal file)
@@ -0,0 +1,18 @@
// +build windows

package ioutils

import (
    "io/ioutil"

    "github.com/containers/storage/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
    tempDir, err := ioutil.TempDir(dir, prefix)
    if err != nil {
        return "", err
    }
    return longpath.AddPrefix(tempDir), nil
}
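Note (not part of the commit): a usage sketch for the platform-specific TempDir pair above, assuming the vendored import path; the "ocid-test-" prefix is only an example.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // Build tags pick the implementation: plain ioutil.TempDir on Unix, and
    // the same path converted to \\?\ long-path form on Windows.
    dir, err := ioutils.TempDir("", "ocid-test-")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)

    fmt.Println("scratch dir:", dir)
}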
vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go (92 additions, generated, vendored, normal file)
@@ -0,0 +1,92 @@
package ioutils

import (
    "io"
    "sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the targets lifecycle has already ended.
type WriteFlusher struct {
    w io.Writer
    flusher flusher
    flushed chan struct{}
    flushedOnce sync.Once
    closed chan struct{}
    closeLock sync.Mutex
}

type flusher interface {
    Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
    select {
    case <-wf.closed:
        return 0, errWriteFlusherClosed
    default:
    }

    n, err = wf.w.Write(b)
    wf.Flush() // every write is a flush.
    return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
    select {
    case <-wf.closed:
        return
    default:
    }

    wf.flushedOnce.Do(func() {
        close(wf.flushed)
    })
    wf.flusher.Flush()
}

// Flushed returns the state of flushed.
// If it's flushed, return true, or else it return false.
func (wf *WriteFlusher) Flushed() bool {
    // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
    // be used to detect whether or a response code has been issued or not.
    // Another hook should be used instead.
    var flushed bool
    select {
    case <-wf.flushed:
        flushed = true
    default:
    }
    return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
    wf.closeLock.Lock()
    defer wf.closeLock.Unlock()

    select {
    case <-wf.closed:
        return errWriteFlusherClosed
    default:
        close(wf.closed)
    }
    return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
    var fl flusher
    if f, ok := w.(flusher); ok {
        fl = f
    } else {
        fl = &NopFlusher{}
    }
    return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
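Note (not part of the commit): a minimal WriteFlusher sketch, assuming the vendored import path. A bytes.Buffer is used as the target only for brevity; it has no Flush method, so the NopFlusher fallback applies.

package main

import (
    "bytes"
    "fmt"

    "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // With something like an http.ResponseWriter, every Write would also be
    // flushed to the client immediately; here the Flush is a no-op.
    var buf bytes.Buffer
    wf := ioutils.NewWriteFlusher(&buf)

    wf.Write([]byte("streamed chunk\n"))
    fmt.Println(wf.Flushed()) // true: every Write triggers a Flush

    wf.Close()
    if _, err := wf.Write([]byte("too late")); err != nil {
        fmt.Println("write after close:", err) // errWriteFlusherClosed (io.EOF)
    }
}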
vendor/github.com/containers/storage/pkg/ioutils/writers.go (66 additions, generated, vendored, normal file)
@@ -0,0 +1,66 @@
package ioutils

import "io"

// NopWriter represents a type which write operation is nop.
type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
    return len(buf), nil
}

type nopWriteCloser struct {
    io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
    return &nopWriteCloser{w}
}

// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}

// Flush is a nop operation.
func (f *NopFlusher) Flush() {}

type writeCloserWrapper struct {
    io.Writer
    closer func() error
}

func (r *writeCloserWrapper) Close() error {
    return r.closer()
}

// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
    return &writeCloserWrapper{
        Writer: r,
        closer: closer,
    }
}

// WriteCounter wraps a concrete io.Writer and hold a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
    Count int64
    Writer io.Writer
}

// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
    return &WriteCounter{
        Writer: w,
    }
}

func (wc *WriteCounter) Write(p []byte) (count int, err error) {
    count, err = wc.Writer.Write(p)
    wc.Count += int64(count)
    return
}
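Note (not part of the commit): a small WriteCounter usage sketch, assuming the vendored import path; it illustrates the "masked write return" case mentioned in the comment above.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"

    pkgioutils "github.com/containers/storage/pkg/ioutils"
)

func main() {
    // json.Encoder never reports how many bytes it wrote, so wrap the
    // destination in a WriteCounter and read the running total afterwards.
    wc := pkgioutils.NewWriteCounter(ioutil.Discard)
    if err := json.NewEncoder(wc).Encode(map[string]string{"runtime": "ocid"}); err != nil {
        fmt.Println("encode:", err)
        return
    }
    fmt.Printf("encoded %d bytes\n", wc.Count)
}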