diff --git a/archive/archive_unix.go b/archive/archive_unix.go index 8a15cfe..9e1dfad 100644 --- a/archive/archive_unix.go +++ b/archive/archive_unix.go @@ -11,7 +11,7 @@ import ( "github.com/docker/docker/pkg/system" ) -// canonicalTarNameForPath returns platform-specific filepath +// CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. func CanonicalTarNameForPath(p string) (string, error) { diff --git a/archive/utils_test.go b/archive/utils_test.go index 2a266c2..f5cacea 100644 --- a/archive/utils_test.go +++ b/archive/utils_test.go @@ -133,7 +133,7 @@ func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { helloStat.Size() != fi.Size() || !bytes.Equal(helloData, b) { // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) } // Check that nothing in dest/ has the same content as victim/hello. diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index 95f234e..06db8b2 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -58,6 +58,10 @@ func untar() { os.Exit(0) } +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. 
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { if tarArchive == nil { return fmt.Errorf("Empty archive") @@ -133,17 +137,18 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) } return nil - } else { - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Untar %s %s", err, out) - } - return nil } + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Untar %s %s", err, out) + } + return nil } +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. func TarUntar(src, dst string) error { return chrootArchiver.TarUntar(src, dst) } diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go index 334679d..c99aed0 100644 --- a/chrootarchive/diff.go +++ b/chrootarchive/diff.go @@ -69,6 +69,9 @@ func applyLayer() { os.Exit(0) } +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { dest = filepath.Clean(dest) decompressed, err := archive.DecompressStream(layer) diff --git a/fileutils/fileutils.go b/fileutils/fileutils.go index 633fbef..8575150 100644 --- a/fileutils/fileutils.go +++ b/fileutils/fileutils.go @@ -20,7 +20,7 @@ func Empty(pattern string) bool { return pattern == "" } -// Cleanpatterns takes a slice of patterns returns a new +// CleanPatterns takes a slice of patterns and returns a new // slice of patterns cleaned with filepath.Clean, stripped // of any empty patterns and lets the caller know whether the // slice contains any exception patterns (prefixed with !). @@ -73,7 +73,7 @@ func Matches(file string, patterns []string) (bool, error) { return OptimizedMatches(file, patterns, patDirs) } -// Matches is basically the same as fileutils.Matches() but optimized for archive.go. +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. // It will assume that the inputs have been preprocessed and therefore the function // doen't need to do as much error checking and clean-up. This was done to avoid // repeating these steps on each file being checked during the archive process. diff --git a/graphdb/conn_sqlite3.go b/graphdb/conn_sqlite3.go index 455790a..2be9112 100644 --- a/graphdb/conn_sqlite3.go +++ b/graphdb/conn_sqlite3.go @@ -8,6 +8,8 @@ import ( _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) +// NewSqliteConn opens a connection to a sqlite +// database. 
func NewSqliteConn(root string) (*Database, error) { conn, err := sql.Open("sqlite3", root) if err != nil { diff --git a/graphdb/graphdb.go b/graphdb/graphdb.go index b9433db..93cb029 100644 --- a/graphdb/graphdb.go +++ b/graphdb/graphdb.go @@ -41,17 +41,25 @@ type Edge struct { ParentID string } +// Entities stores the list of entities type Entities map[string]*Entity + +// Edges stores the relationships between entities type Edges []*Edge +// WalkFunc is a function invoked to process an individual entity type WalkFunc func(fullPath string, entity *Entity) error -// Graph database for storing entities and their relationships +// Database is a graph database for storing entities and their relationships type Database struct { conn *sql.DB mux sync.RWMutex } +// IsNonUniqueNameError processes the error to check if it's caused by +// a constraint violation. +// This is necessary because the error isn't the same across various +// sqlite versions. func IsNonUniqueNameError(err error) bool { str := err.Error() // sqlite 3.7.17-1ubuntu1 returns: @@ -72,7 +80,7 @@ func IsNonUniqueNameError(err error) bool { return false } -// Create a new graph database initialized with a root entity +// NewDatabase creates a new graph database initialized with a root entity func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") @@ -163,7 +171,7 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) { return e, nil } -// Return true if a name already exists in the database +// Exists returns true if a name already exists in the database func (db *Database) Exists(name string) bool { db.mux.RLock() defer db.mux.RUnlock() @@ -190,14 +198,14 @@ func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) erro return nil } -// Return the root "/" entity for the database +// RootEntity returns the root "/" entity for the database func (db *Database) RootEntity() *Entity { return &Entity{ id: "0", 
} } -// Return the entity for a given path +// Get returns the entity for a given path func (db *Database) Get(name string) *Entity { db.mux.RLock() defer db.mux.RUnlock() @@ -274,7 +282,7 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { return nil } -// Return the children of the specified entity +// Children returns the children of the specified entity func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { db.mux.RLock() defer db.mux.RUnlock() @@ -287,7 +295,7 @@ func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { return db.children(e, name, depth, nil) } -// Return the parents of a specified entity +// Parents returns the parents of a specified entity func (db *Database) Parents(name string) ([]string, error) { db.mux.RLock() defer db.mux.RUnlock() @@ -299,7 +307,7 @@ func (db *Database) Parents(name string) ([]string, error) { return db.parents(e) } -// Return the refrence count for a specified id +// Refs returns the reference count for a specified id func (db *Database) Refs(id string) int { db.mux.RLock() defer db.mux.RUnlock() @@ -311,7 +319,7 @@ func (db *Database) Refs(id string) int { return count } -// Return all the id's path references +// RefPaths returns all the id's path references func (db *Database) RefPaths(id string) Edges { db.mux.RLock() defer db.mux.RUnlock() @@ -360,7 +368,7 @@ func (db *Database) Delete(name string) error { return nil } -// Remove the entity with the specified id +// Purge removes the entity with the specified id // Walk the graph to make sure all references to the entity // are removed and return the number of references removed func (db *Database) Purge(id string) (int, error) { @@ -480,7 +488,7 @@ func (db *Database) children(e *Entity, name string, depth int, entities []WalkM if depth != 0 { nDepth := depth if depth != -1 { - nDepth -= 1 + nDepth-- } entities, err = db.children(child, meta.FullPath, nDepth, entities) if err != nil { @@ -523,12 
+531,12 @@ func (db *Database) child(parent *Entity, name string) *Entity { return &Entity{id} } -// Return the id used to reference this entity +// ID returns the id used to reference this entity func (e *Entity) ID() string { return e.id } -// Return the paths sorted by depth +// Paths returns the paths sorted by depth func (e Entities) Paths() []string { out := make([]string, len(e)) var i int diff --git a/graphdb/utils.go b/graphdb/utils.go index bdbcd79..9edd79c 100644 --- a/graphdb/utils.go +++ b/graphdb/utils.go @@ -10,7 +10,7 @@ func split(p string) []string { return strings.Split(p, "/") } -// Returns the depth or number of / in a given path +// PathDepth returns the depth or number of / in a given path func PathDepth(p string) int { parts := split(p) if len(parts) == 2 && parts[1] == "" { diff --git a/httputils/httputils.go b/httputils/httputils.go index 1c92224..02d22ee 100644 --- a/httputils/httputils.go +++ b/httputils/httputils.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/pkg/jsonmessage" ) -// Request a given URL and return an io.Reader +// Download requests a given URL and returns an io.Reader func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err @@ -18,6 +18,7 @@ func Download(url string) (resp *http.Response, err error) { return resp, nil } +// NewHTTPRequestError returns a JSON response error func NewHTTPRequestError(msg string, res *http.Response) error { return &jsonmessage.JSONError{ Message: msg, diff --git a/httputils/resumablerequestreader.go b/httputils/resumablerequestreader.go index f690d0e..bebc860 100644 --- a/httputils/resumablerequestreader.go +++ b/httputils/resumablerequestreader.go @@ -26,6 +26,8 @@ func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, tot return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} } +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// 
reading the body of an already initiated request. func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} } diff --git a/listenbuffer/buffer.go b/listenbuffer/buffer.go index 97d622c..aa47471 100644 --- a/listenbuffer/buffer.go +++ b/listenbuffer/buffer.go @@ -1,5 +1,5 @@ /* -listenbuffer uses the kernel's listening backlog functionality to queue +Package listenbuffer uses the kernel's listening backlog functionality to queue connections, allowing applications to start listening immediately and handle connections later. This is signaled by closing the activation channel passed to the constructor.