From d53bedb1b7ef4fa3d043356882a7037b15e0515f Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 25 Sep 2014 12:55:53 -0400 Subject: [PATCH 01/99] daemon logging: unifying output and timestamps A little refactor of the ./pkg/log so engine can have a logger instance Signed-off-by: Vincent Batts --- iptables/iptables.go | 7 ++-- log/log.go | 79 ++++++++++++++++++++++++++++++-------------- log/log_test.go | 12 ++++--- signal/trap.go | 7 ++-- 4 files changed, 69 insertions(+), 36 deletions(-) diff --git a/iptables/iptables.go b/iptables/iptables.go index 88d8b5f..b8d9e56 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -4,11 +4,12 @@ import ( "errors" "fmt" "net" - "os" "os/exec" "regexp" "strconv" "strings" + + "github.com/docker/docker/pkg/log" ) type Action string @@ -175,9 +176,7 @@ func Raw(args ...string) ([]byte, error) { args = append([]string{"--wait"}, args...) } - if os.Getenv("DEBUG") != "" { - fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s, %v\n", path, args)) - } + log.Debugf("%s, %v", path, args) output, err := exec.Command(path, args...).CombinedOutput() if err != nil { diff --git a/log/log.go b/log/log.go index 53be6cf..b06d958 100644 --- a/log/log.go +++ b/log/log.go @@ -6,18 +6,21 @@ import ( "os" "runtime" "strings" + "time" + + "github.com/docker/docker/pkg/timeutils" ) type priority int const ( - errorFormat = "[%s] %s:%d %s\n" - logFormat = "[%s] %s\n" + errorFormat = "[%s] [%s] %s:%d %s\n" + logFormat = "[%s] [%s] %s\n" - fatal priority = iota - error - info - debug + fatalPriority priority = iota + errorPriority + infoPriority + debugPriority ) // A common interface to access the Fatal method of @@ -28,44 +31,72 @@ type Fataler interface { func (p priority) String() string { switch p { - case fatal: + case fatalPriority: return "fatal" - case error: + case errorPriority: return "error" - case info: + case infoPriority: return "info" - case debug: + case debugPriority: return "debug" } return "" } +var DefaultLogger = 
Logger{Out: os.Stdout, Err: os.Stderr} + // Debug function, if the debug flag is set, then display. Do nothing otherwise // If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) { - if os.Getenv("DEBUG") != "" { - logf(os.Stderr, debug, format, a...) - } +func Debugf(format string, a ...interface{}) (int, error) { + return DefaultLogger.Debugf(format, a...) } -func Infof(format string, a ...interface{}) { - logf(os.Stdout, info, format, a...) +func Infof(format string, a ...interface{}) (int, error) { + return DefaultLogger.Infof(format, a...) } -func Errorf(format string, a ...interface{}) { - logf(os.Stderr, error, format, a...) +func Errorf(format string, a ...interface{}) (int, error) { + return DefaultLogger.Errorf(format, a...) +} + +func Fatal(a ...interface{}) { + DefaultLogger.Fatalf("%s", a...) } func Fatalf(format string, a ...interface{}) { - logf(os.Stderr, fatal, format, a...) + DefaultLogger.Fatalf(format, a...) +} + +type Logger struct { + Err io.Writer + Out io.Writer +} + +func (l Logger) Debugf(format string, a ...interface{}) (int, error) { + if os.Getenv("DEBUG") != "" { + return l.logf(l.Err, debugPriority, format, a...) + } + return 0, nil +} + +func (l Logger) Infof(format string, a ...interface{}) (int, error) { + return l.logf(l.Out, infoPriority, format, a...) +} + +func (l Logger) Errorf(format string, a ...interface{}) (int, error) { + return l.logf(l.Err, errorPriority, format, a...) +} + +func (l Logger) Fatalf(format string, a ...interface{}) { + l.logf(l.Err, fatalPriority, format, a...) 
os.Exit(1) } -func logf(stream io.Writer, level priority, format string, a ...interface{}) { +func (l Logger) logf(stream io.Writer, level priority, format string, a ...interface{}) (int, error) { var prefix string - if level <= error || level == debug { + if level <= errorPriority || level == debugPriority { // Retrieve the stack infos _, file, line, ok := runtime.Caller(2) if !ok { @@ -74,10 +105,10 @@ func logf(stream io.Writer, level priority, format string, a ...interface{}) { } else { file = file[strings.LastIndex(file, "/")+1:] } - prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format) + prefix = fmt.Sprintf(errorFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), file, line, format) } else { - prefix = fmt.Sprintf(logFormat, level.String(), format) + prefix = fmt.Sprintf(logFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), format) } - fmt.Fprintf(stream, prefix, a...) + return fmt.Fprintf(stream, prefix, a...) } diff --git a/log/log_test.go b/log/log_test.go index 83ba5fd..4f5b3f8 100644 --- a/log/log_test.go +++ b/log/log_test.go @@ -7,6 +7,8 @@ import ( "testing" ) +var reRFC3339NanoFixed = "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{9}.([0-9]{2}:[0-9]{2})?" 
+ func TestLogFatalf(t *testing.T) { var output *bytes.Buffer @@ -16,15 +18,15 @@ func TestLogFatalf(t *testing.T) { Values []interface{} ExpectedPattern string }{ - {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"}, - {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {fatalPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {errorPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {infoPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[info\\] 1 \\+ 1 = 2"}, + {debugPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, } for i, test := range tests { output = &bytes.Buffer{} - logf(output, test.Level, test.Format, test.Values...) + DefaultLogger.logf(output, test.Level, test.Format, test.Values...) 
expected := regexp.MustCompile(test.ExpectedPattern) if !expected.MatchString(output.String()) { diff --git a/signal/trap.go b/signal/trap.go index cbdfd1f..42ddb4d 100644 --- a/signal/trap.go +++ b/signal/trap.go @@ -1,11 +1,12 @@ package signal import ( - "log" "os" gosignal "os/signal" "sync/atomic" "syscall" + + "github.com/docker/docker/pkg/log" ) // Trap sets up a simplified signal "trap", appropriate for common @@ -28,7 +29,7 @@ func Trap(cleanup func()) { interruptCount := uint32(0) for sig := range c { go func(sig os.Signal) { - log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + log.Infof("Received signal '%v', starting shutdown of docker...", sig) switch sig { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. @@ -43,7 +44,7 @@ func Trap(cleanup func()) { return } } else { - log.Printf("Force shutdown of docker, interrupting cleanup\n") + log.Infof("Force shutdown of docker, interrupting cleanup") } case syscall.SIGQUIT: } From 56d6d5888c95f3433c555b32997f1e93ae2830f8 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 23 Sep 2014 17:24:52 -0700 Subject: [PATCH 02/99] Adding self to various maintainers files. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- iptables/MAINTAINERS | 1 + units/MAINTAINERS | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/iptables/MAINTAINERS b/iptables/MAINTAINERS index 1e998f8..134b02a 100644 --- a/iptables/MAINTAINERS +++ b/iptables/MAINTAINERS @@ -1 +1,2 @@ Michael Crosby (@crosbymichael) +Jessie Frazelle (@jfrazelle) diff --git a/units/MAINTAINERS b/units/MAINTAINERS index 68a97d2..96abeae 100644 --- a/units/MAINTAINERS +++ b/units/MAINTAINERS @@ -1,2 +1,2 @@ -Michael Crosby (@crosbymichael) Victor Vieux (@vieux) +Jessie Frazelle (@jfrazelle) From 1a8f9d99898e6564dcc801e07935fc48edeaf57c Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 3 Oct 2014 10:17:42 -0400 Subject: [PATCH 03/99] Add more names Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- namesgenerator/names-generator.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/namesgenerator/names-generator.go b/namesgenerator/names-generator.go index ebb5850..30df30f 100644 --- a/namesgenerator/names-generator.go +++ b/namesgenerator/names-generator.go @@ -7,7 +7,7 @@ import ( ) var ( - left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} + left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", 
"condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil", "admiring", "adoring", "reverent", "serene", "fervent", "modest", "gloomy", "elated"} // Docker 0.7.x generates names from notable scientists and hackers. // // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) @@ -22,6 +22,7 @@ var ( // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. http://en.wikipedia.org/wiki/Dorothy_Hodgkin // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) @@ -31,6 +32,7 @@ var ( // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. 
http://en.wikipedia.org/wiki/Galileo_Galilei // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. http://en.wikipedia.org/wiki/Gerty_Cori // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper // Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia @@ -64,6 +66,7 @@ var ( // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. 
http://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking @@ -73,7 +76,7 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"} + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", 
"mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} ) func GetRandomName(retry int) string { From 6f66d8a30f94d5cfad15b0f1c2036391f19f087d Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 18:41:53 +0300 Subject: [PATCH 04/99] pkg/version: lint and add comments --- version/version.go | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/version/version.go b/version/version.go index 6a7d635..cc802a6 100644 --- a/version/version.go +++ b/version/version.go @@ -5,53 +5,59 @@ import ( "strings" ) +// Version provides utility methods for comparing versions. type Version string -func (me Version) compareTo(other Version) int { +func (v Version) compareTo(other Version) int { var ( - meTab = strings.Split(string(me), ".") + currTab = strings.Split(string(v), ".") otherTab = strings.Split(string(other), ".") ) - max := len(meTab) + max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { - var meInt, otherInt int + var currInt, otherInt int - if len(meTab) > i { - meInt, _ = strconv.Atoi(meTab[i]) + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } - if meInt > otherInt { + if currInt > otherInt { return 1 } - if otherInt > meInt { + if otherInt > currInt { return -1 } } return 0 } -func (me Version) LessThan(other Version) bool { - return me.compareTo(other) == -1 +// LessThan checks if a version is less than another version +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 } -func (me Version) LessThanOrEqualTo(other Version) bool { - return me.compareTo(other) <= 0 +// LessThanOrEqualTo checks if a version is less than or equal 
to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 } -func (me Version) GreaterThan(other Version) bool { - return me.compareTo(other) == 1 +// GreaterThan checks if a version is greater than another one +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 } -func (me Version) GreaterThanOrEqualTo(other Version) bool { - return me.compareTo(other) >= 0 +// GreaterThanOrEqualTo checks ia version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 } -func (me Version) Equal(other Version) bool { - return me.compareTo(other) == 0 +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 } From d250fdea613c79804fe6dff720b6153a1be133e1 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:00:58 +0300 Subject: [PATCH 05/99] pkg/truncindex: lint and add comments Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- truncindex/truncindex.go | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/truncindex/truncindex.go b/truncindex/truncindex.go index 89aa88d..c5b7175 100644 --- a/truncindex/truncindex.go +++ b/truncindex/truncindex.go @@ -10,7 +10,9 @@ import ( ) var ( - ErrNoID = errors.New("prefix can't be empty") + // ErrNoID is thrown when attempting to use empty prefixes + ErrNoID = errors.New("prefix can't be empty") + errDuplicateID = errors.New("multiple IDs were found") ) func init() { @@ -27,56 +29,62 @@ type TruncIndex struct { ids map[string]struct{} } +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), trie: patricia.NewTrie(), } for _, id := range ids { - idx.addId(id) + idx.addID(id) } return } -func (idx *TruncIndex) addId(id 
string) error { +func (idx *TruncIndex) addID(id string) error { if strings.Contains(id, " ") { - return fmt.Errorf("Illegal character: ' '") + return fmt.Errorf("illegal character: ' '") } if id == "" { return ErrNoID } if _, exists := idx.ids[id]; exists { - return fmt.Errorf("Id already exists: '%s'", id) + return fmt.Errorf("id already exists: '%s'", id) } idx.ids[id] = struct{}{} if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("Failed to insert id: %s", id) + return fmt.Errorf("failed to insert id: %s", id) } return nil } +// Add adds a new ID to the TruncIndex func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() - if err := idx.addId(id); err != nil { + if err := idx.addID(id); err != nil { return err } return nil } +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } delete(idx.ids, id) if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } return nil } +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
func (idx *TruncIndex) Get(s string) (string, error) { idx.RLock() defer idx.RUnlock() @@ -90,17 +98,17 @@ func (idx *TruncIndex) Get(s string) (string, error) { if id != "" { // we haven't found the ID if there are two or more IDs id = "" - return fmt.Errorf("we've found two entries") + return errDuplicateID } id = string(prefix) return nil } if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } if id != "" { return id, nil } - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } From bce8f57f1bb1c11c334a6bdc924ee5c72c99ad54 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:19:41 +0300 Subject: [PATCH 06/99] pkg/units: lint Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- units/size.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/units/size.go b/units/size.go index ea39bbd..853b555 100644 --- a/units/size.go +++ b/units/size.go @@ -10,6 +10,7 @@ import ( // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal + KB = 1000 MB = 1000 * KB GB = 1000 * MB @@ -17,6 +18,7 @@ const ( PB = 1000 * TB // Binary + KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB @@ -52,7 +54,7 @@ func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } -// Parses a human-readable string representing an amount of RAM +// RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. 
@@ -64,7 +66,7 @@ func RAMInBytes(size string) (int64, error) { func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", sizeStr) + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } size, err := strconv.ParseInt(matches[1], 10, 0) From 730301be76bae7b31d16810382f1f56aee78660a Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:27:56 +0300 Subject: [PATCH 07/99] pkg/timeutils: lint and add comments Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- timeutils/json.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/timeutils/json.go b/timeutils/json.go index 19f107b..8043d69 100644 --- a/timeutils/json.go +++ b/timeutils/json.go @@ -6,18 +6,21 @@ import ( ) const ( - // Define our own version of RFC339Nano because we want one + // RFC3339NanoFixed is our own version of RFC339Nano because we want one // that pads the nano seconds part with zeros to ensure // the timestamps are aligned in the logs. RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - JSONFormat = `"` + time.RFC3339Nano + `"` + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` ) +// FastMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. func FastMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("Time.MarshalJSON: year outside of range [0,9999]") + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") } return t.Format(JSONFormat), nil } From 68c42446a1ed4b3b38b375b7b69968afb2a24fa2 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:57:27 +0300 Subject: [PATCH 08/99] pkg/graphdb: some linting Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- graphdb/graphdb.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/graphdb/graphdb.go b/graphdb/graphdb.go index 59873fe..450bd50 100644 --- a/graphdb/graphdb.go +++ b/graphdb/graphdb.go @@ -131,8 +131,8 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) { if _, err := db.conn.Exec("BEGIN EXCLUSIVE"); err != nil { return nil, err } - var entityId string - if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityId); err != nil { + var entityID string + if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { if err == sql.ErrNoRows { if _, err := db.conn.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { rollback() @@ -320,14 +320,14 @@ func (db *Database) RefPaths(id string) Edges { for rows.Next() { var name string - var parentId string - if err := rows.Scan(&name, &parentId); err != nil { + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, - ParentID: parentId, + ParentID: parentID, }) } return refs @@ -443,11 +443,11 @@ func (db *Database) children(e *Entity, name string, depth int, entities []WalkM defer rows.Close() for rows.Next() { - var entityId, entityName string - if err := rows.Scan(&entityId, &entityName); err != nil { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { return nil, err } - child := &Entity{entityId} + child := &Entity{entityID} edge := &Edge{ 
ParentID: e.id, Name: entityName, @@ -490,11 +490,11 @@ func (db *Database) parents(e *Entity) (parents []string, err error) { defer rows.Close() for rows.Next() { - var parentId string - if err := rows.Scan(&parentId); err != nil { + var parentID string + if err := rows.Scan(&parentID); err != nil { return nil, err } - parents = append(parents, parentId) + parents = append(parents, parentID) } return parents, nil From 16f6e4744aaefb24872ec57491f5906b34a7cf20 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 14 Oct 2014 03:54:32 +0000 Subject: [PATCH 09/99] add BytesSize in pkg/units Signed-off-by: Victor Vieux --- units/size.go | 18 +++++++++++++----- units/size_test.go | 10 ++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/units/size.go b/units/size.go index ea39bbd..eb2d887 100644 --- a/units/size.go +++ b/units/size.go @@ -32,18 +32,26 @@ var ( sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) ) -var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // HumanSize returns a human-readable approximation of a size // using SI standard (eg. 
"44kB", "17MB") func HumanSize(size int64) string { + return intToString(float64(size), 1000.0, decimapAbbrs) +} + +func BytesSize(size float64) string { + return intToString(size, 1024.0, binaryAbbrs) +} + +func intToString(size, unit float64, _map []string) string { i := 0 - sizef := float64(size) - for sizef >= 1000.0 { - sizef = sizef / 1000.0 + for size >= unit { + size = size / unit i++ } - return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i]) + return fmt.Sprintf("%.4g %s", size, _map[i]) } // FromHumanSize returns an integer from a human-readable specification of a diff --git a/units/size_test.go b/units/size_test.go index 8dae7e7..5b329fc 100644 --- a/units/size_test.go +++ b/units/size_test.go @@ -7,6 +7,16 @@ import ( "testing" ) +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + func TestHumanSize(t *testing.T) { assertEquals(t, "1 kB", HumanSize(1000)) assertEquals(t, "1.024 kB", HumanSize(1024)) From ab81bfc8f5768fc3e361294a025204ff0bd9aeb0 Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Mon, 13 Oct 2014 06:12:44 +0000 Subject: [PATCH 10/99] Adding capability to filter by name, id or status to list containers api Closes #7599 Signed-off-by: Srini Brahmaroutu --- parsers/filters/parse.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/parsers/filters/parse.go b/parsers/filters/parse.go index 27c7132..4039592 100644 --- a/parsers/filters/parse.go +++ b/parsers/filters/parse.go @@ -3,6 +3,7 @@ package filters import ( "encoding/json" "errors" + "regexp" "strings" ) @@ -61,3 +62,22 @@ func FromParam(p string) (Args, error) { } return args, nil } + +func (filters Args) Match(field, source string) bool { + 
fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} From b81a28fa8b5712c2e1b056ad13ede4613de8a905 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 24 Sep 2014 09:07:11 -0400 Subject: [PATCH 11/99] Make container.Copy support volumes Fixes #1992 Right now when you `docker cp` a path which is in a volume, the cp itself works, however you end up getting files that are in the container's fs rather than the files in the volume (which is not in the container's fs). This makes it so when you `docker cp` a path that is in a volume it follows the volume to the real path on the host. archive.go has been modified so that when you do `docker cp mydata:/foo .`, and /foo is the volume, the outputed folder is called "foo" instead of the volume ID (because we are telling it to tar up `/var/lib/docker/vfs/dir/` and not "foo", but the user would be expecting "foo", not the ID Signed-off-by: Brian Goff --- archive/archive.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/archive/archive.go b/archive/archive.go index 7d9103e..9814916 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -34,6 +34,7 @@ type ( Excludes []string Compression Compression NoLchown bool + Name string } ) @@ -359,6 +360,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) twBuf := pools.BufioWriter32KPool.Get(nil) defer pools.BufioWriter32KPool.Put(twBuf) + var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { @@ -384,6 +386,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, 
error) return nil } + // Rename the base resource + if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { + renamedRelFilePath = relFilePath + } + // Set this to make sure the items underneath also get renamed + if options.Name != "" { + relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + } + if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { log.Debugf("Can't add file %s to tar: %s", srcPath, err) } From 6aeaba297ca93e938e8185eb2882f6a84c7290f2 Mon Sep 17 00:00:00 2001 From: Zach Borboa Date: Wed, 1 Oct 2014 18:26:36 -0700 Subject: [PATCH 12/99] Fix typo Signed-off-by: Zach Borboa --- namesgenerator/names-generator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/namesgenerator/names-generator.go b/namesgenerator/names-generator.go index ebb5850..beb8a95 100644 --- a/namesgenerator/names-generator.go +++ b/namesgenerator/names-generator.go @@ -56,7 +56,7 @@ var ( // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. - // Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. 
http://en.wikipedia.org/wiki/Rachel_Carson // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman From 8e6399648422c4415f5b67470959966bc9bcf77d Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Mon, 13 Oct 2014 20:41:22 -0700 Subject: [PATCH 13/99] Add MemInfo to the system pkg. MemInfo provides a simple API to get memory information from the system. Signed-off-by: Andrea Luzzardi --- system/meminfo.go | 17 +++++++++ system/meminfo_linux.go | 67 +++++++++++++++++++++++++++++++++++ system/meminfo_linux_test.go | 37 +++++++++++++++++++ system/meminfo_unsupported.go | 7 ++++ 4 files changed, 128 insertions(+) create mode 100644 system/meminfo.go create mode 100644 system/meminfo_linux.go create mode 100644 system/meminfo_linux_test.go create mode 100644 system/meminfo_unsupported.go diff --git a/system/meminfo.go b/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/system/meminfo_linux.go b/system/meminfo_linux.go new file mode 100644 index 0000000..b7de3ff --- /dev/null +++ b/system/meminfo_linux.go @@ -0,0 +1,67 @@ +package system + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/docker/pkg/units" +) + +var ( + ErrMalformed = errors.New("malformed file") +) + +// Retrieve memory statistics of the host system and parse them into a MemInfo +// type. 
+func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/system/meminfo_linux_test.go b/system/meminfo_linux_test.go new file mode 100644 index 0000000..377405e --- /dev/null +++ b/system/meminfo_linux_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "strings" + "testing" + + "github.com/docker/docker/pkg/units" +) + +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff 
--git a/system/meminfo_unsupported.go b/system/meminfo_unsupported.go new file mode 100644 index 0000000..63b8b16 --- /dev/null +++ b/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package system + +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} From 396f1dd1255c84b79ead6083cc712c82f770a8b7 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 26 Sep 2014 11:55:23 -0400 Subject: [PATCH 14/99] archive: tests and benchmarks for hardlinks Adding moar information, so benchmark comparisons can be moar comparative. Signed-off-by: Vincent Batts --- archive/archive_test.go | 111 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 107 insertions(+), 4 deletions(-) diff --git a/archive/archive_test.go b/archive/archive_test.go index b46f953..900fff5 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path" + "syscall" "testing" "time" @@ -63,6 +64,50 @@ func TestCmdStreamGood(t *testing.T) { } } +func TestTarFiles(t *testing.T) { + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + func tarUntar(t 
*testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { @@ -204,13 +249,42 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Nlink, nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } } totalSize := numberOfFiles * len(fileData) return totalSize, nil @@ -226,14 +300,43 @@ func BenchmarkTarUntar(b *testing.B) { b.Fatal(err) } target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin) + n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } - b.ResetTimer() - b.SetBytes(int64(n)) defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", 
"docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { From 712a6554cec44a7947db37ca2572aed6921a74ac Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 10:12:35 -0700 Subject: [PATCH 15/99] Use logrus everywhere for logging Fixed #8761 Signed-off-by: Alexandr Morozov --- archive/archive.go | 2 +- archive/changes.go | 2 +- broadcastwriter/broadcastwriter.go | 2 +- fileutils/fileutils.go | 2 +- httputils/resumablerequestreader.go | 2 +- iptables/iptables.go | 2 +- log/log.go | 9 +++++++++ signal/trap.go | 2 +- stdcopy/stdcopy.go | 2 +- tarsum/tarsum.go | 2 +- 10 files changed, 18 insertions(+), 9 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 9814916..e4db63a 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -19,7 +19,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" diff --git a/archive/changes.go b/archive/changes.go index 5fbdcc9..557b5db 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) diff --git a/broadcastwriter/broadcastwriter.go 
b/broadcastwriter/broadcastwriter.go index 1898302..a9ae104 100644 --- a/broadcastwriter/broadcastwriter.go +++ b/broadcastwriter/broadcastwriter.go @@ -7,7 +7,7 @@ import ( "time" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // BroadcastWriter accumulate multiple io.WriteCloser by stream. diff --git a/fileutils/fileutils.go b/fileutils/fileutils.go index acc27f5..4e4a91b 100644 --- a/fileutils/fileutils.go +++ b/fileutils/fileutils.go @@ -1,7 +1,7 @@ package fileutils import ( - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "path/filepath" ) diff --git a/httputils/resumablerequestreader.go b/httputils/resumablerequestreader.go index 3cd1f49..10edd43 100644 --- a/httputils/resumablerequestreader.go +++ b/httputils/resumablerequestreader.go @@ -6,7 +6,7 @@ import ( "net/http" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type resumableRequestReader struct { diff --git a/iptables/iptables.go b/iptables/iptables.go index b8d9e56..53e6e14 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type Action string diff --git a/log/log.go b/log/log.go index b06d958..d636f76 100644 --- a/log/log.go +++ b/log/log.go @@ -8,9 +8,18 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/timeutils" ) +func init() { + log.SetOutput(os.Stderr) + log.SetLevel(log.InfoLevel) + if os.Getenv("DEBUG") != "" { + log.SetLevel(log.DebugLevel) + } +} + type priority int const ( diff --git a/signal/trap.go b/signal/trap.go index 42ddb4d..9be8267 100644 --- a/signal/trap.go +++ b/signal/trap.go @@ -6,7 +6,7 @@ import ( "sync/atomic" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // Trap sets up a simplified signal "trap", appropriate for common diff --git 
a/stdcopy/stdcopy.go b/stdcopy/stdcopy.go index 79e15bc..a61779c 100644 --- a/stdcopy/stdcopy.go +++ b/stdcopy/stdcopy.go @@ -5,7 +5,7 @@ import ( "errors" "io" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( diff --git a/tarsum/tarsum.go b/tarsum/tarsum.go index 6581f3f..88d603c 100644 --- a/tarsum/tarsum.go +++ b/tarsum/tarsum.go @@ -13,7 +13,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( From 7dae3e3de3ac9160607a98109d6339eead7a86b0 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 13:47:52 -0700 Subject: [PATCH 16/99] Remove pkg/log Signed-off-by: Alexandr Morozov --- log/log.go | 123 ------------------------------------------------ log/log_test.go | 39 --------------- 2 files changed, 162 deletions(-) delete mode 100644 log/log.go delete mode 100644 log/log_test.go diff --git a/log/log.go b/log/log.go deleted file mode 100644 index d636f76..0000000 --- a/log/log.go +++ /dev/null @@ -1,123 +0,0 @@ -package log - -import ( - "fmt" - "io" - "os" - "runtime" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/timeutils" -) - -func init() { - log.SetOutput(os.Stderr) - log.SetLevel(log.InfoLevel) - if os.Getenv("DEBUG") != "" { - log.SetLevel(log.DebugLevel) - } -} - -type priority int - -const ( - errorFormat = "[%s] [%s] %s:%d %s\n" - logFormat = "[%s] [%s] %s\n" - - fatalPriority priority = iota - errorPriority - infoPriority - debugPriority -) - -// A common interface to access the Fatal method of -// both testing.B and testing.T. 
-type Fataler interface { - Fatal(args ...interface{}) -} - -func (p priority) String() string { - switch p { - case fatalPriority: - return "fatal" - case errorPriority: - return "error" - case infoPriority: - return "info" - case debugPriority: - return "debug" - } - - return "" -} - -var DefaultLogger = Logger{Out: os.Stdout, Err: os.Stderr} - -// Debug function, if the debug flag is set, then display. Do nothing otherwise -// If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) (int, error) { - return DefaultLogger.Debugf(format, a...) -} - -func Infof(format string, a ...interface{}) (int, error) { - return DefaultLogger.Infof(format, a...) -} - -func Errorf(format string, a ...interface{}) (int, error) { - return DefaultLogger.Errorf(format, a...) -} - -func Fatal(a ...interface{}) { - DefaultLogger.Fatalf("%s", a...) -} - -func Fatalf(format string, a ...interface{}) { - DefaultLogger.Fatalf(format, a...) -} - -type Logger struct { - Err io.Writer - Out io.Writer -} - -func (l Logger) Debugf(format string, a ...interface{}) (int, error) { - if os.Getenv("DEBUG") != "" { - return l.logf(l.Err, debugPriority, format, a...) - } - return 0, nil -} - -func (l Logger) Infof(format string, a ...interface{}) (int, error) { - return l.logf(l.Out, infoPriority, format, a...) -} - -func (l Logger) Errorf(format string, a ...interface{}) (int, error) { - return l.logf(l.Err, errorPriority, format, a...) -} - -func (l Logger) Fatalf(format string, a ...interface{}) { - l.logf(l.Err, fatalPriority, format, a...) 
- os.Exit(1) -} - -func (l Logger) logf(stream io.Writer, level priority, format string, a ...interface{}) (int, error) { - var prefix string - - if level <= errorPriority || level == debugPriority { - // Retrieve the stack infos - _, file, line, ok := runtime.Caller(2) - if !ok { - file = "" - line = -1 - } else { - file = file[strings.LastIndex(file, "/")+1:] - } - prefix = fmt.Sprintf(errorFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), file, line, format) - } else { - prefix = fmt.Sprintf(logFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), format) - } - - return fmt.Fprintf(stream, prefix, a...) -} diff --git a/log/log_test.go b/log/log_test.go deleted file mode 100644 index 4f5b3f8..0000000 --- a/log/log_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package log - -import ( - "bytes" - "regexp" - - "testing" -) - -var reRFC3339NanoFixed = "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{9}.([0-9]{2}:[0-9]{2})?" - -func TestLogFatalf(t *testing.T) { - var output *bytes.Buffer - - tests := []struct { - Level priority - Format string - Values []interface{} - ExpectedPattern string - }{ - {fatalPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {errorPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {infoPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[info\\] 1 \\+ 1 = 2"}, - {debugPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - } - - for i, test := range tests { - output = &bytes.Buffer{} - DefaultLogger.logf(output, test.Level, test.Format, test.Values...) 
- - expected := regexp.MustCompile(test.ExpectedPattern) - if !expected.MatchString(output.String()) { - t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s", - i, - expected.String(), - output.String()) - } - } -} From 015f966a1be210fa9a67adb49a94bed55dc2d0b7 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 15:11:48 -0700 Subject: [PATCH 17/99] Mass gofmt Signed-off-by: Alexandr Morozov --- archive/archive.go | 2 +- broadcastwriter/broadcastwriter.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index e4db63a..9c4d881 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -18,8 +18,8 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/fileutils" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" diff --git a/broadcastwriter/broadcastwriter.go b/broadcastwriter/broadcastwriter.go index a9ae104..232cf3d 100644 --- a/broadcastwriter/broadcastwriter.go +++ b/broadcastwriter/broadcastwriter.go @@ -6,8 +6,8 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/jsonlog" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/jsonlog" ) // BroadcastWriter accumulate multiple io.WriteCloser by stream. From 0ed7e5e7dee466cc554b188f0a158417f2b13874 Mon Sep 17 00:00:00 2001 From: shuai-z Date: Sun, 26 Oct 2014 13:55:29 +0800 Subject: [PATCH 18/99] removed redundant Clean The doc (or src) says: The result is Cleaned. 
http://golang.org/pkg/path/filepath/#Join Signed-off-by: shuai-z --- symlink/fs.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/symlink/fs.go b/symlink/fs.go index da9c590..d761732 100644 --- a/symlink/fs.go +++ b/symlink/fs.go @@ -35,7 +35,6 @@ func FollowSymlinkInScope(link, root string) (string, error) { for _, p := range strings.Split(link, "/") { prev = filepath.Join(prev, p) - prev = filepath.Clean(prev) loopCounter := 0 for { @@ -72,7 +71,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } else { prev, _ = filepath.Abs(prev) - if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { + if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) { prev = filepath.Join(root, filepath.Base(dest)) } } From 61b6781d59c934d7bf80439ffb79121ddbbf7096 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 27 Oct 2014 17:45:38 +0000 Subject: [PATCH 19/99] update sysinfo to logrus Signed-off-by: Victor Vieux --- jsonlog/jsonlog.go | 3 ++- proxy/tcp_proxy.go | 3 ++- proxy/udp_proxy.go | 3 ++- sysinfo/sysinfo.go | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/jsonlog/jsonlog.go b/jsonlog/jsonlog.go index b0c61a8..3a96d86 100644 --- a/jsonlog/jsonlog.go +++ b/jsonlog/jsonlog.go @@ -4,8 +4,9 @@ import ( "encoding/json" "fmt" "io" - "log" "time" + + log "github.com/Sirupsen/logrus" ) type JSONLog struct { diff --git a/proxy/tcp_proxy.go b/proxy/tcp_proxy.go index 1aa6d9f..eacf142 100644 --- a/proxy/tcp_proxy.go +++ b/proxy/tcp_proxy.go @@ -2,9 +2,10 @@ package proxy import ( "io" - "log" "net" "syscall" + + log "github.com/Sirupsen/logrus" ) type TCPProxy struct { diff --git a/proxy/udp_proxy.go b/proxy/udp_proxy.go index ae6a7bb..f9f2d11 100644 --- a/proxy/udp_proxy.go +++ b/proxy/udp_proxy.go @@ -2,12 +2,13 @@ package proxy import ( "encoding/binary" - "log" "net" "strings" "sync" "syscall" "time" + + log "github.com/Sirupsen/logrus" ) const ( diff --git 
a/sysinfo/sysinfo.go b/sysinfo/sysinfo.go index 0c28719..001111f 100644 --- a/sysinfo/sysinfo.go +++ b/sysinfo/sysinfo.go @@ -2,10 +2,10 @@ package sysinfo import ( "io/ioutil" - "log" "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/libcontainer/cgroups" ) From f70214084c89470fb81cb85bdcbc608cae736207 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 27 Oct 2014 17:23:50 -0700 Subject: [PATCH 20/99] Exclude `.wh..wh.*` AUFS metadata on layer export In an effort to make layer content 'stable' between import and export from two different graph drivers, we must resolve an issue where AUFS produces metadata files in its layers which other drivers explicitly ignore when importing. The issue presents itself like this: - Generate a layer using AUFS - On commit of that container, the new stored layer contains AUFS metadata files/dirs. The stored layer content has some tarsum value: '1234567' - `docker save` that image to a USB drive and `docker load` into another docker engine instance which uses another graph driver, say 'btrfs' - On load, this graph driver explicitly ignores any AUFS metadata that it encounters. The stored layer content now has some different tarsum value: 'abcdefg'. The only (apparent) useful aufs metadata to keep are the psuedo link files located at `/.wh..wh.plink/`. Thes files hold information at the RW layer about hard linked files between this layer and another layer. The other graph drivers make sure to copy up these psuedo linked files but I've tested out a few different situations and it seems that this is unnecessary (In my test, AUFS already copies up the other hard linked files to the RW layer). This changeset adds explicit exclusion of the AUFS metadata files and directories (NOTE: not the whiteout files!) on commit of a container using the AUFS storage driver. Also included is a change to the archive package. 
It now explicitly ignores the root directory from being included in the resulting tar archive for 2 reasons: 1) it's unnecessary. 2) It's another difference between what other graph drivers produce when exporting a layer to a tar archive. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- archive/archive.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/archive/archive.go b/archive/archive.go index 9c4d881..fea2c3d 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -369,7 +369,9 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil { + if err != nil || (relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the root path. Skip in both situations. return nil } From b17f754fff4bd05316caac78ea7703bfac09ba0f Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 15 Sep 2014 14:45:53 -0400 Subject: [PATCH 21/99] archive: preserve hardlinks in Tar and Untar * integration test for preserving hardlinks Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- archive/archive.go | 52 +++++++++++++++++++++++++++--------- archive/archive_test.go | 58 +++++++++++++++++++++++++++++++++++++++++ archive/changes.go | 18 ++++++++----- 3 files changed, 108 insertions(+), 20 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 9c4d881..dd14b77 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -153,7 +153,15 @@ func (compression *Compression) Extension() string { return "" } -func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err @@ -188,13 +196,28 @@ func addTarFile(path, name string, tw *tar.Writer, 
twBuf *bufio.Writer) error { } + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && stat.Nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + ino := uint64(stat.Ino) + if oldpath, ok := ta.SeenFiles[ino]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[ino] = name + } + } + capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } @@ -204,17 +227,17 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { return err } - twBuf.Reset(tw) - _, err = io.Copy(twBuf, file) + ta.Buffer.Reset(ta.TarWriter) + _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } - err = twBuf.Flush() + err = ta.Buffer.Flush() if err != nil { return err } - twBuf.Reset(nil) + ta.Buffer.Reset(nil) } return nil @@ -345,9 +368,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil, err } - tw := tar.NewWriter(compressWriter) - go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -357,9 +386,6 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.Includes = []string{"."} } - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) - var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { @@ -395,7 +421,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) } - if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + if err := ta.addTarFile(filePath, relFilePath); err != nil { log.Debugf("Can't add file %s to tar: %s", srcPath, err) } return nil @@ -403,7 +429,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // Make sure to check the error on Close. 
- if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { diff --git a/archive/archive_test.go b/archive/archive_test.go index 900fff5..3516aca 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -249,6 +249,64 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(path.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(path.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(path.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + func getNlink(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { diff --git a/archive/changes.go b/archive/changes.go index 
557b5db..3e9ab45 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -368,11 +368,15 @@ func minor(device uint64) uint64 { // ExportChanges produces an Archive from the provided changes, relative to dir. func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() - tw := tar.NewWriter(writer) - go func() { - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -390,19 +394,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { AccessTime: timestamp, ChangeTime: timestamp, } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { log.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) - if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + if err := ta.addTarFile(path, change.Path[1:]); err != nil { log.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. - if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } writer.Close() From 4a2fb0ab3d3404e86445a4253484b39584fa68cd Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 28 Oct 2014 16:59:27 -0400 Subject: [PATCH 22/99] archive: example app for diffing directories By default is a demo of file differences, but can be used to create a tar of changes between an old and new path. 
Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- archive/example_changes.go | 97 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 archive/example_changes.go diff --git a/archive/example_changes.go b/archive/example_changes.go new file mode 100644 index 0000000..cedd46a --- /dev/null +++ b/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} From 74b38deaa9cc589f1580ef8ec2cd4b5ca4729b3a Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 28 Oct 2014 17:01:10 -0400 Subject: [PATCH 23/99] archive: cleanup and more information Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- archive/archive.go | 3 +-- 
archive/changes.go | 6 +++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index dd14b77..37b312e 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -193,7 +193,6 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Devmajor = int64(major(uint64(stat.Rdev))) hdr.Devminor = int64(minor(uint64(stat.Rdev))) } - } // if it's a regular file and has more than 1 link, @@ -228,6 +227,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { @@ -237,7 +237,6 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err != nil { return err } - ta.Buffer.Reset(nil) } return nil diff --git a/archive/changes.go b/archive/changes.go index 3e9ab45..0a1f741 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -333,6 +333,8 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() + + // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, err @@ -409,7 +411,9 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } - writer.Close() + if err := writer.Close(); err != nil { + log.Debugf("failed close Changes writer: %s", err) + } }() return reader, nil } From f76adff303be663d7b52685ad1738e3e89b41f42 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 30 Oct 2014 14:48:30 +0200 Subject: [PATCH 24/99] pkg/reexec: move reexec code to a new package Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- reexec/MAINTAINERS | 1 + reexec/README.md | 5 +++++ reexec/reexec.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 reexec/MAINTAINERS create mode 100644 reexec/README.md 
create mode 100644 reexec/reexec.go diff --git a/reexec/MAINTAINERS b/reexec/MAINTAINERS new file mode 100644 index 0000000..e48a0c7 --- /dev/null +++ b/reexec/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/reexec/README.md b/reexec/README.md new file mode 100644 index 0000000..45592ce --- /dev/null +++ b/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/reexec/reexec.go b/reexec/reexec.go new file mode 100644 index 0000000..136b905 --- /dev/null +++ b/reexec/reexec.go @@ -0,0 +1,45 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registred under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. 
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + + return false +} + +// Self returns the path to the current processes binary +func Self() string { + name := os.Args[0] + + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + name = lp + } + } + + return name +} From b4b52c87b223a02e6e4eda9299b54df90d38436f Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 30 Oct 2014 13:42:57 -0400 Subject: [PATCH 25/99] ./pkg/archive: clean up Stat_t assertion inspired by @tonistiigi comment (https://github.com/docker/docker/pull/8046/files#r19579960) Signed-off-by: Vincent Batts --- archive/archive.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 37b312e..3857220 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -185,8 +185,13 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Name = name - stat, ok := fi.Sys().(*syscall.Stat_t) - if ok { + var ( + nlink uint32 + inode uint64 + ) + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + nlink = uint32(stat.Nlink) + inode = uint64(stat.Ino) // Currently go does not fill in the major/minors if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { @@ -194,19 +199,17 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Devminor = int64(minor(uint64(stat.Rdev))) } } - // if it's a regular file and has more than 1 link, // it's hardlinked, so set the type flag accordingly - if fi.Mode().IsRegular() && stat.Nlink > 1 { + if fi.Mode().IsRegular() && nlink > 1 { // a link should have a name that it links too // and that linked name should be first in the tar archive - ino := uint64(stat.Ino) - if oldpath, ok := ta.SeenFiles[ino]; ok { + if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be 
here for the writer math to add up! } else { - ta.SeenFiles[ino] = name + ta.SeenFiles[inode] = name } } From aaada9057d4b00485cbbded859e2d5400343caba Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 30 Oct 2014 17:04:56 -0400 Subject: [PATCH 26/99] mount: move the MakePrivate to pkg/mount The logic is unrelated to graphdriver. Signed-off-by: Vincent Batts --- mount/sharedsubtree_linux.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 mount/sharedsubtree_linux.go diff --git a/mount/sharedsubtree_linux.go b/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..566ebbd --- /dev/null +++ b/mount/sharedsubtree_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package mount + +func MakePrivate(mountPoint string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + + return ForceMount("", mountPoint, "none", "private") +} From 9775fc4473a5041c5386dcc21e6e25f6bc776352 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 10:18:41 -0400 Subject: [PATCH 27/99] pkg/mount: add more sharesubtree options Signed-off-by: Vincent Batts --- mount/flags.go | 7 +++++++ mount/flags_freebsd.go | 7 +++++++ mount/flags_linux.go | 7 +++++++ mount/flags_unsupported.go | 7 +++++++ mount/sharedsubtree_linux.go | 38 +++++++++++++++++++++++++++++++++++- 5 files changed, 65 insertions(+), 1 deletion(-) diff --git a/mount/flags.go b/mount/flags.go index 742698e..17dbd7a 100644 --- a/mount/flags.go +++ b/mount/flags.go @@ -37,7 +37,14 @@ func parseOptions(options string) (int, string) { "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + 
"rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, diff --git a/mount/flags_freebsd.go b/mount/flags_freebsd.go index 4ddf4d7..a59b589 100644 --- a/mount/flags_freebsd.go +++ b/mount/flags_freebsd.go @@ -19,7 +19,14 @@ const ( MANDLOCK = 0 NODEV = 0 NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIVE = 0 RELATIME = 0 diff --git a/mount/flags_linux.go b/mount/flags_linux.go index 0bb47d8..9986621 100644 --- a/mount/flags_linux.go +++ b/mount/flags_linux.go @@ -17,7 +17,14 @@ const ( NODIRATIME = syscall.MS_NODIRATIME BIND = syscall.MS_BIND RBIND = syscall.MS_BIND | syscall.MS_REC + UNBINDABLE = syscall.MS_UNBINDABLE + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC PRIVATE = syscall.MS_PRIVATE + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + SLAVE = syscall.MS_SLAVE + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + SHARED = syscall.MS_SHARED + RSHARED = syscall.MS_SHARED | syscall.MS_REC RELATIME = syscall.MS_RELATIME STRICTATIME = syscall.MS_STRICTATIME ) diff --git a/mount/flags_unsupported.go b/mount/flags_unsupported.go index 5a14108..c4f8217 100644 --- a/mount/flags_unsupported.go +++ b/mount/flags_unsupported.go @@ -11,7 +11,14 @@ const ( NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 diff --git a/mount/sharedsubtree_linux.go b/mount/sharedsubtree_linux.go index 566ebbd..cd9b86c 100644 --- a/mount/sharedsubtree_linux.go +++ b/mount/sharedsubtree_linux.go @@ -2,7 +2,39 @@ package mount +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + func MakePrivate(mountPoint string) error { + return 
ensureMountedAs(mountPoint, "private") +} + +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { mounted, err := Mounted(mountPoint) if err != nil { return err @@ -13,6 +45,10 @@ func MakePrivate(mountPoint string) error { return err } } + mounted, err = Mounted(mountPoint) + if err != nil { + return err + } - return ForceMount("", mountPoint, "none", "private") + return ForceMount("", mountPoint, "none", options) } From 3816c2f723a8039e1e950b4ea7af59ab4ae955a6 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 31 Oct 2014 18:41:46 +0000 Subject: [PATCH 28/99] pkg/proxy: Bump the maximum size of a UDP packet. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- proxy/udp_proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proxy/udp_proxy.go b/proxy/udp_proxy.go index f9f2d11..a3fcf11 100644 --- a/proxy/udp_proxy.go +++ b/proxy/udp_proxy.go @@ -13,7 +13,7 @@ import ( const ( UDPConnTrackTimeout = 90 * time.Second - UDPBufSize = 2048 + UDPBufSize = 65507 ) // A net.Addr where the IP is split into two fields so you can use it as a key From 3894be03396cbacdfec218d2b3b64e30eee32e03 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 13:12:31 -0400 Subject: [PATCH 29/99] pkg/mount: testing for linux sharedsubtree mounts * shared * shared/slave * unbindable * private Signed-off-by: Vincent Batts --- mount/sharedsubtree_linux_test.go | 331 ++++++++++++++++++++++++++++++ 1 file changed, 331 insertions(+) create mode 100644 mount/sharedsubtree_linux_test.go diff --git a/mount/sharedsubtree_linux_test.go b/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000..145d57b --- /dev/null +++ b/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propogated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } 
+ if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propogate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), 
"mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is avaible in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + 
outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { 
+ if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. 
It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable") + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} From 3a2c49a3d9fd122559921d5fa5bc910f6c059d70 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 3 Nov 2014 14:01:50 -0500 Subject: [PATCH 30/99] pkg/mount: adding fields supported by freebsd Signed-off-by: Vincent Batts --- mount/mountinfo_freebsd.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mount/mountinfo_freebsd.go b/mount/mountinfo_freebsd.go index a16bdb8..2fe9186 100644 --- a/mount/mountinfo_freebsd.go +++ b/mount/mountinfo_freebsd.go @@ -32,6 +32,8 @@ func parseMountTable() ([]*MountInfo, error) { for _, entry := range entries { var mountinfo MountInfo mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) out = append(out, &mountinfo) } return out, nil From 8f30e895b21558e0450c00a19fd766d324288fc1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 3 Nov 2014 22:05:04 -0500 Subject: [PATCH 31/99] pkg/mount: include optional field one linux, the optional field designates the sharedsubtree information, if any. 
Signed-off-by: Vincent Batts --- mount/mountinfo.go | 6 +++--- mount/mountinfo_linux.go | 13 +++++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/mount/mountinfo.go b/mount/mountinfo.go index 78b83ce..ec8e8bc 100644 --- a/mount/mountinfo.go +++ b/mount/mountinfo.go @@ -1,7 +1,7 @@ package mount type MountInfo struct { - Id, Parent, Major, Minor int - Root, Mountpoint, Opts string - Fstype, Source, VfsOpts string + Id, Parent, Major, Minor int + Root, Mountpoint, Opts, Optional string + Fstype, Source, VfsOpts string } diff --git a/mount/mountinfo_linux.go b/mount/mountinfo_linux.go index 84bf551..e6c28da 100644 --- a/mount/mountinfo_linux.go +++ b/mount/mountinfo_linux.go @@ -23,7 +23,7 @@ const ( (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s " + mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts @@ -49,13 +49,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } var ( - p = &MountInfo{} - text = s.Text() + p = &MountInfo{} + text = s.Text() + optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.Id, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts); err != nil { + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. 
@@ -65,6 +66,10 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } + if optionalFields != "-" { + p.Optional = optionalFields + } + p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") From 21fc078476955a7ec9e91c774e388b3a70eb5260 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 30 Jun 2014 18:39:58 -0400 Subject: [PATCH 32/99] Support hairpin NAT This re-applies commit b39d02b with additional iptables rules to solve the issue with containers routing back into themselves. The previous issue with this attempt was that the DNAT rule would send traffic back into the container it came from. When this happens you have 2 issues. 1) reverse path filtering. The container is going to see the traffic coming in from the outside and it's going to have a source address of itself. So reverse path filtering will kick in and drop the packet. 2) direct return mismatch. Assuming you turned reverse path filtering off, when the packet comes back in, it's goign to have a source address of itself, thus when the reply traffic is sent, it's going to have a source address of itself. But the original packet was sent to the host IP address, so the traffic will be dropped because it's coming from an address which the original traffic was not sent to (and likely with an incorrect port as well). The solution to this is to masquerade the traffic when it gets routed back into the origin container. However for this to work you need to enable hairpin mode on the bridge port, otherwise the kernel will just drop the traffic. The hairpin mode set is part of libcontainer, while the MASQ change is part of docker. This reverts commit 63c303eecdbaf4dc7967fd51b82cd447c778cecc. 
Docker-DCO-1.1-Signed-off-by: Patrick Hemmer (github: phemmer) --- iptables/iptables.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/iptables/iptables.go b/iptables/iptables.go index 53e6e14..b550837 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -73,7 +73,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), - "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err @@ -97,6 +96,17 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str return fmt.Errorf("Error iptables forward: %s", output) } + if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING", + "-p", proto, + "-s", dest_addr, + "-d", dest_addr, + "--dport", strconv.Itoa(dest_port), + "-j", "MASQUERADE"); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + return nil } From 78ae4e8ad15eb04131cbd61ad5338f0be57b6432 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Thu, 30 Oct 2014 13:47:31 -0700 Subject: [PATCH 33/99] Finalize TarSum Version 1 w/ refactor The current Dev version of TarSum includes hashing of extended file attributes and omits inclusion of modified time headers. I refactored the logic around the version differences to make it more clear that the difference between versions is in how tar headers are selected and ordered. TarSum Version 1 is now declared with the new Dev version continuing to track it. 
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- tarsum/tarsum.go | 60 ++++++--------------------- tarsum/versioning.go | 86 ++++++++++++++++++++++++++++++++++++--- tarsum/versioning_test.go | 8 +++- 3 files changed, 100 insertions(+), 54 deletions(-) diff --git a/tarsum/tarsum.go b/tarsum/tarsum.go index 88d603c..34386ff 100644 --- a/tarsum/tarsum.go +++ b/tarsum/tarsum.go @@ -7,8 +7,6 @@ import ( "encoding/hex" "hash" "io" - "sort" - "strconv" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" @@ -29,18 +27,20 @@ const ( // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil } // Create a new TarSum, providing a THash to use rather than the DefaultTHash func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil } // TarSum is the generic interface for calculating fixed time @@ -69,8 +69,9 @@ type tarSum struct { currentFile string finished bool first bool - DisableCompression bool // false by default. When false, the output gzip compressed. 
- tarSumVersion Version // this field is not exported so it can not be mutated during use + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { @@ -103,49 +104,12 @@ type simpleTHash struct { func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } -func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { - for _, elem := range [][2]string{ - {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, - } { - if v >= VersionDev && elem[0] == "mtime" { - continue - } - set = append(set, elem) - } - return -} - func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.selectHeaders(h, ts.Version()) { + for _, elem := range ts.headerSelector.selectHeaders(h) { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } - - // include the additional pax headers, from an ordered list - if ts.Version() >= VersionDev { - var keys []string - for k := range h.Xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { - return err - } - } - } return nil } diff --git a/tarsum/versioning.go b/tarsum/versioning.go index e1161fc..3a65661 100644 --- a/tarsum/versioning.go +++ b/tarsum/versioning.go @@ -2,7 +2,11 @@ package tarsum import ( "errors" + "sort" + 
"strconv" "strings" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // versioning of the TarSum algorithm @@ -10,11 +14,11 @@ import ( // i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int +// Prefix of "tarsum" const ( - // Prefix of "tarsum" Version0 Version = iota - // Prefix of "tarsum.dev" - // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + Version1 + // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) @@ -28,8 +32,9 @@ func GetVersions() []Version { } var tarSumVersions = map[Version]string{ - 0: "tarsum", - 1: "tarsum.dev", + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", } func (tsv Version) String() string { @@ -50,7 +55,78 @@ func GetVersionFromTarsum(tarsum string) (Version, error) { return -1, ErrNotVersion } +// Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. 
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/tarsum/versioning_test.go b/tarsum/versioning_test.go index b851c3b..4ddb72e 100644 --- a/tarsum/versioning_test.go +++ b/tarsum/versioning_test.go @@ -11,11 +11,17 @@ func TestVersion(t *testing.T) { t.Errorf("expected %q, got %q", expected, v.String()) } - expected = "tarsum.dev" + expected = "tarsum.v1" v = 1 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } } func TestGetVersion(t *testing.T) { From a5ad3ec0b701e23175234b3a6e97c6bad982a2d5 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 5 Nov 2014 18:10:38 -0500 Subject: [PATCH 34/99] devicemapper: split out devicemapper bindings This is a first pass at splitting out devicemapper into separate, usable bindings. 
Signed-off-by: Vincent Batts --- devicemapper/attach_loopback.go | 129 ++++++ devicemapper/devmapper.go | 673 ++++++++++++++++++++++++++++++ devicemapper/devmapper_log.go | 30 ++ devicemapper/devmapper_wrapper.go | 254 +++++++++++ devicemapper/ioctl.go | 72 ++++ 5 files changed, 1158 insertions(+) create mode 100644 devicemapper/attach_loopback.go create mode 100644 devicemapper/devmapper.go create mode 100644 devicemapper/devmapper_log.go create mode 100644 devicemapper/devmapper_wrapper.go create mode 100644 devicemapper/ioctl.go diff --git a/devicemapper/attach_loopback.go b/devicemapper/attach_loopback.go new file mode 100644 index 0000000..d39cbc6 --- /dev/null +++ b/devicemapper/attach_loopback.go @@ -0,0 +1,129 @@ +// +build linux + +package devicemapper + +import ( + "fmt" + "os" + "syscall" + + log "github.com/Sirupsen/logrus" +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + log.Errorf("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + log.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop 
file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + log.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// attachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start loopking for a + // loopback from index 0. 
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + log.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &LoopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + log.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + log.Errorf("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go new file mode 100644 index 0000000..c0b931c --- /dev/null +++ b/devicemapper/devmapper.go @@ -0,0 +1,673 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + + log "github.com/Sirupsen/logrus" +) + +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + DeviceCreate TaskType = iota + DeviceReload + DeviceRemove + DeviceRemoveAll + DeviceSuspend + DeviceResume + DeviceInfo + DeviceDeps + DeviceRename + DeviceVersion + DeviceStatus + DeviceTable + DeviceWaitevent + DeviceList + DeviceClear + DeviceMknodes + DeviceListVersions + DeviceTargetMsg + DeviceSetGeometry +) + +const ( + AddNodeOnResume AddNodeType = iota + AddNodeOnCreate +) + +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name 
failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running removeDevice failed") + ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") + ErrBusy = errors.New("Device is Busy") + + dmSawBusy bool + dmSawExist bool +) + +type ( + Task struct { + unmanaged *CDmTask + } + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + } + TaskType int + AddNodeType int +) + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, 
(*Task).destroy) + return task +} + +func (t *Task) Run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) SetName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) SetMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) SetSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) SetCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) SetAddNode(addNode AddNodeType) error { + if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) SetRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) AddTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) GetDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) GetInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) 
GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + log.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +func LoopbackSetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + log.Errorf("Error loopbackSetCapacity: %s", err) + return ErrLoopbackSetCapacity + } + return nil +} + +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} + +func UdevWait(cookie uint) error { + if res := DmUdevWait(cookie); res != 1 { + log.Debugf("Failed to wait on udev cookie %d", cookie) + return ErrUdevWait + } + return nil +} + +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger = nil + +// initialize the logger for the device mapper library +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + log.Debugf("Error dm_set_dev_dir") + return ErrSetDevDir + } + return 
nil +} + +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + // TODO(vbatts) just use the other removeDevice() + task := TaskCreate(DeviceRemove) + if task == nil { + return ErrCreateRemoveTask + } + if err := task.SetName(name); err != nil { + log.Debugf("Can't set task name %s", name) + return err + } + if err := task.Run(); err != nil { + return ErrRunRemoveDevice + } + return nil +} + +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + log.Errorf("Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. 
+ syscall.Sync() + + return nil +} + +// This is the programmatic example of "dmsetup create" +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate %s", err) + } + + return nil +} + +func createTask(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +func GetDeps(name string) (*Deps, error) { + task, err := createTask(DeviceDeps, name) + if task == nil 
{ + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetDeps() +} + +func GetInfo(name string) (*Info, error) { + task, err := createTask(DeviceInfo, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetInfo() +} + +func GetDriverVersion() (string, error) { + task := TaskCreate(DeviceVersion) + if task == nil { + return "", fmt.Errorf("Can't create DeviceVersion task") + } + if err := task.Run(); err != nil { + return "", err + } + return task.GetDriverVersion() +} + +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := createTask(DeviceStatus, name) + if task == nil { + log.Debugf("GetStatus: Error createTask: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + log.Debugf("GetStatus: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + log.Debugf("GetStatus: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + log.Debugf("GetStatus: Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(0) + return start, length, targetType, params, nil +} + +func SetTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running SetTransactionId %s", err) + } + return nil +} + +func SuspendDevice(name string) error { + task, err := createTask(DeviceSuspend, name) + if task == nil { + return err + } + if err := 
task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend %s", err) + } + return nil +} + +func ResumeDevice(name string) error { + task, err := createTask(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume %s", err) + } + + UdevWait(cookie) + + return nil +} + +func CreateDevice(poolName string, deviceId *int) error { + log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return fmt.Errorf("Error running CreateDevice %s", err) + } + break + } + return nil +} + +func DeleteDevice(poolName string, deviceId int) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeleteDevice %s", err) + } + return nil +} + +func removeDevice(name string) error { + log.Debugf("[devmapper] RemoveDevice START") + defer log.Debugf("[devmapper] RemoveDevice END") + task, err := createTask(DeviceRemove, name) + if task == nil { + return err + } + dmSawBusy = false + if err = task.Run(); err != nil { + if 
dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running RemoveDevice %s", err) + } + return nil +} + +func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := createTask(DeviceCreate, name) + if task == nil { + return err + } + + params := fmt.Sprintf("%s %d", poolName, deviceId) + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) + } + + break + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + 
return nil +} diff --git a/devicemapper/devmapper_log.go b/devicemapper/devmapper_log.go new file mode 100644 index 0000000..d6550bd --- /dev/null +++ b/devicemapper/devmapper_log.go @@ -0,0 +1,30 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + } +} diff --git a/devicemapper/devmapper_wrapper.go b/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000..c7e96a1 --- /dev/null +++ b/devicemapper/devmapper_wrapper.go @@ -0,0 +1,254 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +#include // FIXME: present only for defines, maybe we can remove it? +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) 
+{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import "unsafe" + +type ( + CDmTask C.struct_dm_task + + CLoopInfo64 C.struct_loop_info64 + LoopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncrypt_type uint32 + loEncrypt_key_size uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 + } +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD + + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) + +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + 
DmUdevWait = dmUdevWaitFct + LogWithErrnoInit = logWithErrnoInitFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *CDmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *CDmTask { + return (*CDmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *CDmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *CDmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *CDmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *CDmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + deps := &Deps{ + Count: uint32(Cdeps.count), + 
Filler: uint32(Cdeps.filler), + } + for _, device := range Cdeps.device { + deps.Device = append(deps.Device, (uint64)(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) + return uintptr(nextp) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + 
*version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/devicemapper/ioctl.go b/devicemapper/ioctl.go new file mode 100644 index 0000000..f97e9d1 --- /dev/null +++ b/devicemapper/ioctl.go @@ -0,0 +1,72 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { + loopInfo := &LoopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, 
BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} From 3a138a9b5e50fcacc05279ad7323a5ccf30a81ea Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 7 Nov 2014 02:17:02 +0900 Subject: [PATCH 35/99] Fix the unit test not to remove /tmp Signed-off-by: Yohei Ueda --- parsers/operatingsystem/operatingsystem_test.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/parsers/operatingsystem/operatingsystem_test.go b/parsers/operatingsystem/operatingsystem_test.go index d264b35..b7d54cb 100644 --- a/parsers/operatingsystem/operatingsystem_test.go +++ b/parsers/operatingsystem/operatingsystem_test.go @@ -38,12 +38,13 @@ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) ) dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + defer func() { + os.Remove(etcOsRelease) etcOsRelease = backup - os.RemoveAll(dir) }() - etcOsRelease = filepath.Join(dir, "etcOsRelease") for expect, osRelease := range map[string][]byte{ "Ubuntu 14.04 LTS": ubuntuTrusty, "Gentoo/Linux": gentoo, @@ -92,13 +93,13 @@ func TestIsContainerized(t *testing.T) { ) dir := os.TempDir() - defer func() { - proc1Cgroup = backup - os.RemoveAll(dir) - }() - proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } From 5ecbc5de3885aaff65912720ee551664f220649f Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 5 Nov 2014 12:24:15 -0800 Subject: [PATCH 36/99] Make /etc/hosts records consistent Fixes #8972 Signed-off-by: Alexandr Morozov --- networkfs/etchosts/etchosts.go | 52 +++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/networkfs/etchosts/etchosts.go b/networkfs/etchosts/etchosts.go index 6cf29b0..d7edef2 100644 --- a/networkfs/etchosts/etchosts.go +++ 
b/networkfs/etchosts/etchosts.go @@ -3,40 +3,54 @@ package etchosts import ( "bytes" "fmt" + "io" "io/ioutil" "regexp" ) -var defaultContent = map[string]string{ - "localhost": "127.0.0.1", - "localhost ip6-localhost ip6-loopback": "::1", - "ip6-localnet": "fe00::0", - "ip6-mcastprefix": "ff00::0", - "ip6-allnodes": "ff02::1", - "ip6-allrouters": "ff02::2", +type Record struct { + Hosts string + IP string } -func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { +func (r Record) WriteTo(w io.Writer) (int64, error) { + n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) + return int64(n), err +} + +var defaultContent = []Record{ + {Hosts: "localhost", IP: "127.0.0.1"}, + {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, + {Hosts: "ip6-localnet", IP: "fe00::0"}, + {Hosts: "ip6-mcastprefix", IP: "ff00::0"}, + {Hosts: "ip6-allnodes", IP: "ff02::1"}, + {Hosts: "ip6-allrouters", IP: "ff02::2"}, +} + +func Build(path, IP, hostname, domainname string, extraContent []Record) error { content := bytes.NewBuffer(nil) if IP != "" { + var mainRec Record + mainRec.IP = IP if domainname != "" { - content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname) } else { - content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + mainRec.Hosts = hostname } - } - - for hosts, ip := range defaultContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + if _, err := mainRec.WriteTo(content); err != nil { return err } } - if extraContent != nil { - for hosts, ip := range *extraContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { - return err - } + for _, r := range defaultContent { + if _, err := r.WriteTo(content); err != nil { + return err + } + } + + for _, r := range extraContent { + if _, err := r.WriteTo(content); err != nil { + return err } } From 
5ebff7da579a917e290af842831fbdc8d1abbea2 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Thu, 6 Nov 2014 11:36:09 -0800 Subject: [PATCH 37/99] Test for etchosts consistency Signed-off-by: Alexandr Morozov --- networkfs/etchosts/etchosts_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/networkfs/etchosts/etchosts_test.go b/networkfs/etchosts/etchosts_test.go index 05a4f44..c033904 100644 --- a/networkfs/etchosts/etchosts_test.go +++ b/networkfs/etchosts/etchosts_test.go @@ -7,6 +7,32 @@ import ( "testing" ) +func TestBuildDefault(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + // check that /etc/hosts has consistent ordering + for i := 0; i <= 5; i++ { + err = Build(file.Name(), "", "", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n" + + if expected != string(content) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + } +} + func TestBuildHostnameDomainname(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { From 2f091187472f7b6007af50dbab07596c858db6a6 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 16:28:20 -0400 Subject: [PATCH 38/99] pkg/mount: mountinfo from specified pid Signed-off-by: Vincent Batts --- mount/mountinfo_linux.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/mount/mountinfo_linux.go b/mount/mountinfo_linux.go index 84bf551..68f4e9f 100644 --- a/mount/mountinfo_linux.go +++ b/mount/mountinfo_linux.go @@ -1,3 +1,5 @@ +// +build linux + package mount import ( @@ -72,3 +74,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } return out, nil } + +// PidMountInfo collects the mounts for 
a specific Pid +func PidMountInfo(pid int) ([]*MountInfo, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} From 08768fd690e2c1d4b819333fd6adfdc96278d79c Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 7 Nov 2014 17:20:16 +0200 Subject: [PATCH 39/99] pkg/namesgenerator: add Yeong-Sil Jang Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- namesgenerator/names-generator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/namesgenerator/names-generator.go b/namesgenerator/names-generator.go index 3e4e2d9..b641e91 100644 --- a/namesgenerator/names-generator.go +++ b/namesgenerator/names-generator.go @@ -76,7 +76,8 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. 
http://en.wikipedia.org/wiki/Jang_Yeong-sil + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jang", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} ) func GetRandomName(retry int) string { From 58acf884c332a5b4f6f097e0104bae112d1dc7a0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 11 Nov 2014 08:48:11 -0500 Subject: [PATCH 40/99] pkg/tarsum: adding more tests Ensuring case size of headers will still be accounted for. 
https://github.com/docker/docker/pull/8869#discussion_r20114401 Signed-off-by: Vincent Batts --- tarsum/tarsum_test.go | 147 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/tarsum/tarsum_test.go b/tarsum/tarsum_test.go index 1e06cda..60fcc97 100644 --- a/tarsum/tarsum_test.go +++ b/tarsum/tarsum_test.go @@ -318,6 +318,153 @@ func TestTarSums(t *testing.T) { } } +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: 
map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + break // we're just reading one header ... 
+ } + return ts.Sum(nil), nil +} + func Benchmark9kTar(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") From 513934f6b1c079441f56b888f74a0a377f5dc04d Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 6 Nov 2014 20:01:37 +0200 Subject: [PATCH 41/99] pkg/archive: add interface for Untar Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- archive/archive.go | 71 ++++++++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 2e339b3..d8f34d9 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -36,10 +36,17 @@ type ( NoLchown bool Name string } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } ) var ( ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} ) const ( @@ -549,45 +556,47 @@ loop: return nil } -// TarUntar is a convenience function which calls Tar and Untar, with -// the output of one piped into the other. If either Tar or Untar fails, -// TarUntar aborts and returns the error. -func TarUntar(src string, dst string) error { +func (archiver *Archiver) TarUntar(src, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() - return Untar(archive, dst, nil) + return archiver.Untar(archive, dst, nil) } -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 
+// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() - if err := Untar(archive, dst, nil); err != nil { + if err := archiver.Untar(archive, dst, nil); err != nil { return err } return nil } -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -// -func CopyWithTar(src, dst string) error { +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { - return CopyFileWithTar(src, dst) + return archiver.CopyFileWithTar(src, dst) } // Create dst, copy src's content into it log.Debugf("Creating dest directory: %s", dst) @@ -595,16 +604,18 @@ func CopyWithTar(src, dst string) error { return err } log.Debugf("Calling TarUntar(%s, %s)", src, dst) - return TarUntar(src, dst) + return archiver.TarUntar(src, dst) } -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/', the final destination path -// will be `dst/base(src)`. -func CopyFileWithTar(src, dst string) (err error) { +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
+func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { log.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { @@ -652,7 +663,17 @@ func CopyFileWithTar(src, dst string) (err error) { err = er } }() - return Untar(r, filepath.Dir(dst), nil) + return archiver.Untar(r, filepath.Dir(dst), nil) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) } // CmdStream executes a command, and returns its stdout as a stream. From cabd5528e22ee9b60d44f8911f30709e42272d07 Mon Sep 17 00:00:00 2001 From: Steven Burgess Date: Tue, 11 Nov 2014 18:01:08 -0500 Subject: [PATCH 42/99] Fix comment to match the arg name Signed-off-by: Steven Burgess --- archive/archive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/archive/archive.go b/archive/archive.go index d8f34d9..530ea30 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -455,7 +455,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `path`. +// and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
From 4652fdc2084753cff33651c77036fe5b45d0799f Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 30 Oct 2014 20:26:39 -0400 Subject: [PATCH 43/99] devmapper: use proper DM_UDEV_DISABLE_*_FLAG when creating the thin-pool Otherwise udev can unecessarily execute various rules (and issue scanning IO, etc) against the thin-pool -- which can never be a top-level device. Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- devicemapper/devmapper.go | 3 ++- devicemapper/devmapper_wrapper.go | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index c0b931c..3de42ba 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -361,7 +361,8 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { + var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.SetCookie(&cookie, flags); err != nil { return fmt.Errorf("Can't set cookie %s", err) } diff --git a/devicemapper/devmapper_wrapper.go b/devicemapper/devmapper_wrapper.go index c7e96a1..499405a 100644 --- a/devicemapper/devmapper_wrapper.go +++ b/devicemapper/devmapper_wrapper.go @@ -82,6 +82,12 @@ const ( LoNameSize = C.LO_NAME_SIZE ) +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG +) + var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct From 7f1cdd81f07c29bdf2b9b82bfacddf620e169bf0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Nov 2014 17:20:24 -0500 Subject: [PATCH 44/99] pkg/devicemapper: cleanup removeDevice differences Fixes failure on RemoveDevice when host is AUFS, and running devicemapper test docker-in-docker 
https://gist.github.com/tonistiigi/59559cbfb3f2df26b29c Signed-off-by: Vincent Batts --- devicemapper/devmapper.go | 44 ++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index c0b931c..87f5a1f 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -62,7 +62,7 @@ var ( ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") - ErrRunRemoveDevice = errors.New("running removeDevice failed") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") @@ -296,18 +296,27 @@ func GetLibraryVersion() (string, error) { // Useful helper for cleanup func RemoveDevice(name string) error { - // TODO(vbatts) just use the other removeDevice() - task := TaskCreate(DeviceRemove) + log.Debugf("[devmapper] RemoveDevice START") + defer log.Debugf("[devmapper] RemoveDevice END") + task, err := createTask(DeviceRemove, name) if task == nil { - return ErrCreateRemoveTask - } - if err := task.SetName(name); err != nil { - log.Debugf("Can't set task name %s", name) return err } - if err := task.Run(); err != nil { - return ErrRunRemoveDevice + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can not set cookie: %s", err) } + + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running RemoveDevice %s", err) + } + + UdevWait(cookie) + return nil } @@ -568,23 +577,6 @@ func DeleteDevice(poolName string, deviceId int) error { return nil } -func removeDevice(name string) error { - log.Debugf("[devmapper] RemoveDevice START") - defer 
log.Debugf("[devmapper] RemoveDevice END") - task, err := createTask(DeviceRemove, name) - if task == nil { - return err - } - dmSawBusy = false - if err = task.Run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("Error running RemoveDevice %s", err) - } - return nil -} - func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { task, err := createTask(DeviceCreate, name) if task == nil { From bc41337f3baff3264bca61a73947bad3dfc388b7 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Nov 2014 09:20:37 -0500 Subject: [PATCH 45/99] pkg/devicemapper: missed MAINTAINERS on split Signed-off-by: Vincent Batts --- devicemapper/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 devicemapper/MAINTAINERS diff --git a/devicemapper/MAINTAINERS b/devicemapper/MAINTAINERS new file mode 100644 index 0000000..4428dec --- /dev/null +++ b/devicemapper/MAINTAINERS @@ -0,0 +1 @@ +Vincent Batts (@vbatts) From 3bce14e7d02ce2d1ccdb93babb252485d0212cd1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Nov 2014 14:18:35 -0500 Subject: [PATCH 46/99] pkg/devicemapper: defer udev wait during removal Signed-off-by: Vincent Batts --- devicemapper/devmapper.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index 87f5a1f..4043da6 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -307,7 +307,9 @@ func RemoveDevice(name string) error { if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can not set cookie: %s", err) } + defer UdevWait(cookie) + dmSawBusy = false // reset before the task is run if err = task.Run(); err != nil { if dmSawBusy { return ErrBusy @@ -315,8 +317,6 @@ func RemoveDevice(name string) error { return fmt.Errorf("Error running RemoveDevice %s", err) } - UdevWait(cookie) - return nil } @@ -543,7 +543,7 @@ func CreateDevice(poolName string, deviceId *int) error { return 
fmt.Errorf("Can't set message %s", err) } - dmSawExist = false + dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { if dmSawExist { // Already exists, try next id @@ -638,7 +638,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic return fmt.Errorf("Can't set message %s", err) } - dmSawExist = false + dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { if dmSawExist { // Already exists, try next id From 2d644f8ab1e549c8b6b2db0e120c06bafa0abeff Mon Sep 17 00:00:00 2001 From: John Gossman Date: Thu, 23 Oct 2014 16:44:57 -0700 Subject: [PATCH 47/99] Refactor pkg/term package for Windows tty support Signed-off-by: John Gossman --- term/console_windows.go | 87 ++++++++++++++++++++++++++++++++++++++++ term/term.go | 2 + term/term_windows.go | 89 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+) create mode 100644 term/console_windows.go create mode 100644 term/term_windows.go diff --git a/term/console_windows.go b/term/console_windows.go new file mode 100644 index 0000000..6335b2b --- /dev/null +++ b/term/console_windows.go @@ -0,0 +1,87 @@ +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + // Consts for Get/SetConsoleMode function + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_WINDOW_INPUT = 0x0008 + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") +) + +func GetConsoleMode(fileDesc 
uintptr) (uint32, error) { + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode) + return mode, err +} + +func SetConsoleMode(fileDesc uintptr, mode uint32) error { + r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0) + if r == 0 { + if err != nil { + return err + } + return syscall.EINVAL + } + return nil +} + +// types for calling GetConsoleScreenBufferInfo +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + WORD uint16 + + CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize COORD + dwCursorPosition COORD + wAttributes WORD + srWindow SMALL_RECT + dwMaximumWindowSize COORD + } +) + +func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + var info CONSOLE_SCREEN_BUFFER_INFO + r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0) + if r == 0 { + if err != nil { + return nil, err + } + return nil, syscall.EINVAL + } + return &info, nil +} diff --git a/term/term.go b/term/term.go index ea94b44..553747a 100644 --- a/term/term.go +++ b/term/term.go @@ -1,3 +1,5 @@ +// +build !windows + package term import ( diff --git a/term/term_windows.go b/term/term_windows.go new file mode 100644 index 0000000..d372e86 --- /dev/null +++ b/term/term_windows.go @@ -0,0 +1,89 @@ +// +build windows + +package term + +type State struct { + mode uint32 +} + +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + var info *CONSOLE_SCREEN_BUFFER_INFO + info, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + ws.Height = uint16(info.srWindow.Right - info.srWindow.Left + 1) + ws.Width = uint16(info.srWindow.Bottom - info.srWindow.Top + 1) + + ws.x = 0 // todo azlinux -- 
this is the pixel size of the Window, and not currently used by any caller + ws.y = 0 + + return ws, nil +} + +func SetWinsize(fd uintptr, ws *Winsize) error { + return nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return SetConsoleMode(fd, state.mode) +} + +func SaveState(fd uintptr) (*State, error) { + mode, e := GetConsoleMode(fd) + if e != nil { + return nil, e + } + return &State{mode}, nil +} + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings +func DisableEcho(fd uintptr, state *State) error { + state.mode &^= (ENABLE_ECHO_INPUT) + state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + return SetConsoleMode(fd, state.mode) +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + // TODO (azlinux): implement handling interrupt and restore state of terminal + return oldState, err +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var state *State + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings + state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + err = SetConsoleMode(fd, state.mode) + if err != nil { + return nil, err + } + return state, nil +} From 91ea04eea7a3f6c51c380f66ed8c8b266a32a7b7 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 10:40:22 -0800 Subject: [PATCH 48/99] Extract client signals to pkg/signal SIGCHLD and SIGWINCH used in api/client (cli code) are not available on Windows. Extracting into separate files with build tags. Signed-off-by: Ahmet Alp Balkan --- signal/signal_unix.go | 12 ++++++++++++ signal/signal_windows.go | 12 ++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 signal/signal_unix.go create mode 100644 signal/signal_windows.go diff --git a/signal/signal_unix.go b/signal/signal_unix.go new file mode 100644 index 0000000..613e30e --- /dev/null +++ b/signal/signal_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.SIGCHLD +const SIGWINCH = syscall.SIGWINCH diff --git a/signal/signal_windows.go b/signal/signal_windows.go new file mode 100644 index 0000000..9f00b99 --- /dev/null +++ b/signal/signal_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.Signal(0xff) +const SIGWINCH = syscall.Signal(0xff) From 515e7481ded9baffe76f26630e9721f80b08c3de Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 12:00:04 -0800 Subject: [PATCH 49/99] Extract mknod, umask, lstat to 
pkg/system Some parts of pkg/archive is called on both client/daemon code. To get it compiling on Windows, these funcs are extracted into files with build tags. Signed-off-by: Ahmet Alp Balkan --- archive/archive.go | 2 +- archive/changes.go | 12 +++++++++++- archive/diff.go | 7 ------- system/lstat.go | 16 ++++++++++++++++ system/lstat_windows.go | 12 ++++++++++++ system/mknod.go | 18 ++++++++++++++++++ system/mknod_windows.go | 12 ++++++++++++ system/umask.go | 11 +++++++++++ system/umask_windows.go | 8 ++++++++ 9 files changed, 89 insertions(+), 9 deletions(-) create mode 100644 system/lstat.go create mode 100644 system/lstat_windows.go create mode 100644 system/mknod.go create mode 100644 system/mknod_windows.go create mode 100644 system/umask.go create mode 100644 system/umask_windows.go diff --git a/archive/archive.go b/archive/archive.go index 530ea30..85d2319 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -291,7 +291,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L mode |= syscall.S_IFIFO } - if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + if err := syscall.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } diff --git a/archive/changes.go b/archive/changes.go index 0a1f741..720d549 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -269,6 +269,14 @@ func newRootFileInfo() *FileInfo { return root } +func lstat(path string) (*stat, error) { + s, err := system.Lstat(path) + if err != nil { + return nil, err + } + return fromStatT(s), nil +} + func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() @@ -299,9 +307,11 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { parent: parent, } - if err := syscall.Lstat(path, &info.stat); err != nil { + s, err := lstat(path) + if err != nil { return err } + info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") 
diff --git a/archive/diff.go b/archive/diff.go index 215f62e..c208336 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -14,13 +14,6 @@ import ( "github.com/docker/docker/pkg/pools" ) -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { diff --git a/system/lstat.go b/system/lstat.go new file mode 100644 index 0000000..d7e06b3 --- /dev/null +++ b/system/lstat.go @@ -0,0 +1,16 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Lstat(path string) (*syscall.Stat_t, error) { + s := &syscall.Stat_t{} + err := syscall.Lstat(path, s) + if err != nil { + return nil, err + } + return s, nil +} diff --git a/system/lstat_windows.go b/system/lstat_windows.go new file mode 100644 index 0000000..f4c7e6d --- /dev/null +++ b/system/lstat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +import ( + "syscall" +) + +func Lstat(path string) (*syscall.Win32FileAttributeData, error) { + // should not be called on cli code path + return nil, ErrNotSupportedPlatform +} diff --git a/system/mknod.go b/system/mknod.go new file mode 100644 index 0000000..06f9c6a --- /dev/null +++ b/system/mknod.go @@ -0,0 +1,18 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/system/mknod_windows.go b/system/mknod_windows.go new file mode 100644 index 0000000..b4020c1 --- /dev/null +++ b/system/mknod_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + // should not be called on cli code path + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on windows, should not be called on cli code") +} diff --git a/system/umask.go b/system/umask.go new file mode 100644 index 0000000..fddbecd --- /dev/null +++ b/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/system/umask_windows.go b/system/umask_windows.go new file mode 100644 index 0000000..3be563f --- /dev/null +++ b/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} From 718066ad6154dd19912ecd18b0c4f2ae6d85e363 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 12:36:05 -0800 Subject: [PATCH 50/99] Refactor pkg/archive with a platform-independent stat struct pkg/archive contains code both invoked from cli (cross platform) and daemon (linux only) and Unix-specific dependencies break compilation on Windows. We extracted those stat-related funcs into platform specific implementations at pkg/system and added unit tests. 
Signed-off-by: Ahmet Alp Balkan --- archive/archive.go | 19 +++++------------ archive/archive_unix.go | 39 +++++++++++++++++++++++++++++++++++ archive/archive_windows.go | 12 +++++++++++ archive/changes.go | 38 ++++++++++------------------------ archive/diff.go | 11 +++++++--- system/lstat.go | 4 ++-- system/lstat_test.go | 25 +++++++++++++++++++++++ system/lstat_windows.go | 6 +----- system/stat.go | 42 ++++++++++++++++++++++++++++++++++++++ system/stat_linux.go | 13 ++++++------ system/stat_test.go | 34 ++++++++++++++++++++++++++++++ system/stat_unsupported.go | 19 +++++++++-------- system/stat_windows.go | 12 +++++++++++ 13 files changed, 209 insertions(+), 65 deletions(-) create mode 100644 archive/archive_unix.go create mode 100644 archive/archive_windows.go create mode 100644 system/lstat_test.go create mode 100644 system/stat.go create mode 100644 system/stat_test.go create mode 100644 system/stat_windows.go diff --git a/archive/archive.go b/archive/archive.go index 85d2319..5a81223 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -192,20 +192,11 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Name = name - var ( - nlink uint32 - inode uint64 - ) - if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - nlink = uint32(stat.Nlink) - inode = uint64(stat.Ino) - // Currently go does not fill in the major/minors - if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || - stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { - hdr.Devmajor = int64(major(uint64(stat.Rdev))) - hdr.Devminor = int64(minor(uint64(stat.Rdev))) - } + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err } + // if it's a regular file and has more than 1 link, // it's hardlinked, so set the type flag accordingly if fi.Mode().IsRegular() && nlink > 1 { @@ -291,7 +282,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L mode |= syscall.S_IFIFO } - if err := syscall.Mknod(path, mode, 
int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } diff --git a/archive/archive_unix.go b/archive/archive_unix.go new file mode 100644 index 0000000..c0e8aee --- /dev/null +++ b/archive/archive_unix.go @@ -0,0 +1,39 @@ +// +build !windows + +package archive + +import ( + "errors" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/archive/archive_windows.go b/archive/archive_windows.go new file mode 100644 index 0000000..3cc2493 --- /dev/null +++ b/archive/archive_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package archive + +import ( + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows + return +} diff --git a/archive/changes.go b/archive/changes.go index 720d549..85217f6 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -135,7 +135,7 @@ func Changes(layers []string, rw string) ([]Change, error) { type FileInfo struct { parent *FileInfo name string - stat syscall.Stat_t + stat *system.Stat children map[string]*FileInfo capability []byte added bool @@ -168,7 +168,7 @@ func (info *FileInfo) path() string { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { @@ -199,21 +199,21 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { oldChild, _ := oldChildren[name] if oldChild != nil { // change? - oldStat := &oldChild.stat - newStat := &newChild.stat + oldStat := oldChild.stat + newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. 
The only time this // breaks down is if some code intentionally hides a change by setting // back mtime - if oldStat.Mode != newStat.Mode || - oldStat.Uid != newStat.Uid || - oldStat.Gid != newStat.Gid || - oldStat.Rdev != newStat.Rdev || + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || - !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || + (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), @@ -269,14 +269,6 @@ func newRootFileInfo() *FileInfo { return root } -func lstat(path string) (*stat, error) { - s, err := system.Lstat(path) - if err != nil { - return nil, err - } - return fromStatT(s), nil -} - func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() @@ -307,7 +299,7 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { parent: parent, } - s, err := lstat(path) + s, err := system.Lstat(path) if err != nil { return err } @@ -369,14 +361,6 @@ func ChangesSize(newDir string, changes []Change) int64 { return size } -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - // ExportChanges produces an Archive from the provided changes, relative to dir. 
func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() diff --git a/archive/diff.go b/archive/diff.go index c208336..eabb7c4 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -12,16 +12,21 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" ) // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) + oldmask, err := system.Umask(0) + if err != nil { + return err + } - layer, err := DecompressStream(layer) + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + layer, err = DecompressStream(layer) if err != nil { return err } diff --git a/system/lstat.go b/system/lstat.go index d7e06b3..9ef82d5 100644 --- a/system/lstat.go +++ b/system/lstat.go @@ -6,11 +6,11 @@ import ( "syscall" ) -func Lstat(path string) (*syscall.Stat_t, error) { +func Lstat(path string) (*Stat, error) { s := &syscall.Stat_t{} err := syscall.Lstat(path, s) if err != nil { return nil, err } - return s, nil + return fromStatT(s) } diff --git a/system/lstat_test.go b/system/lstat_test.go new file mode 100644 index 0000000..7e271ef --- /dev/null +++ b/system/lstat_test.go @@ -0,0 +1,25 @@ +package system + +import ( + "testing" +) + +func TestLstat(t *testing.T) { + file, invalid, _ := prepareFiles(t) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/system/lstat_windows.go b/system/lstat_windows.go 
index f4c7e6d..213a7c7 100644 --- a/system/lstat_windows.go +++ b/system/lstat_windows.go @@ -2,11 +2,7 @@ package system -import ( - "syscall" -) - -func Lstat(path string) (*syscall.Win32FileAttributeData, error) { +func Lstat(path string) (*Stat, error) { // should not be called on cli code path return nil, ErrNotSupportedPlatform } diff --git a/system/stat.go b/system/stat.go new file mode 100644 index 0000000..5d47494 --- /dev/null +++ b/system/stat.go @@ -0,0 +1,42 @@ +package system + +import ( + "syscall" +) + +type Stat struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +func (s Stat) Mode() uint32 { + return s.mode +} + +func (s Stat) Uid() uint32 { + return s.uid +} + +func (s Stat) Gid() uint32 { + return s.gid +} + +func (s Stat) Rdev() uint64 { + return s.rdev +} + +func (s Stat) Size() int64 { + return s.size +} + +func (s Stat) Mtim() syscall.Timespec { + return s.mtim +} + +func (s Stat) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/system/stat_linux.go b/system/stat_linux.go index e702200..47cebef 100644 --- a/system/stat_linux.go +++ b/system/stat_linux.go @@ -4,10 +4,11 @@ import ( "syscall" ) -func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atim -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtim +func fromStatT(s *syscall.Stat_t) (*Stat, error) { + return &Stat{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil } diff --git a/system/stat_test.go b/system/stat_test.go new file mode 100644 index 0000000..0dcb239 --- /dev/null +++ b/system/stat_test.go @@ -0,0 +1,34 @@ +package system + +import ( + "syscall" + "testing" +) + +func TestFromStatT(t *testing.T) { + file, _, _ := prepareFiles(t) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + 
t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/system/stat_unsupported.go b/system/stat_unsupported.go index 4686a4c..c4d53e6 100644 --- a/system/stat_unsupported.go +++ b/system/stat_unsupported.go @@ -1,13 +1,16 @@ -// +build !linux +// +build !linux,!windows package system -import "syscall" +import ( + "syscall" +) -func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atimespec -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtimespec +func fromStatT(s *syscall.Stat_t) (*Stat, error) { + return &Stat{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil } diff --git a/system/stat_windows.go b/system/stat_windows.go new file mode 100644 index 0000000..584e894 --- /dev/null +++ b/system/stat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +import ( + "errors" + "syscall" +) + +func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) { + return nil, errors.New("fromStatT should not be called on windows path") +} From 17e6f792c7789974ce1a94421a12ae889ee75451 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 14 Nov 2014 18:15:56 -0500 Subject: [PATCH 51/99] devmapper: Call UdevWait() even in failure path Currently we set up a cookie and upon failure not call UdevWait(). This does not cleanup the cookie and associated semaphore and system will soon max out on total number of semaphores. To avoid this, call UdevWait() even in failure path which in turn will cleanup associated semaphore. 
Signed-off-by: Vivek Goyal Signed-off-by: Vincent Batts --- devicemapper/devmapper.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index 4043da6..16c0ac1 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -373,13 +373,12 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) } - UdevWait(cookie) - return nil } @@ -516,13 +515,12 @@ func ResumeDevice(name string) error { if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceResume %s", err) } - UdevWait(cookie) - return nil } @@ -596,12 +594,12 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) + if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) } - UdevWait(cookie) - return nil } From d2a55acf47e77dfb5af8cc0e06c8656566524006 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 17 Nov 2014 16:17:06 -0500 Subject: [PATCH 52/99] pkg/mount: testing mountinfo fields Signed-off-by: Vincent Batts --- mount/mountinfo_linux_test.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/mount/mountinfo_linux_test.go b/mount/mountinfo_linux_test.go index 3c21447..e92b7e2 100644 --- a/mount/mountinfo_linux_test.go +++ b/mount/mountinfo_linux_test.go @@ -446,3 +446,32 @@ func TestParseGentooMountinfo(t *testing.T) { t.Fatal(err) } } + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + 
infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := MountInfo{ + Id: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} From 64b1cbc28945f193a604ca1f52825f43b8f52041 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Nov 2014 15:46:03 -0500 Subject: [PATCH 53/99] pkg/tarsum: actually init the TarSum struct closes #9241 Signed-off-by: Vincent Batts --- tarsum/tarsum.go | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tarsum/tarsum.go b/tarsum/tarsum.go index 34386ff..ba09d4a 100644 --- a/tarsum/tarsum.go +++ b/tarsum/tarsum.go @@ -27,11 +27,7 @@ const ( // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. 
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil + return NewTarSumHash(r, dc, v, DefaultTHash) } // Create a new TarSum, providing a THash to use rather than the DefaultTHash @@ -40,7 +36,9 @@ func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) if err != nil { return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err } // TarSum is the generic interface for calculating fixed time @@ -134,12 +132,6 @@ func (ts *tarSum) initTarSum() error { } func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.writer == nil { - if err := ts.initTarSum(); err != nil { - return 0, err - } - } - if ts.finished { return ts.bufWriter.Read(buf) } From c91f8d09a5d5f7bfc47a402e66c926bb546c57b6 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 19 Nov 2014 12:15:20 -0800 Subject: [PATCH 54/99] Add unit test for tarSum.Sum() with no data Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- tarsum/tarsum_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tarsum/tarsum_test.go b/tarsum/tarsum_test.go index 1e06cda..c5dca6a 100644 --- a/tarsum/tarsum_test.go +++ b/tarsum/tarsum_test.go @@ -230,6 +230,17 @@ func TestEmptyTar(t *testing.T) { if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } } var ( From 2ada51fd7f264540950261ea846aa18717b24d74 Mon Sep 17 00:00:00 2001 From: Oh Jinkyun Date: Mon, 3 Nov 2014 20:11:29 +0900 Subject: [PATCH 55/99] Fix for #8777 Now filter name is trimmed and lowercased before evaluation for case insensitive and whitespace trimmed check. Signed-off-by: Oh Jinkyun --- parsers/filters/parse.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/parsers/filters/parse.go b/parsers/filters/parse.go index 4039592..8b045a3 100644 --- a/parsers/filters/parse.go +++ b/parsers/filters/parse.go @@ -29,7 +29,9 @@ func ParseFlag(arg string, prev Args) (Args, error) { } f := strings.SplitN(arg, "=", 2) - filters[f[0]] = append(filters[f[0]], f[1]) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) return filters, nil } From bdb6fee38c13ea1e3e50753e232416935de35ff2 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:33:15 +0200 Subject: [PATCH 56/99] pkg/system: fix cleanup in tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- system/lstat_test.go | 4 +++- system/stat_test.go | 4 +++- system/utimes_test.go | 7 ++++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/system/lstat_test.go b/system/lstat_test.go index 7e271ef..9bab4d7 100644 --- a/system/lstat_test.go +++ b/system/lstat_test.go @@ -1,11 +1,13 @@ package system import ( + "os" "testing" ) func TestLstat(t *testing.T) { - file, invalid, _ := prepareFiles(t) + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) statFile, err := Lstat(file) if err != nil { diff --git a/system/stat_test.go b/system/stat_test.go index 0dcb239..abcc8ea 100644 --- a/system/stat_test.go +++ 
b/system/stat_test.go @@ -1,12 +1,14 @@ package system import ( + "os" "syscall" "testing" ) func TestFromStatT(t *testing.T) { - file, _, _ := prepareFiles(t) + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) diff --git a/system/utimes_test.go b/system/utimes_test.go index 38e4020..1dea47c 100644 --- a/system/utimes_test.go +++ b/system/utimes_test.go @@ -8,7 +8,7 @@ import ( "testing" ) -func prepareFiles(t *testing.T) (string, string, string) { +func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) @@ -26,11 +26,12 @@ func prepareFiles(t *testing.T) (string, string, string) { t.Fatal(err) } - return file, invalid, symlink + return file, invalid, symlink, dir } func TestLUtimesNano(t *testing.T) { - file, invalid, symlink := prepareFiles(t) + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) before, err := os.Stat(file) if err != nil { From a590874f1949d7f98a253ea5b207ccc04a5edf58 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:34:35 +0200 Subject: [PATCH 57/99] pkg/archive: fix TempArchive cleanup w/ one read This fixes the removal of TempArchives which can read with only one read. Such archives weren't getting removed because EOF wasn't being triggered. 
Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- archive/archive.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 5a81223..9956681 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -742,17 +742,20 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return nil, err } size := st.Size() - return &TempArchive{f, size}, nil + return &TempArchive{f, size, 0}, nil } type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) - if err != nil { + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.File.Close() os.Remove(archive.File.Name()) } return n, err From 895e64b880aee1142986b223ee605f5ee7acd7e3 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:36:54 +0200 Subject: [PATCH 58/99] pkg/symlink: fix cleanup for tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- symlink/fs_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/symlink/fs_test.go b/symlink/fs_test.go index d85fd6d..cc0d82d 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -46,6 +46,7 @@ func TestFollowSymLinkUnderLinkedDir(t *testing.T) { if err != nil { t.Fatal(err) } + defer os.RemoveAll(dir) os.Mkdir(filepath.Join(dir, "realdir"), 0700) os.Symlink("realdir", filepath.Join(dir, "linkdir")) From 869842478eeb700ba04c2d8a75017a472d2f75eb Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 20 Nov 2014 14:22:22 -0800 Subject: [PATCH 59/99] Revert "Support hairpin NAT" This reverts commit 95a400e6e1a3b5da68431e64f9902a3fac218360. 
Signed-off-by: Michael Crosby --- iptables/iptables.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/iptables/iptables.go b/iptables/iptables.go index b550837..53e6e14 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -73,6 +73,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), + "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err @@ -96,17 +97,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str return fmt.Errorf("Error iptables forward: %s", output) } - if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING", - "-p", proto, - "-s", dest_addr, - "-d", dest_addr, - "--dport", strconv.Itoa(dest_port), - "-j", "MASQUERADE"); err != nil { - return err - } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) - } - return nil } From 0a3565494be295996ead004a32c8b7fe6b10485a Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 20 Nov 2014 07:29:04 -0800 Subject: [PATCH 60/99] Make --tlsverify enable tls regardless of value specified I also needed to add a mflag.IsSet() function that allows you to check to see if a certain flag was actually specified on the cmd line. Per #9221 - also tweaked the docs to fix a typo. Closes #9221 Signed-off-by: Doug Davis --- mflag/flag.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mflag/flag.go b/mflag/flag.go index b40f911..c9061c2 100644 --- a/mflag/flag.go +++ b/mflag/flag.go @@ -394,12 +394,22 @@ func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func (f *FlagSet) IsSet(name string) bool { + return f.actual[name] != nil +} + // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. 
func Lookup(name string) *Flag { return CommandLine.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + // Set sets the value of the named flag. func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] From 26f399ddf3405216dd31419f2627fff8f3204616 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 20 Nov 2014 16:07:55 -0800 Subject: [PATCH 61/99] Typed errors for iptables chain raw command output. YAYYYYYY. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- iptables/iptables.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/iptables/iptables.go b/iptables/iptables.go index 53e6e14..b783347 100644 --- a/iptables/iptables.go +++ b/iptables/iptables.go @@ -20,9 +20,9 @@ const ( ) var ( - ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} supportsXlock = false + ErrIptablesNotFound = errors.New("Iptables not found") ) type Chain struct { @@ -30,6 +30,15 @@ type Chain struct { Bridge string } +type ChainError struct { + Chain string + Output []byte +} + +func (e *ChainError) Error() string { + return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) +} + func init() { supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil } @@ -78,7 +87,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return &ChainError{Chain: "FORWARD", Output: output} } fAction := action @@ -94,7 +103,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return 
&ChainError{Chain: "FORWARD", Output: output} } return nil @@ -108,7 +117,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables prerouting: %s", output) + return &ChainError{Chain: "PREROUTING", Output: output} } return nil } @@ -121,7 +130,7 @@ func (c *Chain) Output(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables output: %s", output) + return &ChainError{Chain: "OUTPUT", Output: output} } return nil } From 92251ceb7bb7a104a2f715ee553af1fbc5e411b8 Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 21 Nov 2014 22:12:03 +0900 Subject: [PATCH 62/99] Use termios via CGO Signed-off-by: Yohei Ueda --- term/term.go | 10 ++++----- term/term_cgo.go | 47 +++++++++++++++++++++++++++++++++++++++++ term/term_nocgo.go | 18 ++++++++++++++++ term/termios_darwin.go | 2 ++ term/termios_freebsd.go | 2 ++ term/termios_linux.go | 2 ++ 6 files changed, 75 insertions(+), 6 deletions(-) create mode 100644 term/term_cgo.go create mode 100644 term/term_nocgo.go diff --git a/term/term.go b/term/term.go index 553747a..8d807d8 100644 --- a/term/term.go +++ b/term/term.go @@ -47,8 +47,7 @@ func SetWinsize(fd uintptr, ws *Winsize) error { // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd uintptr) bool { var termios Termios - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&termios))) - return err == 0 + return tcget(fd, &termios) == 0 } // Restore restores the terminal connected to the given file descriptor to a @@ -57,8 +56,7 @@ func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios))) - if err != 0 { + if err := tcset(fd, &state.termios); err != 0 { return err } return nil @@ -66,7 +64,7 @@ func RestoreTerminal(fd uintptr, state *State) error { func SaveState(fd uintptr) (*State, error) { var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } @@ -77,7 +75,7 @@ func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if err := tcset(fd, &newState); err != 0 { return err } handleInterrupt(fd, state) diff --git a/term/term_cgo.go b/term/term_cgo.go new file mode 100644 index 0000000..ddf080c --- /dev/null +++ b/term/term_cgo.go @@ -0,0 +1,47 @@ +// +build !windows,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/term/term_nocgo.go b/term/term_nocgo.go new file mode 100644 index 0000000..c211c39 --- /dev/null +++ b/term/term_nocgo.go @@ -0,0 +1,18 @@ +// +build !windows,!cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/term/termios_darwin.go b/term/termios_darwin.go index 11cd70d..2640e8b 100644 --- a/term/termios_darwin.go +++ b/term/termios_darwin.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( diff --git a/term/termios_freebsd.go b/term/termios_freebsd.go index ed36595..969beda 100644 --- a/term/termios_freebsd.go +++ b/term/termios_freebsd.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( diff --git a/term/termios_linux.go b/term/termios_linux.go index 4a717c8..024187f 100644 --- a/term/termios_linux.go +++ b/term/termios_linux.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( From 6948710cc013d8340f8f3294f391678900f04fe0 Mon Sep 17 00:00:00 2001 From: Vincent 
Batts Date: Fri, 14 Nov 2014 09:33:13 -0500 Subject: [PATCH 63/99] pkg/devicemapper: clarify TaskCreate and createTask * Rename and expose createTask() to TaskCreateNamed() * add comments Signed-off-by: Vincent Batts --- devicemapper/devmapper.go | 55 +++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index 16c0ac1..e5c99ae 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -63,7 +63,7 @@ var ( ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrInvalidAddNode = errors.New("Invalid AddNode type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrBusy = errors.New("Device is Busy") @@ -104,6 +104,20 @@ func (t *Task) destroy() { } } +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { @@ -298,7 +312,7 @@ func GetLibraryVersion() (string, error) { func RemoveDevice(name string) error { log.Debugf("[devmapper] RemoveDevice START") defer log.Debugf("[devmapper] RemoveDevice END") - task, err := createTask(DeviceRemove, name) + task, err := TaskCreateNamed(DeviceRemove, name) if task == nil { return err } @@ -354,7 +368,7 @@ 
func BlockDeviceDiscard(path string) error { // This is the programmatic example of "dmsetup create" func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceCreate, poolName) + task, err := TaskCreateNamed(DeviceCreate, poolName) if task == nil { return err } @@ -383,7 +397,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceReload, poolName) + task, err := TaskCreateNamed(DeviceReload, poolName) if task == nil { return err } @@ -405,19 +419,8 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return nil } -func createTask(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("Can't create task of type %d", int(t)) - } - if err := task.SetName(name); err != nil { - return nil, fmt.Errorf("Can't set task name %s", name) - } - return task, nil -} - func GetDeps(name string) (*Deps, error) { - task, err := createTask(DeviceDeps, name) + task, err := TaskCreateNamed(DeviceDeps, name) if task == nil { return nil, err } @@ -428,7 +431,7 @@ func GetDeps(name string) (*Deps, error) { } func GetInfo(name string) (*Info, error) { - task, err := createTask(DeviceInfo, name) + task, err := TaskCreateNamed(DeviceInfo, name) if task == nil { return nil, err } @@ -450,9 +453,9 @@ func GetDriverVersion() (string, error) { } func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := createTask(DeviceStatus, name) + task, err := TaskCreateNamed(DeviceStatus, name) if task == nil { - log.Debugf("GetStatus: Error createTask: %s", err) + log.Debugf("GetStatus: Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { @@ -475,7 +478,7 @@ func GetStatus(name string) (uint64, uint64, string, string, error) { } 
func SetTransactionId(poolName string, oldId uint64, newId uint64) error { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -495,7 +498,7 @@ func SetTransactionId(poolName string, oldId uint64, newId uint64) error { } func SuspendDevice(name string) error { - task, err := createTask(DeviceSuspend, name) + task, err := TaskCreateNamed(DeviceSuspend, name) if task == nil { return err } @@ -506,7 +509,7 @@ func SuspendDevice(name string) error { } func ResumeDevice(name string) error { - task, err := createTask(DeviceResume, name) + task, err := TaskCreateNamed(DeviceResume, name) if task == nil { return err } @@ -528,7 +531,7 @@ func CreateDevice(poolName string, deviceId *int) error { log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) for { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -556,7 +559,7 @@ func CreateDevice(poolName string, deviceId *int) error { } func DeleteDevice(poolName string, deviceId int) error { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -576,7 +579,7 @@ func DeleteDevice(poolName string, deviceId int) error { } func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { - task, err := createTask(DeviceCreate, name) + task, err := TaskCreateNamed(DeviceCreate, name) if task == nil { return err } @@ -614,7 +617,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } for { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { if doSuspend { ResumeDevice(baseName) From 0b5fa520c25bbb010237fb70fdec753a7bbb5376 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Sat, 22 Nov 2014 05:25:57 -0800 Subject: 
[PATCH 64/99] Add missing unit testcase for new IsSet() func in mflag Forgot to add this when I did PR #9259 Signed-off-by: Doug Davis --- mflag/flag_test.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/mflag/flag_test.go b/mflag/flag_test.go index 340a1cb..622e8a9 100644 --- a/mflag/flag_test.go +++ b/mflag/flag_test.go @@ -168,11 +168,14 @@ func testParse(f *FlagSet, t *testing.T) { } boolFlag := f.Bool([]string{"bool"}, false, "bool value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + f.Bool([]string{"bool3"}, false, "bool3 value") + bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") intFlag := f.Int([]string{"-int"}, 0, "int value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") + f.String([]string{"string2"}, "0", "string2 value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") @@ -185,6 +188,7 @@ func testParse(f *FlagSet, t *testing.T) { args := []string{ "-bool", "-bool2=true", + "-bool4=false", "--int", "22", "--int64", "0x23", "-uint", "24", @@ -212,6 +216,18 @@ func testParse(f *FlagSet, t *testing.T) { if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } + if !f.IsSet("bool2") { + t.Error("bool2 should be marked as set") + } + if f.IsSet("bool3") { + t.Error("bool3 should not be marked as set") + } + if !f.IsSet("bool4") { + t.Error("bool4 should be marked as set") + } + if *bool4Flag != false { + t.Error("bool4 flag should be false, is ", *bool4Flag) + } if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } @@ -227,6 +243,12 @@ func testParse(f *FlagSet, t *testing.T) { if *stringFlag != 
"hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } + if !f.IsSet("string") { + t.Error("string flag should be marked as set") + } + if f.IsSet("string2") { + t.Error("string2 flag should not be marked as set") + } if *singleQuoteFlag != "single" { t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) } From 2746675b42bb4a536ddd7c483d724ad063d37484 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 28 Oct 2014 23:18:45 +0200 Subject: [PATCH 65/99] pkg/symlink: avoid following out of scope Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- symlink/fs.go | 47 +++++++++---- symlink/fs_test.go | 150 ++++++++++++++++++++++++++++++++++++---- symlink/testdata/fs/j/k | 1 + 3 files changed, 171 insertions(+), 27 deletions(-) create mode 120000 symlink/testdata/fs/j/k diff --git a/symlink/fs.go b/symlink/fs.go index d761732..6ce99c6 100644 --- a/symlink/fs.go +++ b/symlink/fs.go @@ -12,6 +12,12 @@ const maxLoopCounter = 100 // FollowSymlink will follow an existing link and scope it to the root // path provided. +// The role of this function is to return an absolute path in the root +// or normalize to the root if the symlink leads to a path which is +// outside of the root. +// Errors encountered while attempting to follow the symlink in path +// will be reported. +// Normalizations to the root don't constitute errors. 
func FollowSymlinkInScope(link, root string) (string, error) { root, err := filepath.Abs(root) if err != nil { @@ -60,25 +66,36 @@ func FollowSymlinkInScope(link, root string) (string, error) { } return "", err } - if stat.Mode()&os.ModeSymlink == os.ModeSymlink { - dest, err := os.Readlink(prev) - if err != nil { - return "", err - } - if path.IsAbs(dest) { - prev = filepath.Join(root, dest) - } else { - prev, _ = filepath.Abs(prev) - - if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) { - prev = filepath.Join(root, filepath.Base(dest)) - } - } - } else { + // let's break if we're not dealing with a symlink + if stat.Mode()&os.ModeSymlink != os.ModeSymlink { break } + + // process the symlink + dest, err := os.Readlink(prev) + if err != nil { + return "", err + } + + if path.IsAbs(dest) { + prev = filepath.Join(root, dest) + } else { + prev, _ = filepath.Abs(prev) + + dir := filepath.Dir(prev) + prev = filepath.Join(dir, dest) + if dir == root && !strings.HasPrefix(prev, root) { + prev = root + } + if len(prev) < len(root) || (len(prev) == len(root) && prev != root) { + prev = filepath.Join(root, filepath.Base(dest)) + } + } } } + if prev == "/" { + prev = root + } return prev, nil } diff --git a/symlink/fs_test.go b/symlink/fs_test.go index cc0d82d..0e2f948 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -98,25 +98,151 @@ func TestFollowSymLinkRelativeLink(t *testing.T) { } func TestFollowSymLinkRelativeLinkScope(t *testing.T) { - link := "testdata/fs/a/f" + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, 
rewrite) + } } - if expected := abs(t, "testdata/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } } - link = "testdata/fs/b/h" + // avoid letting symlink g (pointed at by symlink h) take us out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + { + link := "testdata/fs/b/h" - rewrite, err = FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/root"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } } - if expected := abs(t, "testdata/root"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + // avoid allowing symlink e to lead us to ../b + // normalize to "testdata/fs/a" + { + link := "testdata/fs/a/e" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + { + link := "testdata/fs/j/k" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/j"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // make sure we don't allow escaping to / + // normalize 
to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we don't allow escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } } } diff --git a/symlink/testdata/fs/j/k b/symlink/testdata/fs/j/k new file mode 120000 index 0000000..f559e8f --- /dev/null +++ b/symlink/testdata/fs/j/k @@ -0,0 +1 @@ +../i/a \ No newline at end of file From e19f49915fc5fed9d7ac61747ccd26b4cbbe4208 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 29 Oct 2014 21:06:51 +0200 Subject: [PATCH 66/99] add pkg/chrootarchive and use it on the daemon Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: builder/internals.go daemon/graphdriver/aufs/aufs.go daemon/volumes.go fixed conflicts in imports --- 
chrootarchive/archive.go | 76 +++++++++++++++++++++++++++++++++++ chrootarchive/diff.go | 38 ++++++++++++++++++ chrootarchive/init.go | 18 +++++++++ reexec/command_linux.go | 18 +++++++++ reexec/command_unsupported.go | 11 +++++ reexec/reexec.go | 3 -- 6 files changed, 161 insertions(+), 3 deletions(-) create mode 100644 chrootarchive/archive.go create mode 100644 chrootarchive/diff.go create mode 100644 chrootarchive/init.go create mode 100644 reexec/command_linux.go create mode 100644 reexec/command_unsupported.go diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go new file mode 100644 index 0000000..f1df57c --- /dev/null +++ b/chrootarchive/archive.go @@ -0,0 +1,76 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "io" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func untar() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + if err := archive.Untar(os.Stdin, "/", nil); err != nil { + fatal(err) + } + os.Exit(0) +} + +var ( + chrootArchiver = &archive.Archiver{Untar} +) + +func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := os.MkdirAll(dest, 0777); err != nil { + return err + } + } + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = archive + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Untar %s %s", err, out) + } + return nil +} + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
+func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go new file mode 100644 index 0000000..2133200 --- /dev/null +++ b/chrootarchive/diff.go @@ -0,0 +1,38 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func applyLayer() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + if err := archive.ApplyLayer("/", os.Stdin); err != nil { + fatal(err) + } + os.Exit(0) +} + +func ApplyLayer(dest string, layer archive.ArchiveReader) error { + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("ApplyLayer %s %s", err, out) + } + return nil +} diff --git a/chrootarchive/init.go b/chrootarchive/init.go new file mode 100644 index 0000000..b548e9f --- /dev/null +++ b/chrootarchive/init.go @@ -0,0 +1,18 @@ +package chrootarchive + +import ( + "fmt" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-untar", untar) + reexec.Register("docker-applyLayer", applyLayer) +} + +func fatal(err error) { + 
fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff --git a/reexec/command_linux.go b/reexec/command_linux.go new file mode 100644 index 0000000..8dc3f3a --- /dev/null +++ b/reexec/command_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/reexec/command_unsupported.go b/reexec/command_unsupported.go new file mode 100644 index 0000000..a579318 --- /dev/null +++ b/reexec/command_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package reexec + +import ( + "os/exec" +) + +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/reexec/reexec.go b/reexec/reexec.go index 136b905..774e71c 100644 --- a/reexec/reexec.go +++ b/reexec/reexec.go @@ -27,19 +27,16 @@ func Init() bool { return true } - return false } // Self returns the path to the current processes binary func Self() string { name := os.Args[0] - if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { name = lp } } - return name } From 466e44195a5b3fb5a75dba938a2c4d6c262bb6b1 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Sat, 8 Nov 2014 10:38:42 -0500 Subject: [PATCH 67/99] pkg/chrootarchive: pass TarOptions via CLI arg Signed-off-by: Tibor Vass Conflicts: graph/load.go fixed conflict in imports --- chrootarchive/archive.go | 18 ++++++++++++++-- chrootarchive/archive_test.go | 39 +++++++++++++++++++++++++++++++++++ chrootarchive/init.go | 1 + 3 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 chrootarchive/archive_test.go diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index f1df57c..fc2bea2 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -1,11 +1,14 @@ package chrootarchive import ( + "bytes" + "encoding/json" "flag" "fmt" "io" "os" "runtime" + "strings" "syscall" 
"github.com/docker/docker/pkg/archive" @@ -22,7 +25,12 @@ func untar() { if err := syscall.Chdir("/"); err != nil { fatal(err) } - if err := archive.Untar(os.Stdin, "/", nil); err != nil { + options := new(archive.TarOptions) + dec := json.NewDecoder(strings.NewReader(flag.Arg(1))) + if err := dec.Decode(options); err != nil { + fatal(err) + } + if err := archive.Untar(os.Stdin, "/", options); err != nil { fatal(err) } os.Exit(0) @@ -33,12 +41,18 @@ var ( ) func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(options); err != nil { + return fmt.Errorf("Untar json encode: %v", err) + } if _, err := os.Stat(dest); os.IsNotExist(err) { if err := os.MkdirAll(dest, 0777); err != nil { return err } } - cmd := reexec.Command("docker-untar", dest) + + cmd := reexec.Command("docker-untar", dest, buf.String()) cmd.Stdin = archive out, err := cmd.CombinedOutput() if err != nil { diff --git a/chrootarchive/archive_test.go b/chrootarchive/archive_test.go new file mode 100644 index 0000000..aeac448 --- /dev/null +++ b/chrootarchive/archive_test.go @@ -0,0 +1,39 @@ +package chrootarchive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(dest, 0700); err != nil 
{ + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{Excludes: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} diff --git a/chrootarchive/init.go b/chrootarchive/init.go index b548e9f..f05698f 100644 --- a/chrootarchive/init.go +++ b/chrootarchive/init.go @@ -10,6 +10,7 @@ import ( func init() { reexec.Register("docker-untar", untar) reexec.Register("docker-applyLayer", applyLayer) + reexec.Init() } func fatal(err error) { From 5343f641d3eeec1e898ebac0e6720018e47db6b5 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 11 Nov 2014 13:02:14 +0200 Subject: [PATCH 68/99] don't call reexec.Init from chrootarchive Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: daemon/graphdriver/aufs/aufs_test.go fixed conflict caused by imports --- chrootarchive/archive_test.go | 5 +++++ chrootarchive/init.go | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/chrootarchive/archive_test.go b/chrootarchive/archive_test.go index aeac448..69e18e3 100644 --- a/chrootarchive/archive_test.go +++ b/chrootarchive/archive_test.go @@ -7,8 +7,13 @@ import ( "testing" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" ) +func init() { + reexec.Init() +} + func TestChrootTarUntar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { diff --git a/chrootarchive/init.go b/chrootarchive/init.go index f05698f..b548e9f 100644 --- a/chrootarchive/init.go +++ b/chrootarchive/init.go @@ -10,7 +10,6 @@ import ( func init() { reexec.Register("docker-untar", untar) reexec.Register("docker-applyLayer", applyLayer) - reexec.Init() } func fatal(err error) { From 1752a203afca295f271a2cc6b907fff366c13642 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 20 Oct 2014 15:35:48 -0400 Subject: [PATCH 69/99] archive: add breakout tests Signed-off-by: Tibor Vass Conflicts: pkg/archive/archive.go fixed conflict which git couldn't fix with the added BreakoutError Conflicts: 
pkg/archive/archive_test.go fixed conflict in imports --- archive/archive.go | 5 ++ archive/archive_test.go | 192 +++++++++++++++++++++++++++++++++++++++- archive/diff_test.go | 191 +++++++++++++++++++++++++++++++++++++++ archive/utils_test.go | 166 ++++++++++++++++++++++++++++++++++ 4 files changed, 553 insertions(+), 1 deletion(-) create mode 100644 archive/diff_test.go create mode 100644 archive/utils_test.go diff --git a/archive/archive.go b/archive/archive.go index 9956681..d90dfcf 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -42,6 +42,11 @@ type ( Archiver struct { Untar func(io.Reader, string, *TarOptions) error } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error ) var ( diff --git a/archive/archive_test.go b/archive/archive_test.go index 3516aca..36abdb9 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path" + "path/filepath" "syscall" "testing" "time" @@ -214,7 +215,12 @@ func TestTarWithOptions(t *testing.T) { // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) if err != nil { t.Fatal(err) } @@ -403,3 +409,187 @@ func BenchmarkTarUntarWithLinks(b *testing.B) { os.RemoveAll(target) } } + +func TestUntarInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: 
"/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff --git a/archive/diff_test.go b/archive/diff_test.go new file mode 100644 index 0000000..758c411 --- /dev/null +++ b/archive/diff_test.go @@ -0,0 +1,191 @@ +package archive + +import ( + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: 
"loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff --git a/archive/utils_test.go b/archive/utils_test.go new file mode 100644 index 0000000..3624fe5 --- /dev/null +++ b/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + return ApplyLayer(dest, ArchiveReader(r)) + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. 
+ names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. 
+ return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} From aa62eca9404461184562cc7026ae51e7fe3a2f7c Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 20 Oct 2014 15:36:28 -0400 Subject: [PATCH 70/99] archive: prevent breakout in Untar Signed-off-by: Tibor Vass --- archive/archive.go | 22 +++++++++++++++++++++- symlink/fs.go | 4 +++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index d90dfcf..67eb0be 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -22,6 +22,7 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" ) @@ -292,11 +293,23 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeLink: - if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: + // check for symlink breakout + if _, err := symlink.FollowSymlinkInScope(filepath.Join(filepath.Dir(path), hdr.Linkname), extractDir); err != nil { + if _, ok := err.(symlink.ErrBreakout); ok { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + return err + } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } @@ -456,6 +469,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
func Untar(archive io.Reader, dest string, options *TarOptions) error { + dest = filepath.Clean(dest) + if options == nil { options = &TarOptions{} } @@ -493,6 +508,7 @@ loop: } // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/" hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.Excludes { @@ -513,7 +529,11 @@ loop: } } + // Prevent symlink breakout path := filepath.Join(dest, hdr.Name) + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from diff --git a/symlink/fs.go b/symlink/fs.go index 6ce99c6..09271ff 100644 --- a/symlink/fs.go +++ b/symlink/fs.go @@ -10,6 +10,8 @@ import ( const maxLoopCounter = 100 +type ErrBreakout error + // FollowSymlink will follow an existing link and scope it to the root // path provided. // The role of this function is to return an absolute path in the root @@ -34,7 +36,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } if !strings.HasPrefix(filepath.Dir(link), root) { - return "", fmt.Errorf("%s is not within %s", link, root) + return "", ErrBreakout(fmt.Errorf("%s is not within %s", link, root)) } prev := "/" From a80a838e6f8799d7f7275e79e044f5c46fc3054a Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 31 Oct 2014 13:18:39 -0400 Subject: [PATCH 71/99] archive: prevent breakout in ApplyLayer Signed-off-by: Tibor Vass --- archive/diff.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/archive/diff.go b/archive/diff.go index eabb7c4..856cedc 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -18,6 +18,8 @@ import ( // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. 
func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + // We need to be able to set any perms oldmask, err := system.Umask(0) if err != nil { @@ -91,6 +93,12 @@ func ApplyLayer(dest string, layer ArchiveReader) error { path := filepath.Join(dest, hdr.Name) base := filepath.Base(path) + + // Prevent symlink breakout + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } + if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] originalPath := filepath.Join(filepath.Dir(path), originalBase) From 78bd3c03561969fc159149aa7d5eea17644ff385 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 18 Nov 2014 23:33:13 +0200 Subject: [PATCH 72/99] pkg/chrootarchive: provide TMPDIR for ApplyLayer Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- chrootarchive/diff.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go index 2133200..2653aef 100644 --- a/chrootarchive/diff.go +++ b/chrootarchive/diff.go @@ -3,6 +3,7 @@ package chrootarchive import ( "flag" "fmt" + "io/ioutil" "os" "runtime" "syscall" @@ -21,9 +22,16 @@ func applyLayer() { if err := syscall.Chdir("/"); err != nil { fatal(err) } - if err := archive.ApplyLayer("/", os.Stdin); err != nil { + tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") + if err != nil { fatal(err) } + os.Setenv("TMPDIR", tmpDir) + if err := archive.ApplyLayer("/", os.Stdin); err != nil { + os.RemoveAll(tmpDir) + fatal(err) + } + os.RemoveAll(tmpDir) os.Exit(0) } From bdff6d8011388c31f8e30fd70adc1c26c077983e Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 19 Nov 2014 11:27:34 -0500 Subject: [PATCH 73/99] archive: do not call FollowSymlinkInScope in createTarFile Signed-off-by: Tibor Vass --- archive/archive.go | 15 ++++++++------- archive/archive_test.go | 14 ++++++++++++++ symlink/fs.go | 4 +--- 3 files changed, 23 insertions(+), 10 
deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 67eb0be..aaeed31 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -22,7 +22,6 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" ) @@ -303,12 +302,14 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeSymlink: - // check for symlink breakout - if _, err := symlink.FollowSymlinkInScope(filepath.Join(filepath.Dir(path), hdr.Linkname), extractDir); err != nil { - if _, ok := err.(symlink.ErrBreakout); ok { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - return err + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err diff --git a/archive/archive_test.go b/archive/archive_test.go index 36abdb9..05362a2 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -587,6 +587,20 @@ func TestUntarInvalidSymlink(t *testing.T) { Mode: 0644, }, }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, } { if err := testBreakout("untar", 
"docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) diff --git a/symlink/fs.go b/symlink/fs.go index 09271ff..6ce99c6 100644 --- a/symlink/fs.go +++ b/symlink/fs.go @@ -10,8 +10,6 @@ import ( const maxLoopCounter = 100 -type ErrBreakout error - // FollowSymlink will follow an existing link and scope it to the root // path provided. // The role of this function is to return an absolute path in the root @@ -36,7 +34,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } if !strings.HasPrefix(filepath.Dir(link), root) { - return "", ErrBreakout(fmt.Errorf("%s is not within %s", link, root)) + return "", fmt.Errorf("%s is not within %s", link, root) } prev := "/" From d7eadc78dc7b069b4793a363d49191267653ff15 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Nov 2014 18:10:37 -0500 Subject: [PATCH 74/99] Move git and url checks into pkg This moves the IsGIT and IsURL functions out of the generic `utils` package and into their own `urlutil` pkg. Signed-off-by: Michael Crosby --- urlutil/git.go | 30 ++++++++++++++++++++++++++++++ urlutil/git_test.go | 43 +++++++++++++++++++++++++++++++++++++++++++ urlutil/url.go | 19 +++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 urlutil/git.go create mode 100644 urlutil/git_test.go create mode 100644 urlutil/url.go diff --git a/urlutil/git.go b/urlutil/git.go new file mode 100644 index 0000000..ba88ddf --- /dev/null +++ b/urlutil/git.go @@ -0,0 +1,30 @@ +package urlutil + +import "strings" + +var ( + validPrefixes = []string{ + "git://", + "github.com/", + "git@", + } +) + +// IsGitURL returns true if the provided str is a git repository URL. 
+func IsGitURL(str string) bool { + if IsURL(str) && strings.HasSuffix(str, ".git") { + return true + } + for _, prefix := range validPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/urlutil/git_test.go b/urlutil/git_test.go new file mode 100644 index 0000000..01dcea7 --- /dev/null +++ b/urlutil/git_test.go @@ -0,0 +1,43 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } +} diff --git a/urlutil/url.go b/urlutil/url.go new file mode 100644 index 0000000..eeae56e --- /dev/null +++ b/urlutil/url.go @@ -0,0 +1,19 @@ +package urlutil + +import "strings" + +var validUrlPrefixes = []string{ + "http://", + "https://", +} + +// IsURL returns true if the provided str is a valid URL by doing +// a simple change for the 
transport of the url. +func IsURL(str string) bool { + for _, prefix := range validUrlPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} From 7c1b9831df0eb80501d05bf3d1500fecd7dbb82b Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 24 Oct 2014 16:23:50 -0400 Subject: [PATCH 75/99] pkg/tarsum: specification on TarSum checksum Signed-off-by: Vincent Batts --- tarsum/tarsum_spec.md | 228 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 tarsum/tarsum_spec.md diff --git a/tarsum/tarsum_spec.md b/tarsum/tarsum_spec.md new file mode 100644 index 0000000..bffd44a --- /dev/null +++ b/tarsum/tarsum_spec.md @@ -0,0 +1,228 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithm used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on file system layers, the need for this method over existing +methods, and the versioning of this calculation. + + +## Introduction + +The transportation of file systems, regarding docker, is done with tar(1) +archives. Types of transpiration include distribution to and from a registry +endpoint, saving and loading through commands or docker daemon APIs, +transferring the build context from client to docker daemon, and committing the +file system of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved file system, +while maintaining a deterministic accountability. This includes neither +constrain the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. 
+ + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for file systems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + + +## Concept + +The checksum mechanism must ensure the integrity and confidentiality of the +file system payload. + + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* associated hashing cipher - used to checksum each file payload and attribute + information. +* checksum list - each file of the file system archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* archive being calculated - the tar archive having its checksum calculated + + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and block cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from block cipher +* ':' separates calculation mechanics from expected hash + +Example: + + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| + + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. 
+ +The general algorithm will be described further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the checksum "tarsum" + + +### Version1 + +Its element in the checksum "tarsum.v1" + +The notable changes in this version: +* exclusion of file mtime from the file information headers, in each file + checksum calculation +* inclusion of extended attributes (xattrs. Also seen as "SCHILY.xattr." prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the checksum "tarsum.dev" + +This is a floating place holder for a next version. The methods used for +calculation are subject to change without notice. + +## Ciphers + +The official default and standard block cipher used in the calculation mechanic +is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the algorithm itself is not exclusively bound to this single block +cipher, and support for alternate block ciphers was later added [1]. Presently +use of this is for isolated use-cases and future-proofing the TarSum checksum +format. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the life and cycle of the tar archive. In that the tar archive is not an +immutable, permanent artifact. Otherwise options like relying on a known block +cipher checksum of the archive itself would be reliable enough. Since the tar +archive is used as a transportation medium, and is thrown away after its +contents are extracted. Therefore, for consistent validation items such as +order of files in the tar archive and time stamps are subject to change once an +image is received. + + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. 
+ +#### Files + +Each file in the tar archive has its contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order (and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) are included after the above list. These xattrs key/values are first +sorted by the keys. + + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + + +#### Body + +After the order headers of the file have been added to the checksum for the +file, then the body of the file is written to the hash. + + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + + +#### Final Checksum + +Using an initialize hash of the associated hash cipher, if there is additional +payload to include in the TarSum calculation for the archive, it is written +first. 
Then each checksum from the ordered list of files sums is written to the +hash. The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for + + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e + +## Acknowledgements + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + From 0597513d59a142483100796c771b092c091a13fb Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 12 Nov 2014 09:25:46 -0500 Subject: [PATCH 76/99] pkg/tarsum: review amendments (separate commit to preserve github conversation) Signed-off-by: Vincent Batts --- tarsum/tarsum_spec.md | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/tarsum/tarsum_spec.md b/tarsum/tarsum_spec.md index bffd44a..aa5065d 100644 --- a/tarsum/tarsum_spec.md +++ b/tarsum/tarsum_spec.md @@ -14,8 +14,10 @@ methods, and the versioning of this calculation. ## Introduction The transportation of file systems, regarding docker, is done with tar(1) -archives. Types of transpiration include distribution to and from a registry -endpoint, saving and loading through commands or docker daemon APIs, +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. 
Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or docker daemon APIs, transferring the build context from client to docker daemon, and committing the file system of a container to become an image. @@ -40,7 +42,7 @@ versions. ## Concept -The checksum mechanism must ensure the integrity and confidentiality of the +The checksum mechanism must ensure the integrity and assurance of the file system payload. @@ -62,11 +64,11 @@ A checksum mechanism must define the following operations and attributes: The calculated sum output is a text string. The elements included in the output of the calculated sum comprise the information needed for validation of the sum -(TarSum version and block cipher used) and the expected checksum in hexadecimal +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal form. There are two delimiters used: -* '+' separates TarSum version from block cipher +* '+' separates TarSum version from hashing cipher * ':' separates calculation mechanics from expected hash Example: @@ -114,11 +116,11 @@ calculation are subject to change without notice. ## Ciphers -The official default and standard block cipher used in the calculation mechanic +The official default and standard hashing cipher used in the calculation mechanic is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. -Though the algorithm itself is not exclusively bound to this single block -cipher, and support for alternate block ciphers was later added [1]. Presently +Though the algorithm itself is not exclusively bound to this single hashing +cipher, and support for alternate hashing ciphers was later added [1]. Presently use of this is for isolated use-cases and future-proofing the TarSum checksum format. @@ -128,7 +130,7 @@ format. As mentioned earlier, the calculation is such that it takes into consideration the life and cycle of the tar archive. 
In that the tar archive is not an -immutable, permanent artifact. Otherwise options like relying on a known block +immutable, permanent artifact. Otherwise options like relying on a known hashing cipher checksum of the archive itself would be reliable enough. Since the tar archive is used as a transportation medium, and is thrown away after its contents are extracted. Therefore, for consistent validation items such as @@ -200,10 +202,12 @@ body. #### Final Checksum -Using an initialize hash of the associated hash cipher, if there is additional -payload to include in the TarSum calculation for the archive, it is written -first. Then each checksum from the ordered list of files sums is written to the -hash. The resulting digest is formatted per the Elements of TarSum checksum, +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest. @@ -213,13 +217,16 @@ encoded checksum digest. The initial version of TarSum has undergone one update that could invalidate handcrafted tar archives. The tar archive format supports appending of files with same names as prior files in the archive. The latter file will clobber the -prior file of the same path. Due to this the algorithm now accounts for +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. 
## Footnotes * [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 * [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 ## Acknowledgements From 9a45c4235a5f0ff13f2b112587776e67e9148cb0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Nov 2014 13:09:05 -0500 Subject: [PATCH 77/99] pkg/tarsum: review cleanup Signed-off-by: Vincent Batts --- tarsum/tarsum_spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tarsum/tarsum_spec.md b/tarsum/tarsum_spec.md index aa5065d..b51e5b1 100644 --- a/tarsum/tarsum_spec.md +++ b/tarsum/tarsum_spec.md @@ -188,7 +188,7 @@ with no newline. #### Body After the order headers of the file have been added to the checksum for the -file, then the body of the file is written to the hash. +file, the body of the file is written to the hash. 
#### List of file sums From bd9c676bb7b609d71bbb01288766a72585c3856c Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 20 Nov 2014 15:46:15 -0500 Subject: [PATCH 78/99] tarsum: updates for jamtur01 comments Signed-off-by: Vincent Batts --- tarsum/tarsum_spec.md | 82 +++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/tarsum/tarsum_spec.md b/tarsum/tarsum_spec.md index b51e5b1..7a6f8ed 100644 --- a/tarsum/tarsum_spec.md +++ b/tarsum/tarsum_spec.md @@ -1,5 +1,5 @@ page_title: TarSum checksum specification -page_description: Documentation for algorithm used in the TarSum checksum calculation +page_description: Documentation for algorithms used in the TarSum checksum calculation page_keywords: docker, checksum, validation, tarsum # TarSum Checksum Specification @@ -7,58 +7,54 @@ page_keywords: docker, checksum, validation, tarsum ## Abstract This document describes the algorithms used in performing the TarSum checksum -calculation on file system layers, the need for this method over existing +calculation on filesystem layers, the need for this method over existing methods, and the versioning of this calculation. ## Introduction -The transportation of file systems, regarding docker, is done with tar(1) +The transportation of filesystems, regarding Docker, is done with tar(1) archives. There are a variety of tar serialization formats [2], and a key concern here is ensuring a repeatable checksum given a set of inputs from a generic tar archive. Types of transportation include distribution to and from a -registry endpoint, saving and loading through commands or docker daemon APIs, -transferring the build context from client to docker daemon, and committing the -file system of a container to become an image. +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. 
As tar archives are used for transit, but not preserved in many situations, the -focus of the algorithm is to ensure the integrity of the preserved file system, +focus of the algorithm is to ensure the integrity of the preserved filesystem, while maintaining a deterministic accountability. This includes neither -constrain the ordering or manipulation of the files during the creation or +constraining the ordering or manipulation of the files during the creation or unpacking of the archive, nor include additional metadata state about the file system attributes. - ## Intended Audience This document is outlining the methods used for consistent checksum calculation -for file systems transported via tar archives. +for filesystems transported via tar archives. Auditing these methodologies is an open and iterative process. This document should accommodate the review of source code. Ultimately, this document should be the starting point of further refinements to the algorithm and its future versions. - ## Concept The checksum mechanism must ensure the integrity and assurance of the -file system payload. - +filesystem payload. ## Checksum Algorithm Profile A checksum mechanism must define the following operations and attributes: -* associated hashing cipher - used to checksum each file payload and attribute +* Associated hashing cipher - used to checksum each file payload and attribute information. -* checksum list - each file of the file system archive has its checksum +* Checksum list - each file of the filesystem archive has its checksum calculated from the payload and attributes of the file. The final checksum is calculated from this list, with specific ordering. -* version - as the algorithm adapts to requirements, there are behaviors of the +* Version - as the algorithm adapts to requirements, there are behaviors of the algorithm to manage by versioning. 
-* archive being calculated - the tar archive having its checksum calculated - +* Archive being calculated - the tar archive having its checksum calculated ## Elements of TarSum checksum @@ -73,13 +69,14 @@ There are two delimiters used: Example: +``` "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" | | \ | | | \ | |_version_|_cipher__|__ | | \ | |_calculation_mechanics_|______________________expected_sum_______________________| - +``` ## Versioning @@ -92,51 +89,50 @@ The general algorithm will be describe further in the 'Calculation'. This is the initial version of TarSum. -Its element in the checksum "tarsum" - +Its element in the TarSum checksum string is `tarsum`. ### Version1 -Its element in the checksum "tarsum.v1" +Its element in the TarSum checksum is `tarsum.v1`. The notable changes in this version: -* exclusion of file mtime from the file information headers, in each file +* Exclusion of file `mtime` from the file information headers, in each file checksum calculation -* inclusion of extended attributes (xattrs. Also seen as "SCHILY.xattr." prefixed Pax +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax tar file info headers) keys and values in each file checksum calculation ### VersionDev *Do not use unless validating refinements to the checksum algorithm* -Its element in the checksum "tarsum.dev" +Its element in the TarSum checksum is `tarsum.dev`. -This is a floating place holder for a next version. The methods used for -calculation are subject to change without notice. +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. ## Ciphers The official default and standard hashing cipher used in the calculation mechanic -is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. +is `sha256`. 
This refers to SHA256 hash algorithm as defined in FIPS 180-4. -Though the algorithm itself is not exclusively bound to this single hashing -cipher, and support for alternate hashing ciphers was later added [1]. Presently -use of this is for isolated use-cases and future-proofing the TarSum checksum -format. +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. ## Calculation ### Requirement As mentioned earlier, the calculation is such that it takes into consideration -the life and cycle of the tar archive. In that the tar archive is not an -immutable, permanent artifact. Otherwise options like relying on a known hashing -cipher checksum of the archive itself would be reliable enough. Since the tar -archive is used as a transportation medium, and is thrown away after its -contents are extracted. Therefore, for consistent validation items such as -order of files in the tar archive and time stamps are subject to change once an -image is received. - +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. ### Process @@ -175,7 +171,6 @@ For >= Version1, the extented attribute headers ("SCHILY.xattr." prefixed pax headers) included after the above list. These xattrs key/values are first sorted by the keys. 
- #### Header Format The ordered headers are written to the hash in the format of @@ -184,13 +179,11 @@ The ordered headers are written to the hash in the format of with no newline. - #### Body After the order headers of the file have been added to the checksum for the file, the body of the file is written to the hash. - #### List of file sums The list of file sums is sorted by the string of the hexadecimal digest. @@ -199,7 +192,6 @@ If there are two files in the tar with matching paths, the order of occurrence for that path is reflected for the sums of the corresponding file header and body. - #### Final Checksum Begin with a fresh or initial state of the associated hash cipher. If there is @@ -211,7 +203,6 @@ The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest. - ## Security Considerations The initial version of TarSum has undergone one update that could invalidate @@ -220,7 +211,6 @@ with same names as prior files in the archive. The latter file will clobber the prior file of the same path. Due to this the algorithm now accounts for files with matching paths, and orders the list of file sums accordingly [3]. 
- ## Footnotes * [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 From c1532556b2ce8a5b340230cb700538b453705535 Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Tue, 25 Nov 2014 20:49:01 +0900 Subject: [PATCH 79/99] Use termios via CGO only on Linux Signed-off-by: Yohei Ueda --- term/{term_cgo.go => tc_linux_cgo.go} | 2 +- term/{term_nocgo.go => tc_other.go} | 3 ++- term/termios_darwin.go | 2 -- term/termios_freebsd.go | 2 -- 4 files changed, 3 insertions(+), 6 deletions(-) rename term/{term_cgo.go => tc_linux_cgo.go} (97%) rename term/{term_nocgo.go => tc_other.go} (90%) diff --git a/term/term_cgo.go b/term/tc_linux_cgo.go similarity index 97% rename from term/term_cgo.go rename to term/tc_linux_cgo.go index ddf080c..ae9516c 100644 --- a/term/term_cgo.go +++ b/term/tc_linux_cgo.go @@ -1,4 +1,4 @@ -// +build !windows,cgo +// +build linux,cgo package term diff --git a/term/term_nocgo.go b/term/tc_other.go similarity index 90% rename from term/term_nocgo.go rename to term/tc_other.go index c211c39..266039b 100644 --- a/term/term_nocgo.go +++ b/term/tc_other.go @@ -1,4 +1,5 @@ -// +build !windows,!cgo +// +build !windows +// +build !linux !cgo package term diff --git a/term/termios_darwin.go b/term/termios_darwin.go index 2640e8b..11cd70d 100644 --- a/term/termios_darwin.go +++ b/term/termios_darwin.go @@ -1,5 +1,3 @@ -// +build !cgo - package term import ( diff --git a/term/termios_freebsd.go b/term/termios_freebsd.go index 969beda..ed36595 100644 --- a/term/termios_freebsd.go +++ b/term/termios_freebsd.go @@ -1,5 +1,3 @@ -// +build !cgo - package term import ( From f47ea48cfe4330e4f89ac134b7395c0e03577e48 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 26 Nov 2014 00:41:42 +0200 Subject: [PATCH 80/99] pkg/tarsum: add maintainers & add missing s Signed-off-by: Cristian Staretu --- tarsum/MAINTAINER | 1 - tarsum/MAINTAINERS | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) delete mode 100644 tarsum/MAINTAINER 
create mode 100644 tarsum/MAINTAINERS diff --git a/tarsum/MAINTAINER b/tarsum/MAINTAINER deleted file mode 100644 index bd492e8..0000000 --- a/tarsum/MAINTAINER +++ /dev/null @@ -1 +0,0 @@ -Eric Windisch (@ewindisch) diff --git a/tarsum/MAINTAINERS b/tarsum/MAINTAINERS new file mode 100644 index 0000000..9571a14 --- /dev/null +++ b/tarsum/MAINTAINERS @@ -0,0 +1,4 @@ +Derek McGowan (github: dmcgowan) +Eric Windisch (github: ewindisch) +Josh Hawn (github: jlhawn) +Vincent Batts (github: vbatts) From 32fb3913b5fc663d443cda2653e45000aec1ed49 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 7 Nov 2014 13:44:35 -0500 Subject: [PATCH 81/99] Cleanup ParseHost Current implementation is comingling things that ought not be together. There are _some_ similarities between parsing for the different proto types, but they are more different than alike, making the code extremely difficult to reason about. Signed-off-by: Brian Goff --- parsers/parsers.go | 98 ++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 51 deletions(-) diff --git a/parsers/parsers.go b/parsers/parsers.go index e6e3718..2851fe1 100644 --- a/parsers/parsers.go +++ b/parsers/parsers.go @@ -7,63 +7,59 @@ import ( ) // FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { - var ( - proto string - host string - port int - ) +func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { addr = strings.TrimSpace(addr) - switch { - case addr == "tcp://": - return "", fmt.Errorf("Invalid bind address format: %s", addr) - case strings.HasPrefix(addr, "unix://"): - proto = "unix" - addr = strings.TrimPrefix(addr, "unix://") - if addr == "" { - addr = defaultUnix - } - case strings.HasPrefix(addr, "tcp://"): - proto = "tcp" - addr = strings.TrimPrefix(addr, "tcp://") - case strings.HasPrefix(addr, "fd://"): + if addr == "" { + addr = fmt.Sprintf("unix://%s", defaultUnixAddr) + } + 
addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": return addr, nil - case addr == "": - proto = "unix" - addr = defaultUnix default: - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid bind address protocol: %s", addr) - } - proto = "tcp" - } - - if proto != "unix" && strings.Contains(addr, ":") { - hostParts := strings.Split(addr, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - if hostParts[0] != "" { - host = hostParts[0] - } else { - host = defaultHost - } - - if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { - port = p - } else { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - - } else if proto == "tcp" && !strings.Contains(addr, ":") { return "", fmt.Errorf("Invalid bind address format: %s", addr) - } else { - host = addr } - if proto == "unix" { - return fmt.Sprintf("%s://%s", proto, host), nil +} + +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) } - return fmt.Sprintf("%s://%s:%d", proto, host, port), nil + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +func ParseTCPAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) + } + + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + host := hostParts[0] + if host == "" { + host = defaultAddr + } + + p, err := 
strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + return fmt.Sprintf("tcp://%s:%d", host, p), nil } // Get a repos name and returns the right reposName + tag From 2ebf95a81ea99005912a82ec64381d51e84e4034 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 26 Nov 2014 23:00:13 -0800 Subject: [PATCH 82/99] Change path breakout detection logic in archive package Fixes #9375 Signed-off-by: Alexandr Morozov --- archive/archive.go | 9 ++++++--- archive/diff.go | 12 +++++++----- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index aaeed31..3783e72 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -530,10 +530,13 @@ loop: } } - // Prevent symlink breakout path := filepath.Join(dest, hdr.Name) - if !strings.HasPrefix(path, dest) { - return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it diff --git a/archive/diff.go b/archive/diff.go index 856cedc..c6118c5 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -92,12 +92,14 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } path := filepath.Join(dest, hdr.Name) - base := filepath.Base(path) - - // Prevent symlink breakout - if !strings.HasPrefix(path, dest) { - return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] From 16130e775f9b87a98932b7f31100a0dd332ce3f2 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: 
Sun, 30 Nov 2014 01:58:16 +0800 Subject: [PATCH 83/99] flag: fix the comments Signed-off-by: Qiang Huang --- mflag/flag.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mflag/flag.go b/mflag/flag.go index c9061c2..a30c41b 100644 --- a/mflag/flag.go +++ b/mflag/flag.go @@ -23,12 +23,12 @@ flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. - You can also add "deprecated" flags, they are still usable, bur are not shown + You can also add "deprecated" flags, they are still usable, but are not shown in the usage and will display a warning when you try to use them: - var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname") - this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and + var ip = flag.Int([]string{"#f", "#flagname", "-flagname2"}, 1234, "help message for flagname") + this will display: `Warning: '--flagname' is deprecated, it will be replaced by '--flagname2' soon. See usage.` and var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") - will display: `Warning: '-t' is deprecated, it will be removed soon. See usage.` + will display: `Warning: '-f' is deprecated, it will be removed soon. See usage.` You can also group one letter flags, bif you declare var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") From 12efe03ef03bf6a831afb7435a96534da124a9df Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 2 Dec 2014 03:02:25 -0800 Subject: [PATCH 84/99] graphdb: initialize the database semi-idempotently on every connection. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- graphdb/conn_sqlite3.go | 18 +--------- graphdb/graphdb.go | 76 +++++++++++++++++++++++------------------ graphdb/graphdb_test.go | 2 +- 3 files changed, 45 insertions(+), 51 deletions(-) diff --git a/graphdb/conn_sqlite3.go b/graphdb/conn_sqlite3.go index b6a8027..455790a 100644 --- a/graphdb/conn_sqlite3.go +++ b/graphdb/conn_sqlite3.go @@ -4,31 +4,15 @@ package graphdb import ( "database/sql" - "os" _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) func NewSqliteConn(root string) (*Database, error) { - initDatabase := false - - stat, err := os.Stat(root) - if err != nil { - if os.IsNotExist(err) { - initDatabase = true - } else { - return nil, err - } - } - - if stat != nil && stat.Size() == 0 { - initDatabase = true - } - conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } - return NewDatabase(conn, initDatabase) + return NewDatabase(conn) } diff --git a/graphdb/graphdb.go b/graphdb/graphdb.go index 450bd50..6234203 100644 --- a/graphdb/graphdb.go +++ b/graphdb/graphdb.go @@ -73,45 +73,55 @@ func IsNonUniqueNameError(err error) bool { } // Create a new graph database initialized with a root entity -func NewDatabase(conn *sql.DB, init bool) (*Database, error) { +func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} - if init { - if _, err := conn.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeIndices); err != nil { - return nil, err - } - - rollback := func() { - conn.Exec("ROLLBACK") - } - - // Create root entities - if _, err := conn.Exec("BEGIN"); err != nil { - return nil, err - } - if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - rollback() - return nil, err - } - - if _, err := conn.Exec("INSERT INTO 
edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - rollback() - return nil, err - } - - if _, err := conn.Exec("COMMIT"); err != nil { - return nil, err - } + if _, err := conn.Exec(createEntityTable); err != nil { + return nil, err } + if _, err := conn.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := conn.Exec(createEdgeIndices); err != nil { + return nil, err + } + + rollback := func() { + conn.Exec("ROLLBACK") + } + + // Create root entities + if _, err := conn.Exec("BEGIN"); err != nil { + return nil, err + } + + if _, err := conn.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("COMMIT"); err != nil { + return nil, err + } + return db, nil } diff --git a/graphdb/graphdb_test.go b/graphdb/graphdb_test.go index 7568e66..f228285 100644 --- a/graphdb/graphdb_test.go +++ b/graphdb/graphdb_test.go @@ -14,7 +14,7 @@ import ( func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn, true) + db, err := NewDatabase(conn) if err != nil { t.Fatal(err) } From 2b403ab36031adc4e37cb9aeeef121a704687f04 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 2 Dec 2014 15:23:49 -0800 Subject: [PATCH 85/99] Fix TarSum iteration test I noticed that 3 of the tarsum test cases had expected a tarsum with a sha256 hash of e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 As I've been working with sha256 quite a bit lately, it struck me that this is the initial digest value for 
sha256, which means that no data was processed. However, these tests *do* process data. It turns out that there was a bug in the test handling code which did not wait for tarsum to end completely. This patch corrects these test cases. I'm unaware of anywhere else in the code base where this would be an issue, though we definitily need to look out in the future to ensure we are completing tarsum reads (waiting for EOF). Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- tarsum/tarsum_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tarsum/tarsum_test.go b/tarsum/tarsum_test.go index 5e7f042..41e1b9b 100644 --- a/tarsum/tarsum_test.go +++ b/tarsum/tarsum_test.go @@ -337,7 +337,7 @@ func TestIteration(t *testing.T) { data []byte }{ { - "tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", Version0, &tar.Header{ Name: "file.txt", @@ -349,7 +349,7 @@ func TestIteration(t *testing.T) { []byte(""), }, { - "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", VersionDev, &tar.Header{ Name: "file.txt", @@ -361,7 +361,7 @@ func TestIteration(t *testing.T) { []byte(""), }, { - "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", VersionDev, &tar.Header{ Name: "another.txt", @@ -463,6 +463,7 @@ func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { for { hdr, err := tr.Next() if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
break } if err != nil { @@ -471,7 +472,6 @@ func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { if _, err = io.Copy(ioutil.Discard, tr); err != nil { return "", err } - break // we're just reading one header ... } return ts.Sum(nil), nil } From 12524d565748e716486bdd7903628a03550e8eb6 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 86/99] devmapper: Use device id as specified by caller Currently devicemapper CreateDevice and CreateSnapDevice keep on retrying device creation till a suitable device id is found. With new transaction mechanism we need to store device id in transaction before it has been created. So change the logic in such a way that caller decides the devices Id to use. If that device Id is not available, caller bumps up the device Id and retries. That way caller can update transaciton too when it tries a new Id. Transaction related patches will come later in the series. Signed-off-by: Vivek Goyal --- devicemapper/devmapper.go | 108 +++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/devicemapper/devmapper.go b/devicemapper/devmapper.go index a7306ba..c23a362 100644 --- a/devicemapper/devmapper.go +++ b/devicemapper/devmapper.go @@ -67,6 +67,7 @@ var ( ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrBusy = errors.New("Device is Busy") + ErrDeviceIdExists = errors.New("Device Id Exists") dmSawBusy bool dmSawExist bool @@ -97,6 +98,16 @@ type ( AddNodeType int ) +// Returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. 
Replacing it with more robust implementation +// is desirable. +func DeviceIdExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIdExists) +} + func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) @@ -528,33 +539,29 @@ func ResumeDevice(name string) error { return nil } -func CreateDevice(poolName string, deviceId *int) error { - log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) +func CreateDevice(poolName string, deviceId int) error { + log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } - for { - task, err := TaskCreateNamed(DeviceTargetMsg, poolName) - if task == nil { - return err - } + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector %s", err) - } + if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { - return fmt.Errorf("Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIdExists + } else { return fmt.Errorf("Error running CreateDevice %s", err) } - break } return nil } @@ -607,7 +614,7 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err return nil } -func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { +func CreateSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 @@ -617,44 +624,39 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } } - for { - task, err := TaskCreateNamed(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - ResumeDevice(baseName) - } - return err + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) } + return err + } - if err := task.SetSector(0); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("Can't set sector %s", err) + if err := task.SetSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("Can't set message %s", err) + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set message %s", err) + } - dmSawExist = false // reset before the task is run - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } - - if doSuspend { - ResumeDevice(baseName) - } + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIdExists so 
that it can try with a different device id. + if dmSawExist { + return ErrDeviceIdExists + } else { return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } - - break } if doSuspend { From 985d3bd404c1a87dbff2d43ac7be64bd2c32afb4 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 3 Dec 2014 10:35:20 -0800 Subject: [PATCH 87/99] Correct TarSum benchmarks: 9kTar and 9kTarGzip These two cases did not actually read the same content with each iteration of the benchmark. After the first read, the buffer was consumed. This patch corrects this by using a bytes.Reader and seeking to the beginning of the buffer at the beginning of each iteration. Unfortunately, this benchmark was not actually as fast as we believed. But the new results do bring its results closer to those of the other benchmarks. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- tarsum/tarsum_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tarsum/tarsum_test.go b/tarsum/tarsum_test.go index 41e1b9b..4e1f30e 100644 --- a/tarsum/tarsum_test.go +++ b/tarsum/tarsum_test.go @@ -486,10 +486,13 @@ func Benchmark9kTar(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, true, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) if err != nil { b.Error(err) return @@ -509,10 +512,13 @@ func Benchmark9kTarGzip(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, false, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) if err != nil { b.Error(err) return From 5e3e0f129e5268a1c85a7003c2456c3d0b5ea0a7 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Wed, 3 Dec 2014 15:36:57 -0500 Subject: [PATCH 88/99] Fix invalid argument error on push With 
32ba6ab from #9261, TempArchive now closes the underlying file and cleans it up as soon as the file's contents have been read. When pushing an image, PushImageLayerRegistry attempts to call Close() on the layer, which is a TempArchive that has already been closed. In this situation, Close() returns an "invalid argument" error. Add a Close method to TempArchive that does a no-op if the underlying file has already been closed. Signed-off-by: Andy Goldstein --- archive/archive.go | 21 +++++++++++++++++---- archive/archive_test.go | 16 ++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 3783e72..ead85be 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -771,20 +771,33 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return nil, err } size := st.Size() - return &TempArchive{f, size, 0}, nil + return &TempArchive{File: f, Size: size}, nil } type TempArchive struct { *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { - archive.File.Close() + archive.Close() os.Remove(archive.File.Name()) } return n, err diff --git a/archive/archive_test.go b/archive/archive_test.go index 05362a2..fdba6fb 100644 --- a/archive/archive_test.go +++ b/archive/archive_test.go @@ -9,6 +9,7 @@ import ( "os/exec" "path" "path/filepath" + "strings" "syscall" "testing" "time" @@ -607,3 +608,18 @@ func TestUntarInvalidSymlink(t *testing.T) { } } } + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. 
Unexpected error closing temp archive: %v", i, err) + } + } +} From 267e518231a412b481c7f7fbedbafb9d5d7585fe Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 19:02:51 -0800 Subject: [PATCH 89/99] Fix output format where no variable specified in mount pkg Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- mount/sharedsubtree_linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mount/sharedsubtree_linux_test.go b/mount/sharedsubtree_linux_test.go index 145d57b..0986bd9 100644 --- a/mount/sharedsubtree_linux_test.go +++ b/mount/sharedsubtree_linux_test.go @@ -312,7 +312,7 @@ func TestSubtreeUnbindable(t *testing.T) { if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { t.Fatal(err) } else if err == nil { - t.Fatalf("%q should not have been bindable") + t.Fatalf("%q should not have been bindable", sourceDir) } defer func() { if err := Unmount(targetDir); err != nil { From d98b6f38ba9faca24d62ba578f282ae80060e545 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 3 Dec 2014 22:23:31 -0800 Subject: [PATCH 90/99] Correctly close generated benchmark archives Another update to TarSum tests, this patch fixes an issue where the benchmarks were generating archives incorrectly by not closing the tarWriter. 
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- tarsum/tarsum_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tarsum/tarsum_test.go b/tarsum/tarsum_test.go index 4e1f30e..26f12cc 100644 --- a/tarsum/tarsum_test.go +++ b/tarsum/tarsum_test.go @@ -132,6 +132,7 @@ func sizedTar(opts sizedOptions) io.Reader { fh = bytes.NewBuffer([]byte{}) } tarW := tar.NewWriter(fh) + defer tarW.Close() for i := int64(0); i < opts.num; i++ { err := tarW.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/testdata%d", i), From fec996b35d80a1f68338597785ed7d27f93e785d Mon Sep 17 00:00:00 2001 From: Lewis Marshall Date: Sat, 6 Dec 2014 02:30:03 +0000 Subject: [PATCH 91/99] Fix chroot untar for zero padded archive from slow reader Signed-off-by: Lewis Marshall --- chrootarchive/archive.go | 5 +++++ chrootarchive/archive_test.go | 41 +++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index fc2bea2..8d139fa 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "os" "runtime" "strings" @@ -33,6 +34,10 @@ func untar() { if err := archive.Untar(os.Stdin, "/", options); err != nil { fatal(err) } + // fully consume stdin in case it is zero padded + if _, err := ioutil.ReadAll(os.Stdin); err != nil { + fatal(err) + } os.Exit(0) } diff --git a/chrootarchive/archive_test.go b/chrootarchive/archive_test.go index 69e18e3..8477c06 100644 --- a/chrootarchive/archive_test.go +++ b/chrootarchive/archive_test.go @@ -1,10 +1,12 @@ package chrootarchive import ( + "io" "io/ioutil" "os" "path/filepath" "testing" + "time" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" @@ -42,3 +44,42 @@ func TestChrootTarUntar(t *testing.T) { t.Fatal(err) } } + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from 
/dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} From ad11e1392c35006d72febca4ffea14b197a2aae7 Mon Sep 17 00:00:00 2001 From: Erik Dubbelboer Date: Sat, 6 Dec 2014 22:42:32 +0800 Subject: [PATCH 92/99] Removed race condition If two interrupts were fired really quickly interruptCount could have been incremented twice before the LoadUint32 making cleanup not being called at all. Signed-off-by: Erik Dubbelboer --- signal/trap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/signal/trap.go b/signal/trap.go index 9be8267..78a709b 100644 --- a/signal/trap.go +++ b/signal/trap.go @@ -34,9 +34,8 @@ func Trap(cleanup func()) { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. 
if atomic.LoadUint32(&interruptCount) < 3 { - atomic.AddUint32(&interruptCount, 1) // Initiate the cleanup only once - if atomic.LoadUint32(&interruptCount) == 1 { + if atomic.AddUint32(&interruptCount, 1) == 1 { // Call cleanup handler cleanup() os.Exit(0) From 6bd7943697763bd62246a7ab3f36319867f1fe13 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 15:04:34 -0800 Subject: [PATCH 93/99] Flush stdin from within chroot archive This makes sure that we don't buffer in memory and that we also flush stdin from diff as well as untar. Signed-off-by: Michael Crosby --- chrootarchive/archive.go | 5 +---- chrootarchive/archive_test.go | 16 ++++++++++++++++ chrootarchive/diff.go | 1 + chrootarchive/init.go | 8 ++++++++ 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index 8d139fa..2942d9d 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -35,9 +34,7 @@ func untar() { fatal(err) } // fully consume stdin in case it is zero padded - if _, err := ioutil.ReadAll(os.Stdin); err != nil { - fatal(err) - } + flush(os.Stdin) os.Exit(0) } diff --git a/chrootarchive/archive_test.go b/chrootarchive/archive_test.go index 8477c06..0fe3d64 100644 --- a/chrootarchive/archive_test.go +++ b/chrootarchive/archive_test.go @@ -83,3 +83,19 @@ func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { t.Fatal(err) } } + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/chrootarchive/diff.go 
b/chrootarchive/diff.go index 2653aef..f9f9b9d 100644 --- a/chrootarchive/diff.go +++ b/chrootarchive/diff.go @@ -32,6 +32,7 @@ func applyLayer() { fatal(err) } os.RemoveAll(tmpDir) + flush(os.Stdin) os.Exit(0) } diff --git a/chrootarchive/init.go b/chrootarchive/init.go index b548e9f..4116026 100644 --- a/chrootarchive/init.go +++ b/chrootarchive/init.go @@ -2,6 +2,8 @@ package chrootarchive import ( "fmt" + "io" + "io/ioutil" "os" "github.com/docker/docker/pkg/reexec" @@ -16,3 +18,9 @@ func fatal(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) { + io.Copy(ioutil.Discard, r) +} From 96678a759b1fbf632bf13fb22ffc997dc3b4fca0 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Fri, 5 Dec 2014 14:58:46 -0700 Subject: [PATCH 94/99] Simplify FollowSymlinkInScope based on Go 1.3.3's EvalSymlinks Signed-off-by: Andrew Page --- symlink/LICENSE.APACHE | 191 ++++++++++++++++++++++++++++++++++++++ symlink/LICENSE.BSD | 27 ++++++ symlink/MAINTAINERS | 5 +- symlink/README.md | 5 + symlink/fs.go | 202 +++++++++++++++++++++++------------------ symlink/fs_test.go | 2 + 6 files changed, 344 insertions(+), 88 deletions(-) create mode 100644 symlink/LICENSE.APACHE create mode 100644 symlink/LICENSE.BSD create mode 100644 symlink/README.md diff --git a/symlink/LICENSE.APACHE b/symlink/LICENSE.APACHE new file mode 100644 index 0000000..2744858 --- /dev/null +++ b/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/symlink/LICENSE.BSD b/symlink/LICENSE.BSD new file mode 100644 index 0000000..ebcfbcc --- /dev/null +++ b/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/symlink/MAINTAINERS b/symlink/MAINTAINERS index 68a97d2..51a41a5 100644 --- a/symlink/MAINTAINERS +++ b/symlink/MAINTAINERS @@ -1,2 +1,3 @@ -Michael Crosby (@crosbymichael) -Victor Vieux (@vieux) +Tibor Vass (@tiborvass) +Cristian Staretu (@unclejack) +Tianon Gravi (@tianon) diff --git a/symlink/README.md b/symlink/README.md new file mode 100644 index 0000000..0d1dbb7 --- /dev/null +++ b/symlink/README.md @@ -0,0 +1,5 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/symlink/fs.go b/symlink/fs.go index 6ce99c6..b4bdff2 100644 --- a/symlink/fs.go +++ b/symlink/fs.go @@ -1,101 +1,131 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + package symlink import ( - "fmt" + "bytes" + "errors" "os" - "path" "path/filepath" "strings" ) -const maxLoopCounter = 100 - -// FollowSymlink will follow an existing link and scope it to the root -// path provided. 
-// The role of this function is to return an absolute path in the root -// or normalize to the root if the symlink leads to a path which is -// outside of the root. -// Errors encountered while attempting to follow the symlink in path -// will be reported. -// Normalizations to the root don't constitute errors. -func FollowSymlinkInScope(link, root string) (string, error) { - root, err := filepath.Abs(root) +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(path) if err != nil { return "", err } - - link, err = filepath.Abs(link) + root, err = filepath.Abs(root) if err != nil { return "", err } - - if link == root { - return root, nil - } - - if !strings.HasPrefix(filepath.Dir(link), root) { - return "", fmt.Errorf("%s is not within %s", link, root) - } - - prev := "/" - - for _, p := range strings.Split(link, "/") { - prev = filepath.Join(prev, p) - - loopCounter := 0 - for { - loopCounter++ - - if loopCounter >= maxLoopCounter { - return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter) - } - - if !strings.HasPrefix(prev, root) { - // Don't resolve symlinks outside of root. For example, - // we don't have to check /home in the below. 
- // - // /home -> usr/home - // FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo") - break - } - - stat, err := os.Lstat(prev) - if err != nil { - if os.IsNotExist(err) { - break - } - return "", err - } - - // let's break if we're not dealing with a symlink - if stat.Mode()&os.ModeSymlink != os.ModeSymlink { - break - } - - // process the symlink - dest, err := os.Readlink(prev) - if err != nil { - return "", err - } - - if path.IsAbs(dest) { - prev = filepath.Join(root, dest) - } else { - prev, _ = filepath.Abs(prev) - - dir := filepath.Dir(prev) - prev = filepath.Join(dir, dest) - if dir == root && !strings.HasPrefix(prev, root) { - prev = root - } - if len(prev) < len(root) || (len(prev) == len(root) && prev != root) { - prev = filepath.Join(root, filepath.Base(dest)) - } - } - } - } - if prev == "/" { - prev = root - } - return prev, nil + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to create subsequently, additional symlinks that could potentially make a +// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". 
If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". +func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if cleanP == string(filepath.Separator) { + // never Lstat "/" itself + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := 
os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p + string(filepath.Separator)) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil } diff --git a/symlink/fs_test.go b/symlink/fs_test.go index 0e2f948..24ffb1e 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -1,3 +1,5 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + package symlink import ( From 9bd8acc9b77513851c98017b589f9539edc5d3d9 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Sun, 30 Nov 2014 11:39:28 -0500 Subject: [PATCH 95/99] Refactor of symlink tests to remove testdata dir Signed-off-by: Tibor Vass --- symlink/fs_test.go | 295 ++++++++++++++++++---------------------- symlink/testdata/fs/a/d | 1 - symlink/testdata/fs/a/e | 1 - symlink/testdata/fs/a/f | 1 - symlink/testdata/fs/b/h | 1 - symlink/testdata/fs/g | 1 - symlink/testdata/fs/i | 1 - symlink/testdata/fs/j/k | 1 - 8 files changed, 132 insertions(+), 170 deletions(-) delete mode 120000 symlink/testdata/fs/a/d delete mode 120000 symlink/testdata/fs/a/e delete mode 120000 symlink/testdata/fs/a/f delete mode 120000 symlink/testdata/fs/b/h delete mode 120000 symlink/testdata/fs/g delete mode 120000 symlink/testdata/fs/i delete mode 120000 symlink/testdata/fs/j/k diff --git a/symlink/fs_test.go b/symlink/fs_test.go index 24ffb1e..9d12041 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -3,248 +3,217 @@ package symlink import ( + "fmt" "io/ioutil" 
"os" "path/filepath" "testing" ) -func abs(t *testing.T, p string) string { - o, err := filepath.Abs(p) - if err != nil { - t.Fatal(err) - } - return o +type dirOrLink struct { + path string + target string } -func TestFollowSymLinkNormal(t *testing.T) { - link := "testdata/fs/a/d/c/data" +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} - rewrite, err := FollowSymlinkInScope(link, "testdata") +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkNormal(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNormal") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativePath(t *testing.T) { - link := "testdata/fs/i" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/fs/a"); 
expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkUnderLinkedDir(t *testing.T) { - dir, err := ioutil.TempDir("", "docker-fs-test") +func TestFollowSymlinkUnderLinkedDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkUnderLinkedDir") if err != nil { t.Fatal(err) } - defer os.RemoveAll(dir) - - os.Mkdir(filepath.Join(dir, "realdir"), 0700) - os.Symlink("realdir", filepath.Join(dir, "linkdir")) - - linkDir := filepath.Join(dir, "linkdir", "foo") - dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar") - os.MkdirAll(dirUnderLinkDir, 0700) - - rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir) - if err != nil { + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { t.Fatal(err) } - - if rewrite != dirUnderLinkDir { - t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite) + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRandomString(t *testing.T) { +func TestFollowSymlinkRandomString(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { t.Fatal("Random string should fail but didn't") } } -func TestFollowSymLinkLastLink(t *testing.T) { - link := "testdata/fs/a/d" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/b"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := 
makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativeLink(t *testing.T) { - link := "testdata/fs/a/e/c/data" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkRelativeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLink") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativeLinkScope(t *testing.T) { +func TestFollowSymlinkRelativeLinkScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } // avoid letting symlink f lead us out of the "testdata" scope // we don't normalize because symlink f is in scope and there is no // information leak - { - link := "testdata/fs/a/f" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) } - // avoid letting symlink f lead us out of the "testdata/fs" scope // we don't normalize because symlink f is in scope and there is no // information leak - { - link := "testdata/fs/a/f" - - 
rewrite, err := FollowSymlinkInScope(link, "testdata/fs") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) } // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root // is out of scope and we leak information - { - link := "testdata/fs/b/h" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/root"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) } // avoid letting allowing symlink e lead us to ../b // normalize to the "testdata/fs/a" - { - link := "testdata/fs/a/e" - - rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/a"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/a/e", target: "../b"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) } // avoid letting symlink -> ../directory/file escape from scope // normalize to "testdata/fs/j" - { - link := "testdata/fs/j/k" - - rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/j"); expected != rewrite { - t.Fatalf("Expected %s got 
%s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) } // make sure we don't allow escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - linkFile := filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("/", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) } // make sure we don't allow escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - linkFile := filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("/../../", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) } // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", 
"", ""); err != nil { + t.Fatal(err) + } - linkFile := filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("../../", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) } } diff --git a/symlink/testdata/fs/a/d b/symlink/testdata/fs/a/d deleted file mode 120000 index 28abc96..0000000 --- a/symlink/testdata/fs/a/d +++ /dev/null @@ -1 +0,0 @@ -/b \ No newline at end of file diff --git a/symlink/testdata/fs/a/e b/symlink/testdata/fs/a/e deleted file mode 120000 index 42532fe..0000000 --- a/symlink/testdata/fs/a/e +++ /dev/null @@ -1 +0,0 @@ -../b \ No newline at end of file diff --git a/symlink/testdata/fs/a/f b/symlink/testdata/fs/a/f deleted file mode 120000 index 21de7ed..0000000 --- a/symlink/testdata/fs/a/f +++ /dev/null @@ -1 +0,0 @@ -../../../../test \ No newline at end of file diff --git a/symlink/testdata/fs/b/h b/symlink/testdata/fs/b/h deleted file mode 120000 index 24387a6..0000000 --- a/symlink/testdata/fs/b/h +++ /dev/null @@ -1 +0,0 @@ -../g \ No newline at end of file diff --git a/symlink/testdata/fs/g b/symlink/testdata/fs/g deleted file mode 120000 index 0ce5de0..0000000 --- a/symlink/testdata/fs/g +++ /dev/null @@ -1 +0,0 @@ -../../../../../../../../../../../../root \ No newline at end of file diff --git a/symlink/testdata/fs/i b/symlink/testdata/fs/i deleted file mode 120000 index 2e65efe..0000000 --- a/symlink/testdata/fs/i +++ /dev/null @@ -1 +0,0 @@ -a \ No newline at end of file diff --git a/symlink/testdata/fs/j/k b/symlink/testdata/fs/j/k deleted file mode 120000 index f559e8f..0000000 --- a/symlink/testdata/fs/j/k +++ /dev/null @@ -1 +0,0 @@ -../i/a \ No newline at end of file From 
21026f47e6f92d1ec7e61cbd33aa6403b657ae03 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 5 Dec 2014 15:33:11 -0500 Subject: [PATCH 96/99] symlink: cleanup names and break big test into multiple smaller ones Signed-off-by: Tibor Vass --- symlink/fs_test.go | 84 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 64 insertions(+), 20 deletions(-) diff --git a/symlink/fs_test.go b/symlink/fs_test.go index 9d12041..3869e1d 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -47,8 +47,8 @@ func testSymlink(tmpdir, path, expected, scope string) error { return nil } -func TestFollowSymlinkNormal(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNormal") +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") if err != nil { t.Fatal(err) } @@ -75,8 +75,8 @@ func TestFollowSymlinkRelativePath(t *testing.T) { } } -func TestFollowSymlinkUnderLinkedDir(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkUnderLinkedDir") +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") if err != nil { t.Fatal(err) } @@ -92,9 +92,9 @@ func TestFollowSymlinkUnderLinkedDir(t *testing.T) { } } -func TestFollowSymlinkRandomString(t *testing.T) { +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("Random string should fail but didn't") + t.Fatal("expected an error") } } @@ -112,8 +112,8 @@ func TestFollowSymlinkLastLink(t *testing.T) { } } -func TestFollowSymlinkRelativeLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLink") +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") if err != nil { t.Fatal(err) } @@ -124,10 +124,15 @@ func TestFollowSymlinkRelativeLink(t *testing.T) { if 
err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { t.Fatal(err) } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } } -func TestFollowSymlinkRelativeLinkScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkScope") +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") if err != nil { t.Fatal(err) } @@ -148,6 +153,14 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root @@ -161,17 +174,14 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { t.Fatal(err) } +} - // avoid letting allowing symlink e lead us to ../b - // normalize to the "testdata/fs/a" - if err := makeFs(tmpdir, []dirOrLink{ - {path: "testdata/fs/a/e", target: "../b"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { t.Fatal(err) } + defer os.RemoveAll(tmpdir) // avoid letting symlink -> ../directory/file escape from scope // normalize to "testdata/fs/j" @@ -181,6 +191,14 @@ func 
TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) // make sure we don't allow escaping to / // normalize to dir @@ -190,25 +208,51 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we don't allow escaping to / // normalize to dir - if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir - if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) if 
err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { t.Fatal(err) From 2a81dcd671cea7bd0af1341926c0f4898896b1fb Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 5 Dec 2014 16:40:53 -0500 Subject: [PATCH 97/99] symlink: add more tests Signed-off-by: Tibor Vass --- symlink/fs_test.go | 139 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) diff --git a/symlink/fs_test.go b/symlink/fs_test.go index 3869e1d..6b2496c 100644 --- a/symlink/fs_test.go +++ b/symlink/fs_test.go @@ -261,3 +261,142 @@ func TestFollowSymlinkRelativePath2(t *testing.T) { t.Fatal(err) } } + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatal("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := 
makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + 
{path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} From eab5117c5cd824e691443133b38ef89cb7bf3725 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 16:14:56 -0500 Subject: [PATCH 98/99] Update chroot apply layer to handle decompression outside chroot Signed-off-by: Michael Crosby Conflicts: pkg/archive/diff.go pkg/chrootarchive/archive.go Conflicts: pkg/archive/diff.go pkg/chrootarchive/diff.go --- archive/diff.go | 39 ++++++++++++++++++++------------------- chrootarchive/archive.go | 25 ++++++++++++++++++++----- chrootarchive/diff.go | 27 ++++++++++++++++++++------- 3 files changed, 60 insertions(+), 31 deletions(-) diff --git a/archive/diff.go b/archive/diff.go index c6118c5..ba22c41 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -15,24 +15,7 @@ import ( "github.com/docker/docker/pkg/system" ) -// ApplyLayer parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. -func ApplyLayer(dest string, layer ArchiveReader) error { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return err - } - - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - layer, err = DecompressStream(layer) - if err != nil { - return err - } - +func UnpackLayer(dest string, layer ArchiveReader) error { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) @@ -159,6 +142,24 @@ func ApplyLayer(dest string, layer ArchiveReader) error { return err } } - return nil } + +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. 
+func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + layer, err = DecompressStream(layer) + if err != nil { + return err + } + return UnpackLayer(dest, layer) +} diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index 2942d9d..a29d30e 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -15,6 +15,15 @@ import ( "github.com/docker/docker/pkg/reexec" ) +var chrootArchiver = &archive.Archiver{Untar} + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} + func untar() { runtime.LockOSThread() flag.Parse() @@ -38,11 +47,17 @@ func untar() { os.Exit(0) } -var ( - chrootArchiver = &archive.Archiver{Untar} -) +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } -func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { var buf bytes.Buffer enc := json.NewEncoder(&buf) if err := enc.Encode(options); err != nil { @@ -55,7 +70,7 @@ func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { } cmd := reexec.Command("docker-untar", dest, buf.String()) - cmd.Stdin = archive + cmd.Stdin = tarArchive out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out) diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go index f9f9b9d..d4e9529 100644 --- a/chrootarchive/diff.go +++ b/chrootarchive/diff.go @@ -3,8 +3,10 @@ package chrootarchive import ( "flag" "fmt" + "io" "io/ioutil" "os" + "path/filepath" "runtime" "syscall" @@ -16,19 +18,20 @@ func applyLayer() { 
runtime.LockOSThread() flag.Parse() - if err := syscall.Chroot(flag.Arg(0)); err != nil { - fatal(err) - } - if err := syscall.Chdir("/"); err != nil { + if err := chroot(flag.Arg(0)); err != nil { fatal(err) } + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") if err != nil { fatal(err) } os.Setenv("TMPDIR", tmpDir) - if err := archive.ApplyLayer("/", os.Stdin); err != nil { - os.RemoveAll(tmpDir) + err = archive.UnpackLayer("/", os.Stdin) + os.RemoveAll(tmpDir) + if err != nil { fatal(err) } os.RemoveAll(tmpDir) @@ -37,8 +40,18 @@ func applyLayer() { } func ApplyLayer(dest string, layer archive.ArchiveReader) error { + dest = filepath.Clean(dest) + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return err + } + defer func() { + if c, ok := decompressed.(io.Closer); ok { + c.Close() + } + }() cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer + cmd.Stdin = decompressed out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("ApplyLayer %s %s", err, out) From 6454d26740859627c5c19c65f3b42e9414d8ef5c Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 16:19:24 -0500 Subject: [PATCH 99/99] Decompress archive before streaming the unpack in a chroot Signed-off-by: Michael Crosby Conflicts: pkg/archive/archive.go pkg/chrootarchive/archive.go Conflicts: pkg/archive/archive.go --- archive/archive.go | 52 +++++++++++++++++++--------------------- chrootarchive/archive.go | 28 ++++++++++++---------- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index ead85be..ec45d85 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -464,32 +464,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return pipeReader, nil } -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it 
into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(archive io.Reader, dest string, options *TarOptions) error { - dest = filepath.Clean(dest) - - if options == nil { - options = &TarOptions{} - } - - if archive == nil { - return fmt.Errorf("Empty archive") - } - - if options.Excludes == nil { - options.Excludes = []string{} - } - - decompressedArchive, err := DecompressStream(archive) - if err != nil { - return err - } - defer decompressedArchive.Close() - +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) @@ -572,10 +547,33 @@ loop: return err } } - return nil } +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. 
+func Untar(archive io.Reader, dest string, options *TarOptions) error { + if archive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } + decompressedArchive, err := DecompressStream(archive) + if err != nil { + return err + } + defer decompressedArchive.Close() + return Unpack(decompressedArchive, dest, options) +} + func (archiver *Archiver) TarUntar(src, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) diff --git a/chrootarchive/archive.go b/chrootarchive/archive.go index a29d30e..0077f93 100644 --- a/chrootarchive/archive.go +++ b/chrootarchive/archive.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "path/filepath" "runtime" "strings" "syscall" @@ -27,19 +28,14 @@ func chroot(path string) error { func untar() { runtime.LockOSThread() flag.Parse() - - if err := syscall.Chroot(flag.Arg(0)); err != nil { + if err := chroot(flag.Arg(0)); err != nil { fatal(err) } - if err := syscall.Chdir("/"); err != nil { + var options *archive.TarOptions + if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil { fatal(err) } - options := new(archive.TarOptions) - dec := json.NewDecoder(strings.NewReader(flag.Arg(1))) - if err := dec.Decode(options); err != nil { - fatal(err) - } - if err := archive.Untar(os.Stdin, "/", options); err != nil { + if err := archive.Unpack(os.Stdin, "/", options); err != nil { fatal(err) } // fully consume stdin in case it is zero padded @@ -58,8 +54,10 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error options.Excludes = []string{} } - var buf bytes.Buffer - enc := json.NewEncoder(&buf) + var ( + buf bytes.Buffer + enc = json.NewEncoder(&buf) + ) if err := enc.Encode(options); err != nil { return fmt.Errorf("Untar json encode: %v", err) } @@ -68,9 +66,15 
@@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error return err } } + dest = filepath.Clean(dest) + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() cmd := reexec.Command("docker-untar", dest, buf.String()) - cmd.Stdin = tarArchive + cmd.Stdin = decompressedArchive out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out)