add better generate

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-03-20 01:33:56 -04:00
parent 3fc6abf56b
commit cdd93563f5
5655 changed files with 1187011 additions and 392 deletions


@ -0,0 +1,115 @@
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder // import "github.com/docker/docker/builder"
import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)
const (
// DefaultDockerfileName is the default filename, containing Docker commands, read by docker build
DefaultDockerfileName string = "Dockerfile"
)
// Source defines a location that can be used as a source for the ADD/COPY
// instructions in the builder.
type Source interface {
// Root returns the root path for accessing the source
Root() containerfs.ContainerFS
// Close signals that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().
Close() error
// Hash returns a checksum for a file
Hash(path string) (string, error)
}
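// Illustrative sketch, not part of this commit: a minimal Source backed by a
// temporary directory. It assumes extra imports (crypto/sha256, fmt, io/ioutil,
// os), and the plain sha256-of-file-contents Hash is only for demonstration;
// real context implementations use richer, cached hashing.
type tmpDirSource struct {
	dir string
}

var _ Source = (*tmpDirSource)(nil)

// Root exposes the temporary directory as a local container filesystem.
func (s *tmpDirSource) Root() containerfs.ContainerFS {
	return containerfs.NewLocalContainerFS(s.dir)
}

// Close deletes the temporary directory, as the Source docs recommend.
func (s *tmpDirSource) Close() error {
	return os.RemoveAll(s.dir)
}

// Hash returns a checksum of the file at path, resolved within the source root.
func (s *tmpDirSource) Hash(path string) (string, error) {
	full, err := s.Root().ResolveScopedPath(path, true)
	if err != nil {
		return "", err
	}
	data, err := ioutil.ReadFile(full)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(data)), nil
}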
// Backend abstracts calls to a Docker Daemon.
type Backend interface {
ImageBackend
ExecBackend
// CommitBuildStep creates a new Docker image from the config generated by
// a build step.
CommitBuildStep(backend.CommitConfig) (image.ID, error)
// ContainerCreateWorkdir creates the workdir
ContainerCreateWorkdir(containerID string) error
CreateImage(config []byte, parent string) (Image, error)
ImageCacheBuilder
}
// ImageBackend contains the interface methods required from an image component
type ImageBackend interface {
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ROLayer, error)
}
// ExecBackend contains the interface methods required for executing containers
type ExecBackend interface {
// ContainerAttachRaw attaches to a container.
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
// ContainerCreate creates a new Docker container and returns potential warnings
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
// ContainerRm removes a container specified by `id`.
ContainerRm(name string, config *types.ContainerRmConfig) error
// ContainerKill stops the container execution abruptly.
ContainerKill(containerID string, sig uint64) error
// ContainerStart starts a new container
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
// ContainerWait stops processing until the given container is stopped.
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
}
// Result is the output produced by a Builder
type Result struct {
ImageID string
FromImage Image
}
// ImageCacheBuilder represents a generator for a stateful image cache.
type ImageCacheBuilder interface {
// MakeImageCache creates a stateful image cache.
MakeImageCache(cacheFrom []string) ImageCache
}
// ImageCache abstracts an image cache.
// (parent image, child runconfig) -> child image
type ImageCache interface {
// GetCache returns a reference to a cached image whose parent equals `parent`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
}
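// Illustrative sketch, not part of this commit: how a caller might consume an
// ImageCache, treating an empty imageID with a nil error as a cache miss, as
// documented above. The helper name is hypothetical.
func cachedImageID(cache ImageCache, parentID string, cfg *container.Config) (id string, hit bool, err error) {
	id, err = cache.GetCache(parentID, cfg)
	if err != nil {
		return "", false, err
	}
	return id, id != "", nil
}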
// Image represents a Docker image used by the builder.
type Image interface {
ImageID() string
RunConfig() *container.Config
MarshalJSON() ([]byte, error)
OperatingSystem() string
}
// ROLayer is a reference to an image rootfs layer
type ROLayer interface {
Release() error
NewRWLayer() (RWLayer, error)
DiffID() layer.DiffID
}
// RWLayer is an active layer that can be read from and modified
type RWLayer interface {
Release() error
Root() containerfs.ContainerFS
Commit() (ROLayer, error)
}


@ -0,0 +1,168 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"fmt"
"io"
"github.com/docker/docker/runconfig/opts"
)
// builtinAllowedBuildArgs is a list of built-in allowed build args. These args
// are considered transparent and are excluded from the image history.
// Filtering from history is implemented in dispatchers.go
var builtinAllowedBuildArgs = map[string]bool{
"HTTP_PROXY": true,
"http_proxy": true,
"HTTPS_PROXY": true,
"https_proxy": true,
"FTP_PROXY": true,
"ftp_proxy": true,
"NO_PROXY": true,
"no_proxy": true,
}
// buildArgs manages arguments used by the builder
type buildArgs struct {
// args that are allowed for expansion/substitution and passing to commands in 'run'.
allowedBuildArgs map[string]*string
// args defined before the first `FROM` in a Dockerfile
allowedMetaArgs map[string]*string
// args referenced by the Dockerfile
referencedArgs map[string]struct{}
// args provided by the user on the command line
argsFromOptions map[string]*string
}
func newBuildArgs(argsFromOptions map[string]*string) *buildArgs {
return &buildArgs{
allowedBuildArgs: make(map[string]*string),
allowedMetaArgs: make(map[string]*string),
referencedArgs: make(map[string]struct{}),
argsFromOptions: argsFromOptions,
}
}
func (b *buildArgs) Clone() *buildArgs {
result := newBuildArgs(b.argsFromOptions)
for k, v := range b.allowedBuildArgs {
result.allowedBuildArgs[k] = v
}
for k, v := range b.allowedMetaArgs {
result.allowedMetaArgs[k] = v
}
for k := range b.referencedArgs {
result.referencedArgs[k] = struct{}{}
}
return result
}
func (b *buildArgs) MergeReferencedArgs(other *buildArgs) {
for k := range other.referencedArgs {
b.referencedArgs[k] = struct{}{}
}
}
// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were
// passed but not consumed during the build, and prints a warning if there are any.
func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) {
leftoverArgs := []string{}
for arg := range b.argsFromOptions {
_, isReferenced := b.referencedArgs[arg]
_, isBuiltin := builtinAllowedBuildArgs[arg]
if !isBuiltin && !isReferenced {
leftoverArgs = append(leftoverArgs, arg)
}
}
if len(leftoverArgs) > 0 {
fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
}
}
// ResetAllowed clears the list of args that are allowed to be used by a
// directive
func (b *buildArgs) ResetAllowed() {
b.allowedBuildArgs = make(map[string]*string)
}
// AddMetaArg adds a new meta arg that can be used by FROM directives
func (b *buildArgs) AddMetaArg(key string, value *string) {
b.allowedMetaArgs[key] = value
}
// AddArg adds a new arg that can be used by directives
func (b *buildArgs) AddArg(key string, value *string) {
b.allowedBuildArgs[key] = value
b.referencedArgs[key] = struct{}{}
}
// IsReferencedOrNotBuiltin returns true if the arg is not a built-in, or if the
// built-in has been referenced (declared with ARG) in the Dockerfile.
func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool {
_, isBuiltin := builtinAllowedBuildArgs[key]
_, isAllowed := b.allowedBuildArgs[key]
return isAllowed || !isBuiltin
}
// GetAllAllowed returns a mapping with all the allowed args
func (b *buildArgs) GetAllAllowed() map[string]string {
return b.getAllFromMapping(b.allowedBuildArgs)
}
// GetAllMeta returns a mapping with all the meta args
func (b *buildArgs) GetAllMeta() map[string]string {
return b.getAllFromMapping(b.allowedMetaArgs)
}
func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]string {
m := make(map[string]string)
keys := keysFromMaps(source, builtinAllowedBuildArgs)
for _, key := range keys {
v, ok := b.getBuildArg(key, source)
if ok {
m[key] = v
}
}
return m
}
// FilterAllowed returns all allowed args without the filtered args
func (b *buildArgs) FilterAllowed(filter []string) []string {
envs := []string{}
configEnv := opts.ConvertKVStringsToMap(filter)
for key, val := range b.GetAllAllowed() {
if _, ok := configEnv[key]; !ok {
envs = append(envs, fmt.Sprintf("%s=%s", key, val))
}
}
return envs
}
func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) {
defaultValue, exists := mapping[key]
// Return override from options if one is defined
if v, ok := b.argsFromOptions[key]; ok && v != nil {
return *v, ok
}
if defaultValue == nil {
if v, ok := b.allowedMetaArgs[key]; ok && v != nil {
return *v, ok
}
return "", false
}
return *defaultValue, exists
}
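// Illustrative example, not part of this commit: the resolution order
// implemented by getBuildArg above for a key FOO is:
//
//	1. a non-nil value passed via --build-arg FOO=... always wins;
//	2. otherwise a non-nil default from the supplied mapping is used
//	   (e.g. `ARG FOO=default` in the Dockerfile);
//	3. if the mapping entry is nil (ARG declared with no default), a meta ARG
//	   value defined before the first FROM is used when present;
//	4. otherwise the lookup reports ("", false).
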
func keysFromMaps(source map[string]*string, builtin map[string]bool) []string {
keys := []string{}
for key := range source {
keys = append(keys, key)
}
for key := range builtin {
keys = append(keys, key)
}
return keys
}


@ -0,0 +1,102 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"bytes"
"strings"
"testing"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func strPtr(source string) *string {
return &source
}
func TestGetAllAllowed(t *testing.T) {
buildArgs := newBuildArgs(map[string]*string{
"ArgNotUsedInDockerfile": strPtr("fromopt1"),
"ArgOverriddenByOptions": strPtr("fromopt2"),
"ArgNoDefaultInDockerfileFromOptions": strPtr("fromopt3"),
"HTTP_PROXY": strPtr("theproxy"),
})
buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1"))
buildArgs.AddMetaArg("ArgFromMetaOverridden", strPtr("frommeta2"))
buildArgs.AddMetaArg("ArgFromMetaNotUsed", strPtr("frommeta3"))
buildArgs.AddArg("ArgOverriddenByOptions", strPtr("fromdockerfile2"))
buildArgs.AddArg("ArgWithDefaultInDockerfile", strPtr("fromdockerfile1"))
buildArgs.AddArg("ArgNoDefaultInDockerfile", nil)
buildArgs.AddArg("ArgNoDefaultInDockerfileFromOptions", nil)
buildArgs.AddArg("ArgFromMeta", nil)
buildArgs.AddArg("ArgFromMetaOverridden", strPtr("fromdockerfile3"))
all := buildArgs.GetAllAllowed()
expected := map[string]string{
"HTTP_PROXY": "theproxy",
"ArgOverriddenByOptions": "fromopt2",
"ArgWithDefaultInDockerfile": "fromdockerfile1",
"ArgNoDefaultInDockerfileFromOptions": "fromopt3",
"ArgFromMeta": "frommeta1",
"ArgFromMetaOverridden": "fromdockerfile3",
}
assert.Check(t, is.DeepEqual(expected, all))
}
func TestGetAllMeta(t *testing.T) {
buildArgs := newBuildArgs(map[string]*string{
"ArgNotUsedInDockerfile": strPtr("fromopt1"),
"ArgOverriddenByOptions": strPtr("fromopt2"),
"ArgNoDefaultInMetaFromOptions": strPtr("fromopt3"),
"HTTP_PROXY": strPtr("theproxy"),
})
buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1"))
buildArgs.AddMetaArg("ArgOverriddenByOptions", strPtr("frommeta2"))
buildArgs.AddMetaArg("ArgNoDefaultInMetaFromOptions", nil)
all := buildArgs.GetAllMeta()
expected := map[string]string{
"HTTP_PROXY": "theproxy",
"ArgFromMeta": "frommeta1",
"ArgOverriddenByOptions": "fromopt2",
"ArgNoDefaultInMetaFromOptions": "fromopt3",
}
assert.Check(t, is.DeepEqual(expected, all))
}
func TestWarnOnUnusedBuildArgs(t *testing.T) {
buildArgs := newBuildArgs(map[string]*string{
"ThisArgIsUsed": strPtr("fromopt1"),
"ThisArgIsNotUsed": strPtr("fromopt2"),
"HTTPS_PROXY": strPtr("referenced builtin"),
"HTTP_PROXY": strPtr("unreferenced builtin"),
})
buildArgs.AddArg("ThisArgIsUsed", nil)
buildArgs.AddArg("HTTPS_PROXY", nil)
buffer := new(bytes.Buffer)
buildArgs.WarnOnUnusedBuildArgs(buffer)
out := buffer.String()
assert.Assert(t, !strings.Contains(out, "ThisArgIsUsed"), out)
assert.Assert(t, !strings.Contains(out, "HTTPS_PROXY"), out)
assert.Assert(t, !strings.Contains(out, "HTTP_PROXY"), out)
assert.Check(t, is.Contains(out, "ThisArgIsNotUsed"))
}
func TestIsUnreferencedBuiltin(t *testing.T) {
buildArgs := newBuildArgs(map[string]*string{
"ThisArgIsUsed": strPtr("fromopt1"),
"ThisArgIsNotUsed": strPtr("fromopt2"),
"HTTPS_PROXY": strPtr("referenced builtin"),
"HTTP_PROXY": strPtr("unreferenced builtin"),
})
buildArgs.AddArg("ThisArgIsUsed", nil)
buildArgs.AddArg("HTTPS_PROXY", nil)
assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed"))
assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed"))
assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY"))
assert.Check(t, !buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY"))
}


@ -0,0 +1,420 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"runtime"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/dockerfile/shell"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sync/syncmap"
)
var validCommitCommands = map[string]bool{
"cmd": true,
"entrypoint": true,
"healthcheck": true,
"env": true,
"expose": true,
"label": true,
"onbuild": true,
"user": true,
"volume": true,
"workdir": true,
}
const (
stepFormat = "Step %d/%d : %v"
)
// SessionGetter is an object used to get access to a session by UUID
type SessionGetter interface {
Get(ctx context.Context, uuid string) (session.Caller, error)
}
// BuildManager is shared across all Builder objects
type BuildManager struct {
idMappings *idtools.IDMappings
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
}
// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
bm := &BuildManager{
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
idMappings: idMappings,
fsCache: fsCache,
}
if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
return nil, err
}
return bm, nil
}
// Build starts a new build from a BuildConfig
func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) {
buildsTriggered.Inc()
if config.Options.Dockerfile == "" {
config.Options.Dockerfile = builder.DefaultDockerfileName
}
source, dockerfile, err := remotecontext.Detect(config)
if err != nil {
return nil, err
}
defer func() {
if source != nil {
if err := source.Close(); err != nil {
logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
}
}
}()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil {
return nil, err
} else if src != nil {
source = src
}
os := runtime.GOOS
optionsPlatform := system.ParsePlatform(config.Options.Platform)
if dockerfile.OS != "" {
if optionsPlatform.OS != "" && optionsPlatform.OS != dockerfile.OS {
return nil, fmt.Errorf("invalid platform")
}
os = dockerfile.OS
} else if optionsPlatform.OS != "" {
os = optionsPlatform.OS
}
config.Options.Platform = os
dockerfile.OS = os
builderOptions := builderOptions{
Options: config.Options,
ProgressWriter: config.ProgressWriter,
Backend: bm.backend,
PathCache: bm.pathCache,
IDMappings: bm.idMappings,
}
return newBuilder(ctx, builderOptions).build(source, dockerfile)
}
func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
if options.SessionID == "" || bm.sg == nil {
return nil, nil
}
logrus.Debug("client is session enabled")
connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
defer cancelCtx()
c, err := bm.sg.Get(connectCtx, options.SessionID)
if err != nil {
return nil, err
}
go func() {
<-c.Context().Done()
cancel()
}()
if options.RemoteContext == remotecontext.ClientSessionRemote {
st := time.Now()
csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID)
if err != nil {
return nil, err
}
src, err := bm.fsCache.SyncFrom(ctx, csi)
if err != nil {
return nil, err
}
logrus.Debugf("sync-time: %v", time.Since(st))
return src, nil
}
return nil, nil
}
// builderOptions are the dependencies required by the builder
type builderOptions struct {
Options *types.ImageBuildOptions
Backend builder.Backend
ProgressWriter backend.ProgressWriter
PathCache pathCache
IDMappings *idtools.IDMappings
}
// Builder is a Dockerfile builder
// It implements the builder.Backend interface.
type Builder struct {
options *types.ImageBuildOptions
Stdout io.Writer
Stderr io.Writer
Aux *streamformatter.AuxFormatter
Output io.Writer
docker builder.Backend
clientCtx context.Context
idMappings *idtools.IDMappings
disableCommit bool
imageSources *imageSources
pathCache pathCache
containerManager *containerManager
imageProber ImageProber
}
// newBuilder creates a new Dockerfile builder from an optional dockerfile and a set of options.
func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
config := options.Options
if config == nil {
config = new(types.ImageBuildOptions)
}
b := &Builder{
clientCtx: clientCtx,
options: config,
Stdout: options.ProgressWriter.StdoutFormatter,
Stderr: options.ProgressWriter.StderrFormatter,
Aux: options.ProgressWriter.AuxFormatter,
Output: options.ProgressWriter.Output,
docker: options.Backend,
idMappings: options.IDMappings,
imageSources: newImageSources(clientCtx, options),
pathCache: options.PathCache,
imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache),
containerManager: newContainerManager(options.Backend),
}
return b
}
// Build runs the Dockerfile builder by parsing the Dockerfile and executing
// the instructions from the file.
func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
defer b.imageSources.Unmount()
addNodesForLabelOption(dockerfile.AST, b.options.Labels)
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
if err != nil {
if instructions.IsUnknownInstruction(err) {
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
}
return nil, errdefs.InvalidParameter(err)
}
if b.options.Target != "" {
targetIx, found := instructions.HasStage(stages, b.options.Target)
if !found {
buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc()
return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
}
stages = stages[:targetIx+1]
}
dockerfile.PrintWarnings(b.Stderr)
dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source)
if err != nil {
return nil, err
}
if dispatchState.imageID == "" {
buildsFailed.WithValues(metricsDockerfileEmptyError).Inc()
return nil, errors.New("No image was generated. Is your Dockerfile empty?")
}
return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil
}
func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error {
if aux == nil || state.imageID == "" {
return nil
}
return aux.Emit(types.BuildResult{ID: state.imageID})
}
func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *buildArgs) error {
// shell.Lex currently only supports the concatenated string format
envs := convertMapToEnvList(args.GetAllAllowed())
if err := meta.Expand(func(word string) (string, error) {
return shlex.ProcessWord(word, envs)
}); err != nil {
return err
}
args.AddArg(meta.Key, meta.Value)
args.AddMetaArg(meta.Key, meta.Value)
return nil
}
func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int {
fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd)
fmt.Fprintln(out)
return currentCommandIndex + 1
}
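// Illustrative example, not part of this commit: printCommand emits progress
// lines such as
//
//	Step 2/5 : RUN apt-get update
//
// using stepFormat, and returns the incremented index so the caller below can
// thread the step counter through meta args and stage commands.
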
func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) {
dispatchRequest := dispatchRequest{}
buildArgs := newBuildArgs(b.options.BuildArgs)
totalCommands := len(metaArgs) + len(parseResult)
currentCommandIndex := 1
for _, stage := range parseResult {
totalCommands += len(stage.Commands)
}
shlex := shell.NewLex(escapeToken)
for _, meta := range metaArgs {
currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta)
err := processMetaArg(meta, shlex, buildArgs)
if err != nil {
return nil, err
}
}
stagesResults := newStagesBuildResults()
for _, stage := range parseResult {
if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil {
return nil, err
}
dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults)
currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode)
if err := initializeStage(dispatchRequest, &stage); err != nil {
return nil, err
}
dispatchRequest.state.updateRunConfig()
fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
for _, cmd := range stage.Commands {
select {
case <-b.clientCtx.Done():
logrus.Debug("Builder: build cancelled!")
fmt.Fprint(b.Stdout, "Build cancelled\n")
buildsFailed.WithValues(metricsBuildCanceled).Inc()
return nil, errors.New("Build cancelled")
default:
// Not cancelled yet, keep going...
}
currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd)
if err := dispatch(dispatchRequest, cmd); err != nil {
return nil, err
}
dispatchRequest.state.updateRunConfig()
fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
}
if err := emitImageID(b.Aux, dispatchRequest.state); err != nil {
return nil, err
}
buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs)
if err := commitStage(dispatchRequest.state, stagesResults); err != nil {
return nil, err
}
}
buildArgs.WarnOnUnusedBuildArgs(b.Stdout)
return dispatchRequest.state, nil
}
func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) {
if len(labels) == 0 {
return
}
node := parser.NodeFromLabels(labels)
dockerfile.Children = append(dockerfile.Children, node)
}
// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile.
// It will:
// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries.
// - Run the build by calling builder.dispatch() to invoke each entry's handling routine.
//
// BuildFromConfig is used by the /commit endpoint, with the changes
// coming from the query parameter of the same name.
//
// TODO: Remove?
func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) {
if !system.IsOSSupported(os) {
return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem)
}
if len(changes) == 0 {
return config, nil
}
dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
if err != nil {
return nil, errdefs.InvalidParameter(err)
}
b := newBuilder(context.Background(), builderOptions{
Options: &types.ImageBuildOptions{NoCache: true},
})
// ensure that the commands are valid
for _, n := range dockerfile.AST.Children {
if !validCommitCommands[n.Value] {
return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value))
}
}
b.Stdout = ioutil.Discard
b.Stderr = ioutil.Discard
b.disableCommit = true
commands := []instructions.Command{}
for _, n := range dockerfile.AST.Children {
cmd, err := instructions.ParseCommand(n)
if err != nil {
return nil, errdefs.InvalidParameter(err)
}
commands = append(commands, cmd)
}
dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, newBuildArgs(b.options.BuildArgs), newStagesBuildResults())
// We make mutations to the configuration, ensure we have a copy
dispatchRequest.state.runConfig = copyRunConfig(config)
dispatchRequest.state.imageID = config.Image
dispatchRequest.state.operatingSystem = os
for _, cmd := range commands {
err := dispatch(dispatchRequest, cmd)
if err != nil {
return nil, errdefs.InvalidParameter(err)
}
dispatchRequest.state.updateRunConfig()
}
return dispatchRequest.state.runConfig, nil
}
func convertMapToEnvList(m map[string]string) []string {
result := []string{}
for k, v := range m {
result = append(result, k+"="+v)
}
return result
}


@ -0,0 +1,35 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"strings"
"testing"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestAddNodesForLabelOption(t *testing.T) {
dockerfile := "FROM scratch"
result, err := parser.Parse(strings.NewReader(dockerfile))
assert.Check(t, err)
labels := map[string]string{
"org.e": "cli-e",
"org.d": "cli-d",
"org.c": "cli-c",
"org.b": "cli-b",
"org.a": "cli-a",
}
nodes := result.AST
addNodesForLabelOption(nodes, labels)
expected := []string{
"FROM scratch",
`LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`,
}
assert.Check(t, is.Len(nodes.Children, 2))
for i, v := range nodes.Children {
assert.Check(t, is.Equal(expected[i], v.Original))
}
}


@ -0,0 +1,7 @@
// +build !windows
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
func defaultShellForOS(os string) []string {
return []string{"/bin/sh", "-c"}
}


@ -0,0 +1,8 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
func defaultShellForOS(os string) []string {
if os == "linux" {
return []string{"/bin/sh", "-c"}
}
return []string{"cmd", "/S", "/C"}
}


@ -0,0 +1,76 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"time"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const sessionConnectTimeout = 5 * time.Second
// ClientSessionTransport is a transport for copying files from docker client
// to the daemon.
type ClientSessionTransport struct{}
// NewClientSessionTransport returns a new ClientSessionTransport instance
func NewClientSessionTransport() *ClientSessionTransport {
return &ClientSessionTransport{}
}
// Copy data from a remote to a destination directory.
func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error {
csi, ok := id.(*ClientSessionSourceIdentifier)
if !ok {
return errors.New("invalid identifier for client session")
}
return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{
IncludePatterns: csi.includePatterns,
DestDir: dest,
CacheUpdater: cu,
})
}
// ClientSessionSourceIdentifier is an identifier that can be used for requesting
// files from the remote client
type ClientSessionSourceIdentifier struct {
includePatterns []string
caller session.Caller
uuid string
}
// NewClientSessionSourceIdentifier returns a new ClientSessionSourceIdentifier instance
func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) {
csi := &ClientSessionSourceIdentifier{
uuid: uuid,
}
caller, err := sg.Get(ctx, uuid)
if err != nil {
return nil, errors.Wrapf(err, "failed to get session for %s", uuid)
}
csi.caller = caller
return csi, nil
}
// Transport returns the transport identifier for a remote identifier
func (csi *ClientSessionSourceIdentifier) Transport() string {
return remotecontext.ClientSessionRemote
}
// SharedKey returns the shared key for a remote identifier. The shared key is
// used for finding the base for a repeated transfer.
func (csi *ClientSessionSourceIdentifier) SharedKey() string {
return csi.caller.SharedKey()
}
// Key returns a unique key for a remote identifier. Requests with the same key
// return the same data.
func (csi *ClientSessionSourceIdentifier) Key() string {
return csi.uuid
}


@ -0,0 +1,46 @@
// Package command contains the set of Dockerfile commands.
package command // import "github.com/docker/docker/builder/dockerfile/command"
// Define constants for the command strings
const (
Add = "add"
Arg = "arg"
Cmd = "cmd"
Copy = "copy"
Entrypoint = "entrypoint"
Env = "env"
Expose = "expose"
From = "from"
Healthcheck = "healthcheck"
Label = "label"
Maintainer = "maintainer"
Onbuild = "onbuild"
Run = "run"
Shell = "shell"
StopSignal = "stopsignal"
User = "user"
Volume = "volume"
Workdir = "workdir"
)
// Commands is a list of all Dockerfile commands
var Commands = map[string]struct{}{
Add: {},
Arg: {},
Cmd: {},
Copy: {},
Entrypoint: {},
Env: {},
Expose: {},
From: {},
Healthcheck: {},
Label: {},
Maintainer: {},
Onbuild: {},
Run: {},
Shell: {},
StopSignal: {},
User: {},
Volume: {},
Workdir: {},
}
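// Illustrative sketch, not part of this commit: checking whether a parsed
// instruction name is a known Dockerfile command. It assumes a "strings"
// import; the helper name is hypothetical.
func isDockerfileCommand(name string) bool {
	_, ok := Commands[strings.ToLower(name)]
	return ok
}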


@ -0,0 +1,146 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"fmt"
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
type containerManager struct {
tmpContainers map[string]struct{}
backend builder.ExecBackend
}
// newContainerManager creates a new containerManager backed by the given ExecBackend
func newContainerManager(docker builder.ExecBackend) *containerManager {
return &containerManager{
backend: docker,
tmpContainers: make(map[string]struct{}),
}
}
// Create a container
func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) {
container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{
Config: runConfig,
HostConfig: hostConfig,
})
if err != nil {
return container, err
}
c.tmpContainers[container.ID] = struct{}{}
return container, nil
}
var errCancelled = errors.New("build cancelled")
// Run a container by ID
func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) {
attached := make(chan struct{})
errCh := make(chan error)
go func() {
errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached)
}()
select {
case err := <-errCh:
return err
case <-attached:
}
finished := make(chan struct{})
cancelErrCh := make(chan error, 1)
go func() {
select {
case <-ctx.Done():
logrus.Debugln("Build cancelled, killing and removing container:", cID)
c.backend.ContainerKill(cID, 0)
c.removeContainer(cID, stdout)
cancelErrCh <- errCancelled
case <-finished:
cancelErrCh <- nil
}
}()
if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil {
close(finished)
logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error())
return err
}
// Block on reading output from container, stop on err or chan closed
if err := <-errCh; err != nil {
close(finished)
logCancellationError(cancelErrCh, "error from errCh: "+err.Error())
return err
}
waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning)
if err != nil {
close(finished)
logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err))
return err
}
if status := <-waitC; status.ExitCode() != 0 {
close(finished)
logCancellationError(cancelErrCh,
fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode()))
return &statusCodeError{code: status.ExitCode(), err: status.Err()}
}
close(finished)
return <-cancelErrCh
}
func logCancellationError(cancelErrCh chan error, msg string) {
if cancelErr := <-cancelErrCh; cancelErr != nil {
logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg)
}
}
type statusCodeError struct {
code int
err error
}
func (e *statusCodeError) Error() string {
if e.err == nil {
return ""
}
return e.err.Error()
}
func (e *statusCodeError) StatusCode() int {
return e.code
}
func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error {
rmConfig := &types.ContainerRmConfig{
ForceRemove: true,
RemoveVolume: true,
}
if err := c.backend.ContainerRm(containerID, rmConfig); err != nil {
fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err)
return err
}
return nil
}
// RemoveAll containers managed by this container manager
func (c *containerManager) RemoveAll(stdout io.Writer) {
for containerID := range c.tmpContainers {
if err := c.removeContainer(containerID, stdout); err != nil {
return
}
delete(c.tmpContainers, containerID)
fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID))
}
}
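// Illustrative sketch, not part of this commit: the lifecycle a dispatcher
// follows when executing a build step with containerManager. The runConfig and
// hostConfig are assumed to come from the current dispatch state; intermediate
// containers are cleaned up later via RemoveAll. The helper name is hypothetical.
func runStep(ctx context.Context, c *containerManager, runConfig *container.Config, hostConfig *container.HostConfig, stdout, stderr io.Writer) error {
	created, err := c.Create(runConfig, hostConfig)
	if err != nil {
		return err
	}
	return c.Run(ctx, created.ID, stdout, stderr)
}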


@ -0,0 +1,558 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"archive/tar"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/urlutil"
"github.com/pkg/errors"
)
const unnamedFilename = "__unnamed__"
type pathCache interface {
Load(key interface{}) (value interface{}, ok bool)
Store(key, value interface{})
}
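// Illustrative note, not part of this commit: BuildManager uses a
// golang.org/x/sync/syncmap.Map as its pathCache, and (assuming the syncmap
// import) that fact can be asserted at compile time:
//
//	var _ pathCache = &syncmap.Map{}
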
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
root containerfs.ContainerFS
path string
hash string
noDecompress bool
}
func (c copyInfo) fullPath() (string, error) {
return c.root.ResolveScopedPath(c.path, true)
}
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
return copyInfo{root: source.Root(), path: path, hash: hash}
}
func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
return copyInfos
}
// copyInstruction is a fully parsed COPY or ADD command that is passed to
// Builder.performCopy to copy files into the image filesystem
type copyInstruction struct {
cmdName string
infos []copyInfo
dest string
chownStr string
allowLocalDecompression bool
}
// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
// and creates a copyInstruction
type copier struct {
imageSource *imageMount
source builder.Source
pathCache pathCache
download sourceDownloader
platform string
// for cleanup. TODO: having copier.cleanup() is error prone and hard to
// follow. Code calling performCopy should manage the lifecycle of its params.
// Copier should take override source as input, not imageMount.
activeLayer builder.RWLayer
tmpPaths []string
}
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
return copier{
source: req.source,
pathCache: req.builder.pathCache,
download: download,
imageSource: imageSource,
platform: req.builder.options.Platform,
}
}
func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
inst := copyInstruction{cmdName: cmdName}
last := len(args) - 1
// Work in platform-specific filepath semantics
inst.dest = fromSlash(args[last], o.platform)
separator := string(separator(o.platform))
infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
if err != nil {
return inst, errors.Wrapf(err, "%s failed", cmdName)
}
if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
}
inst.infos = infos
return inst, nil
}
// getCopyInfosForSourcePaths iterates over the source files and calculates the info
// needed to copy (e.g. hash value if cached).
// The dest is used in case the source is a URL (and ends with "/").
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
var infos []copyInfo
for _, orig := range sources {
subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
if err != nil {
return nil, err
}
infos = append(infos, subinfos...)
}
if len(infos) == 0 {
return nil, errors.New("no source files were specified")
}
return infos, nil
}
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
if !urlutil.IsURL(orig) {
return o.calcCopyInfo(orig, true)
}
remote, path, err := o.download(orig)
if err != nil {
return nil, err
}
// If path == "" then we are unable to determine the filename from src,
// so dest must name a file rather than end with a trailing slash.
if path == "" {
if strings.HasSuffix(dest, "/") {
return nil, errors.Errorf("cannot determine filename for source %s", orig)
}
path = unnamedFilename
}
o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
hash, err := remote.Hash(path)
ci := newCopyInfoFromSource(remote, path, hash)
ci.noDecompress = true // data from http shouldn't be extracted even on ADD
return newCopyInfos(ci), err
}
// Cleanup removes any temporary directories created as part of downloading
// remote files.
func (o *copier) Cleanup() {
for _, path := range o.tmpPaths {
os.RemoveAll(path)
}
o.tmpPaths = []string{}
if o.activeLayer != nil {
o.activeLayer.Release()
o.activeLayer = nil
}
}
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
imageSource := o.imageSource
// TODO: do this when creating copier. Requires validateCopySourcePath
// (and other below) to be aware of the different sources. Why is it only
// done on image Source?
if imageSource != nil {
var err error
rwLayer, err := imageSource.NewRWLayer()
if err != nil {
return nil, err
}
o.activeLayer = rwLayer
o.source, err = remotecontext.NewLazySource(rwLayer.Root())
if err != nil {
return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path())
}
}
if o.source == nil {
return nil, errors.Errorf("missing build context")
}
root := o.source.Root()
if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
return nil, err
}
// Work in source OS specific filepath semantics
// For LCOW, this is NOT the daemon OS.
origPath = root.FromSlash(origPath)
origPath = strings.TrimPrefix(origPath, string(root.Separator()))
origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))
// Deal with wildcards
if allowWildcards && containsWildcards(origPath, root.OS()) {
return o.copyWithWildcards(origPath)
}
if imageSource != nil && imageSource.ImageID() != "" {
// return a cached copy if one exists
if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
}
}
// Deal with the single file case
copyInfo, err := copyInfoForFile(o.source, origPath)
switch {
case err != nil:
return nil, err
case copyInfo.hash != "":
o.storeInPathCache(imageSource, origPath, copyInfo.hash)
return newCopyInfos(copyInfo), err
}
// TODO: remove, handle dirs in Hash()
subfiles, err := walkSource(o.source, origPath)
if err != nil {
return nil, err
}
hash := hashStringSlice("dir", subfiles)
o.storeInPathCache(imageSource, origPath, hash)
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}
func containsWildcards(name, platform string) bool {
isWindows := platform == "windows"
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' && !isWindows {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}
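// Illustrative examples, not part of this commit, of containsWildcards:
//
//	containsWildcards("app-*.tar", "linux")    // true: unescaped '*'
//	containsWildcards(`app-\*.tar`, "linux")   // false: '\' escapes the '*'
//	containsWildcards(`app-\*.tar`, "windows") // true: on Windows '\' is a separator, not an escape
//	containsWildcards("config?", "linux")      // true: '?' is a wildcard
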
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
if im != nil {
o.pathCache.Store(im.ImageID()+path, hash)
}
}
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
root := o.source.Root()
var copyInfos []copyInfo
if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(root, path)
if err != nil {
return err
}
if rel == "." {
return nil
}
if match, _ := root.Match(origPath, rel); !match {
return nil
}
// Note we set allowWildcards to false in case the name has
// a * in it
subInfos, err := o.calcCopyInfo(rel, false)
if err != nil {
return err
}
copyInfos = append(copyInfos, subInfos...)
return nil
}); err != nil {
return nil, err
}
return copyInfos, nil
}
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
fi, err := remotecontext.StatAt(source, path)
if err != nil {
return copyInfo{}, err
}
if fi.IsDir() {
return copyInfo{}, nil
}
hash, err := source.Hash(path)
if err != nil {
return copyInfo{}, err
}
return newCopyInfoFromSource(source, path, "file:"+hash), nil
}
// TODO: dedupe with copyWithWildcards()
func walkSource(source builder.Source, origPath string) ([]string, error) {
fp, err := remotecontext.FullPath(source, origPath)
if err != nil {
return nil, err
}
// Must be a dir
var subfiles []string
err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(source.Root(), path)
if err != nil {
return err
}
if rel == "." {
return nil
}
hash, err := source.Hash(rel)
if err != nil {
return nil
}
// we already checked handleHash above
subfiles = append(subfiles, hash)
return nil
})
if err != nil {
return nil, err
}
sort.Strings(subfiles)
return subfiles, nil
}
type sourceDownloader func(string) (builder.Source, string, error)
func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
return func(url string) (builder.Source, string, error) {
return downloadSource(output, stdout, url)
}
}
func errOnSourceDownload(_ string) (builder.Source, string, error) {
return nil, "", errors.New("source can't be a URL for COPY")
}
func getFilenameForDownload(path string, resp *http.Response) string {
// Guess filename based on source
if path != "" && !strings.HasSuffix(path, "/") {
if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
return filename
}
}
// Guess filename based on Content-Disposition
if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
return filename
}
}
}
}
return ""
}
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
u, err := url.Parse(srcURL)
if err != nil {
return
}
resp, err := remotecontext.GetWithStatusError(srcURL)
if err != nil {
return
}
filename := getFilenameForDownload(u.Path, resp)
// Prepare file in a tmp dir
tmpDir, err := ioutils.TempDir("", "docker-remote")
if err != nil {
return
}
defer func() {
if err != nil {
os.RemoveAll(tmpDir)
}
}()
// If filename is empty, the returned filename will be "" but
// the tmp filename will be created as "__unnamed__"
tmpFileName := filename
if filename == "" {
tmpFileName = unnamedFilename
}
tmpFileName = filepath.Join(tmpDir, tmpFileName)
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return
}
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
// Download and dump result to tmp file
// TODO: add filehash directly
if _, err = io.Copy(tmpFile, progressReader); err != nil {
tmpFile.Close()
return
}
// TODO: how important is this random blank line to the output?
fmt.Fprintln(stdout)
// Set the mtime to the Last-Modified header value if present
// Otherwise just remove atime and mtime
mTime := time.Time{}
lastMod := resp.Header.Get("Last-Modified")
if lastMod != "" {
// If we can't parse it then just let it default to 'zero'
// otherwise use the parsed time value
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
mTime = parsedMTime
}
}
tmpFile.Close()
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
return
}
lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
return lc, filename, err
}
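// Illustrative example, not part of this commit: for
// `ADD http://example.com/archive.tgz /tmp/` the body is downloaded to
// <tmpdir>/archive.tgz and "archive.tgz" is returned as the filename, whereas
// `ADD http://example.com/ /dest` has no derivable filename, so the data is
// written to <tmpdir>/__unnamed__ and an empty filename is returned for the
// caller to handle.
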
type copyFileOptions struct {
decompress bool
chownPair idtools.IDPair
archiver Archiver
}
type copyEndpoint struct {
driver containerfs.Driver
path string
}
func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
srcPath, err := source.fullPath()
if err != nil {
return err
}
destPath, err := dest.fullPath()
if err != nil {
return err
}
archiver := options.archiver
srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}
src, err := source.root.Stat(srcPath)
if err != nil {
return errors.Wrapf(err, "source path not found")
}
if src.IsDir() {
return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
}
if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
return archiver.UntarPath(srcPath, destPath)
}
destExistsAsDir, err := isExistingDirectory(destEndpoint)
if err != nil {
return err
}
// dest.path must be used because destPath has already been cleaned of any
// trailing slash
if endsInSlash(dest.root, dest.path) || destExistsAsDir {
// source.path must be used to get the correct filename when the source
// is a symlink
destPath = dest.root.Join(destPath, source.root.Base(source.path))
destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
}
return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
}
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
file, err := driver.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := archive.DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
}
if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy directory")
}
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, chownPair, !destExists)
}
func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
// LCOW
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
// Normal containers
return errors.Wrapf(err, "failed to create new directory")
}
}
if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy file")
}
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, chownPair, false)
}
func endsInSlash(driver containerfs.Driver, path string) bool {
return strings.HasSuffix(path, string(driver.Separator()))
}
// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(point *copyEndpoint) (bool, error) {
destStat, err := point.driver.Stat(point.path)
switch {
case os.IsNotExist(err):
return false, nil
case err != nil:
return false, err
}
return destStat.IsDir(), nil
}


@ -0,0 +1,148 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"net/http"
"testing"
"github.com/docker/docker/pkg/containerfs"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
"github.com/gotestyourself/gotestyourself/fs"
)
func TestIsExistingDirectory(t *testing.T) {
tmpfile := fs.NewFile(t, "file-exists-test", fs.WithContent("something"))
defer tmpfile.Remove()
tmpdir := fs.NewDir(t, "dir-exists-test")
defer tmpdir.Remove()
var testcases = []struct {
doc string
path string
expected bool
}{
{
doc: "directory exists",
path: tmpdir.Path(),
expected: true,
},
{
doc: "path doesn't exist",
path: "/bogus/path/does/not/exist",
expected: false,
},
{
doc: "file exists",
path: tmpfile.Path(),
expected: false,
},
}
for _, testcase := range testcases {
result, err := isExistingDirectory(&copyEndpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
if !assert.Check(t, err) {
continue
}
assert.Check(t, is.Equal(testcase.expected, result), testcase.doc)
}
}
func TestGetFilenameForDownload(t *testing.T) {
var testcases = []struct {
path string
disposition string
expected string
}{
{
path: "http://www.example.com/",
expected: "",
},
{
path: "http://www.example.com/xyz",
expected: "xyz",
},
{
path: "http://www.example.com/xyz.html",
expected: "xyz.html",
},
{
path: "http://www.example.com/xyz/",
expected: "",
},
{
path: "http://www.example.com/xyz/uvw",
expected: "uvw",
},
{
path: "http://www.example.com/xyz/uvw.html",
expected: "uvw.html",
},
{
path: "http://www.example.com/xyz/uvw/",
expected: "",
},
{
path: "/",
expected: "",
},
{
path: "/xyz",
expected: "xyz",
},
{
path: "/xyz.html",
expected: "xyz.html",
},
{
path: "/xyz/",
expected: "",
},
{
path: "/xyz/",
disposition: "attachment; filename=xyz.html",
expected: "xyz.html",
},
{
disposition: "",
expected: "",
},
{
disposition: "attachment; filename=xyz",
expected: "xyz",
},
{
disposition: "attachment; filename=xyz.html",
expected: "xyz.html",
},
{
disposition: "attachment; filename=\"xyz\"",
expected: "xyz",
},
{
disposition: "attachment; filename=\"xyz.html\"",
expected: "xyz.html",
},
{
disposition: "attachment; filename=\"/xyz.html\"",
expected: "xyz.html",
},
{
disposition: "attachment; filename=\"/xyz/uvw\"",
expected: "uvw",
},
{
disposition: "attachment; filename=\"Naïve file.txt\"",
expected: "Naïve file.txt",
},
}
for _, testcase := range testcases {
resp := http.Response{
Header: make(map[string][]string),
}
if testcase.disposition != "" {
resp.Header.Add("Content-Disposition", testcase.disposition)
}
filename := getFilenameForDownload(testcase.path, &resp)
assert.Check(t, is.Equal(testcase.expected, filename))
}
}


@ -0,0 +1,48 @@
// +build !windows
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"os"
"path/filepath"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
)
func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
var (
skipChownRoot bool
err error
)
if !overrideSkip {
destEndpoint := &copyEndpoint{driver: containerfs.NewLocalDriver(), path: destination}
skipChownRoot, err = isExistingDirectory(destEndpoint)
if err != nil {
return err
}
}
// We Walk on the source rather than on the destination because we don't
// want to change permissions on things we haven't created or modified.
return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
// Do not alter the walk root if it existed before, as it doesn't fall under
// the domain of "things we should chown".
if skipChownRoot && source == fullpath {
return nil
}
// Path is prefixed by source: substitute with destination instead.
cleaned, err := filepath.Rel(source, fullpath)
if err != nil {
return err
}
fullpath = filepath.Join(destination, cleaned)
return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID)
})
}
func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
return nil
}


@ -0,0 +1,43 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"errors"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/idtools"
)
var pathBlacklist = map[string]bool{
"c:\\": true,
"c:\\windows": true,
}
func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
// chown is not supported on Windows
return nil
}
func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
// validate windows paths from other images + LCOW
if imageSource == nil || platform != "windows" {
return nil
}
origPath = filepath.FromSlash(origPath)
p := strings.ToLower(filepath.Clean(origPath))
if !filepath.IsAbs(p) {
if filepath.VolumeName(p) != "" {
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
p = p[:len(p)-1]
}
p += "\\"
} else {
p = filepath.Join("c:\\", p)
}
}
if _, blacklisted := pathBlacklist[p]; blacklisted {
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
}
return nil
}
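// Illustrative examples, not part of this commit, of the normalization above
// (only applied when copying from another image onto a Windows platform):
//
//	windows\system32 -> c:\windows\system32  (allowed)
//	/windows         -> c:\windows           (rejected: blacklisted)
//	c:.              -> c:\                  (rejected: blacklisted)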


@ -0,0 +1,550 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
// This file contains the dispatchers for each command. Note that
// `nullDispatch` is not actually a command, but support for commands we parse
// but do nothing with.
//
// See evaluator.go for a higher level discussion of the whole evaluator
// package.
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/dockerfile/shell"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ENV foo bar
//
// Sets the environment variable foo to bar, and also makes interpolation
// in the dockerfile available from the next statement on via ${foo}.
//
func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error {
runConfig := d.state.runConfig
commitMessage := bytes.NewBufferString("ENV")
for _, e := range c.Env {
name := e.Key
newVar := e.String()
commitMessage.WriteString(" " + newVar)
gotOne := false
for i, envVar := range runConfig.Env {
envParts := strings.SplitN(envVar, "=", 2)
compareFrom := envParts[0]
if shell.EqualEnvKeys(compareFrom, name) {
runConfig.Env[i] = newVar
gotOne = true
break
}
}
if !gotOne {
runConfig.Env = append(runConfig.Env, newVar)
}
}
return d.builder.commit(d.state, commitMessage.String())
}
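// Illustrative example, not part of this commit: with runConfig.Env currently
// ["PATH=/usr/bin"], the instruction `ENV PATH=/usr/local/bin FOO=bar` rewrites
// the existing PATH entry in place, appends FOO=bar, and commits with the
// message "ENV PATH=/usr/local/bin FOO=bar".
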
// MAINTAINER some text <maybe@an.email.address>
//
// Sets the maintainer metadata.
func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error {
d.state.maintainer = c.Maintainer
return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer)
}
// LABEL some json data describing the image
//
// Sets the image label foo to bar.
//
func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error {
if d.state.runConfig.Labels == nil {
d.state.runConfig.Labels = make(map[string]string)
}
commitStr := "LABEL"
for _, v := range c.Labels {
d.state.runConfig.Labels[v.Key] = v.Value
commitStr += " " + v.String()
}
return d.builder.commit(d.state, commitStr)
}
// ADD foo /path
//
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
// exists here. If you do not wish to have this automatic handling, use COPY.
//
func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error {
downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout)
copier := copierFromDispatchRequest(d, downloader, nil)
defer copier.Cleanup()
copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD")
if err != nil {
return err
}
copyInstruction.chownStr = c.Chown
copyInstruction.allowLocalDecompression = true
return d.builder.performCopy(d.state, copyInstruction)
}
// COPY foo /path
//
// Same as 'ADD' but without the tar and remote url handling.
//
func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error {
var im *imageMount
var err error
if c.From != "" {
im, err = d.getImageMount(c.From)
if err != nil {
return errors.Wrapf(err, "invalid from flag value %s", c.From)
}
}
copier := copierFromDispatchRequest(d, errOnSourceDownload, im)
defer copier.Cleanup()
copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY")
if err != nil {
return err
}
copyInstruction.chownStr = c.Chown
return d.builder.performCopy(d.state, copyInstruction)
}
func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) {
if imageRefOrID == "" {
// TODO: this could return the source in the default case as well?
return nil, nil
}
var localOnly bool
stage, err := d.stages.get(imageRefOrID)
if err != nil {
return nil, err
}
if stage != nil {
imageRefOrID = stage.Image
localOnly = true
}
return d.builder.imageSources.Get(imageRefOrID, localOnly)
}
// FROM imagename[:tag | @digest] [AS build-stage-name]
//
func initializeStage(d dispatchRequest, cmd *instructions.Stage) error {
d.builder.imageProber.Reset()
image, err := d.getFromImage(d.shlex, cmd.BaseName)
if err != nil {
return err
}
state := d.state
if err := state.beginStage(cmd.Name, image); err != nil {
return err
}
if len(state.runConfig.OnBuild) > 0 {
triggers := state.runConfig.OnBuild
state.runConfig.OnBuild = nil
return dispatchTriggeredOnBuild(d, triggers)
}
return nil
}
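// dispatchTriggeredOnBuild re-parses the ONBUILD triggers recorded in the base
// image and dispatches each one as a regular single instruction.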
func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error {
fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers))
if len(triggers) > 1 {
fmt.Fprint(d.builder.Stdout, "s")
}
fmt.Fprintln(d.builder.Stdout)
for _, trigger := range triggers {
d.state.updateRunConfig()
ast, err := parser.Parse(strings.NewReader(trigger))
if err != nil {
return err
}
if len(ast.AST.Children) != 1 {
return errors.New("onbuild trigger should be a single expression")
}
cmd, err := instructions.ParseCommand(ast.AST.Children[0])
if err != nil {
if instructions.IsUnknownInstruction(err) {
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
}
return err
}
err = dispatch(d, cmd)
if err != nil {
return err
}
}
return nil
}
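// getExpandedImageName expands any meta build args (ARGs declared before FROM)
// referenced in the image name, e.g. FROM alpine:${THETAG}.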
func (d *dispatchRequest) getExpandedImageName(shlex *shell.Lex, name string) (string, error) {
substitutionArgs := []string{}
for key, value := range d.state.buildArgs.GetAllMeta() {
substitutionArgs = append(substitutionArgs, key+"="+value)
}
name, err := shlex.ProcessWord(name, substitutionArgs)
if err != nil {
return "", err
}
return name, nil
}
func (d *dispatchRequest) getImageOrStage(name string) (builder.Image, error) {
var localOnly bool
if im, ok := d.stages.getByName(name); ok {
name = im.Image
localOnly = true
}
// Windows cannot support a container with no base image unless it is LCOW.
if name == api.NoBaseImageSpecifier {
imageImage := &image.Image{}
imageImage.OS = runtime.GOOS
if runtime.GOOS == "windows" {
optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
switch optionsOS {
case "windows", "":
return nil, errors.New("Windows does not support FROM scratch")
case "linux":
if !system.LCOWSupported() {
return nil, errors.New("Linux containers are not supported on this system")
}
imageImage.OS = "linux"
default:
return nil, errors.Errorf("operating system %q is not supported", optionsOS)
}
}
return builder.Image(imageImage), nil
}
imageMount, err := d.builder.imageSources.Get(name, localOnly)
if err != nil {
return nil, err
}
return imageMount.Image(), nil
}
func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string) (builder.Image, error) {
name, err := d.getExpandedImageName(shlex, name)
if err != nil {
return nil, err
}
return d.getImageOrStage(name)
}
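// ONBUILD RUN echo yo
//
// Records a build trigger on the image; the trigger is executed when the
// resulting image is later used as the base of another build (see
// dispatchTriggeredOnBuild above).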
func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error {
d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression)
return d.builder.commit(d.state, "ONBUILD "+c.Expression)
}
// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error {
runConfig := d.state.runConfig
var err error
baseImageOS := system.ParsePlatform(d.state.operatingSystem).OS
runConfig.WorkingDir, err = normalizeWorkdir(baseImageOS, runConfig.WorkingDir, c.Path)
if err != nil {
return err
}
// For performance reasons, we explicitly do a create/mkdir now.
// This avoids unnecessary and expensive mount/unmount calls
// (on Windows in particular) during each container create.
// Prior to 1.13, the mkdir was deferred and not executed at this step.
if d.builder.disableCommit {
// Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo".
// We've already updated the runConfig and that's enough.
return nil
}
comment := "WORKDIR " + runConfig.WorkingDir
runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, baseImageOS))
containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd)
if err != nil || containerID == "" {
return err
}
if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil {
return err
}
return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd)
}
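// resolveCmdLine returns the command line for a shell-dependant instruction
// (RUN/CMD/ENTRYPOINT), prepending the configured SHELL when the instruction
// was written in shell form rather than JSON form.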
func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string {
result := cmd.CmdLine
if cmd.PrependShell && result != nil {
result = append(getShell(runConfig, os), result...)
}
return result
}
// RUN some command yo
//
// Run a command and commit the image. Args are automatically prepended with
// the current SHELL, which defaults to 'sh -c' under Linux or 'cmd /S /C' under
// Windows in the event there is only one argument. The difference in processing:
//
// RUN echo hi # sh -c echo hi (Linux and LCOW)
// RUN echo hi # cmd /S /C echo hi (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {
if !system.IsOSSupported(d.state.operatingSystem) {
return system.ErrNotSupportedOperatingSystem
}
stateRunConfig := d.state.runConfig
cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem)
buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env)
saveCmd := cmdFromArgs
if len(buildArgs) > 0 {
saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs)
}
runConfigForCacheProbe := copyRunConfig(stateRunConfig,
withCmd(saveCmd),
withEntrypointOverride(saveCmd, nil))
hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe)
if err != nil || hit {
return err
}
runConfig := copyRunConfig(stateRunConfig,
withCmd(cmdFromArgs),
withEnv(append(stateRunConfig.Env, buildArgs...)),
withEntrypointOverride(saveCmd, strslice.StrSlice{""}))
// set config as already being escaped, this prevents double escaping on windows
runConfig.ArgsEscaped = true
logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
cID, err := d.builder.create(runConfig)
if err != nil {
return err
}
if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil {
if err, ok := err.(*statusCodeError); ok {
// TODO: change error type, because jsonmessage.JSONError assumes HTTP
msg := fmt.Sprintf(
"The command '%s' returned a non-zero code: %d",
strings.Join(runConfig.Cmd, " "), err.StatusCode())
if err.Error() != "" {
msg = fmt.Sprintf("%s: %s", msg, err.Error())
}
return &jsonmessage.JSONError{
Message: msg,
Code: err.StatusCode(),
}
}
return err
}
return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe)
}
// Derive the command to use for probeCache() and to commit in this container.
// Note that we only do this if there are any build-time env vars. Also, we
// use the special argument "|#" at the start of the args array. This will
// avoid conflicts with any RUN command since commands cannot
// start with | (vertical bar). The "#" (number of build envs) is there to
// help ensure proper cache matches. We don't want a RUN command
// that starts with "foo=abc" to be considered part of a build-time env var.
//
// Remove any unreferenced built-in args from the environment variables.
// These args are transparent so the resulting image should be the same regardless
// of the value.
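// For example (mirroring TestRunWithBuildArgs in the tests below): with a single
// referenced build arg "one=two" and cmd ["/bin/sh", "-c", "echo foo"], the
// command saved for cache probing becomes ["|1", "one=two", "/bin/sh", "-c", "echo foo"].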
func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice {
var tmpBuildEnv []string
for _, env := range buildArgVars {
key := strings.SplitN(env, "=", 2)[0]
if buildArgs.IsReferencedOrNotBuiltin(key) {
tmpBuildEnv = append(tmpBuildEnv, env)
}
}
sort.Strings(tmpBuildEnv)
tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
return strslice.StrSlice(append(tmpEnv, cmd...))
}
// CMD foo
//
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error {
runConfig := d.state.runConfig
optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS)
runConfig.Cmd = cmd
// set config as already being escaped, this prevents double escaping on windows
runConfig.ArgsEscaped = true
if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil {
return err
}
if len(c.ShellDependantCmdLine.CmdLine) != 0 {
d.state.cmdSet = true
}
return nil
}
// HEALTHCHECK foo
//
// Set the default healthcheck command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error {
runConfig := d.state.runConfig
if runConfig.Healthcheck != nil {
oldCmd := runConfig.Healthcheck.Test
if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd)
}
}
runConfig.Healthcheck = c.Health
return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck))
}
// ENTRYPOINT /usr/sbin/nginx
//
// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
//
// Handles command processing similarly to CMD and RUN, except that req.runConfig.Entrypoint
// is initialized at newBuilder time instead of through argument parsing.
//
func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error {
runConfig := d.state.runConfig
optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS)
runConfig.Entrypoint = cmd
if !d.state.cmdSet {
runConfig.Cmd = nil
}
return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint))
}
// EXPOSE 6667/tcp 7000/tcp
//
// Expose ports for links and port mappings. This all ends up in
// req.runConfig.ExposedPorts for runconfig.
//
func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error {
// Custom multi-word expansion:
// EXPOSE $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only
// command supporting word-to-words expansion, so the word processing has been de-generalized.
ports := []string{}
for _, p := range c.Ports {
ps, err := d.shlex.ProcessWords(p, envs)
if err != nil {
return err
}
ports = append(ports, ps...)
}
c.Ports = ports
ps, _, err := nat.ParsePortSpecs(ports)
if err != nil {
return err
}
if d.state.runConfig.ExposedPorts == nil {
d.state.runConfig.ExposedPorts = make(nat.PortSet)
}
for p := range ps {
d.state.runConfig.ExposedPorts[p] = struct{}{}
}
return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " "))
}
// USER foo
//
// Set the user to 'foo' for future commands and when running the
// ENTRYPOINT/CMD at container run time.
//
func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error {
d.state.runConfig.User = c.User
return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User))
}
// VOLUME /foo
//
// Expose the volume /foo for use. Will also accept the JSON array form.
//
func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error {
if d.state.runConfig.Volumes == nil {
d.state.runConfig.Volumes = map[string]struct{}{}
}
for _, v := range c.Volumes {
if v == "" {
return errors.New("VOLUME specified can not be an empty string")
}
d.state.runConfig.Volumes[v] = struct{}{}
}
return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes))
}
// STOPSIGNAL signal
//
// Set the signal that will be used to kill the container.
func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error {
_, err := signal.ParseSignal(c.Signal)
if err != nil {
return errdefs.InvalidParameter(err)
}
d.state.runConfig.StopSignal = c.Signal
return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal))
}
// ARG name[=value]
//
// Adds the variable foo to the trusted list of variables that can be passed
// to the builder using the --build-arg flag for expansion/substitution, or for passing to 'run'.
// Dockerfile author may optionally set a default value of this variable.
func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error {
commitStr := "ARG " + c.Key
if c.Value != nil {
commitStr += "=" + *c.Value
}
d.state.buildArgs.AddArg(c.Key, c.Value)
return d.builder.commit(d.state, commitStr)
}
// SHELL powershell -command
//
// Set the non-default shell to use.
func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error {
d.state.runConfig.Shell = c.Shell
return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell))
}

View file

@ -0,0 +1,470 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"bytes"
"context"
"runtime"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/shell"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-connections/nat"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func newBuilderWithMockBackend() *Builder {
mockBackend := &MockBackend{}
ctx := context.Background()
b := &Builder{
options: &types.ImageBuildOptions{Platform: runtime.GOOS},
docker: mockBackend,
Stdout: new(bytes.Buffer),
clientCtx: ctx,
disableCommit: true,
imageSources: newImageSources(ctx, builderOptions{
Options: &types.ImageBuildOptions{Platform: runtime.GOOS},
Backend: mockBackend,
}),
imageProber: newImageProber(mockBackend, nil, false),
containerManager: newContainerManager(mockBackend),
}
return b
}
func TestEnv2Variables(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
envCommand := &instructions.EnvCommand{
Env: instructions.KeyValuePairs{
instructions.KeyValuePair{Key: "var1", Value: "val1"},
instructions.KeyValuePair{Key: "var2", Value: "val2"},
},
}
err := dispatch(sb, envCommand)
assert.NilError(t, err)
expected := []string{
"var1=val1",
"var2=val2",
}
assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env))
}
func TestEnvValueWithExistingRunConfigEnv(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
sb.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
envCommand := &instructions.EnvCommand{
Env: instructions.KeyValuePairs{
instructions.KeyValuePair{Key: "var1", Value: "val1"},
},
}
err := dispatch(sb, envCommand)
assert.NilError(t, err)
expected := []string{
"var1=val1",
"var2=fromenv",
}
assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env))
}
func TestMaintainer(t *testing.T) {
maintainerEntry := "Some Maintainer <maintainer@example.com>"
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.MaintainerCommand{Maintainer: maintainerEntry}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal(maintainerEntry, sb.state.maintainer))
}
func TestLabel(t *testing.T) {
labelName := "label"
labelValue := "value"
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.LabelCommand{
Labels: instructions.KeyValuePairs{
instructions.KeyValuePair{Key: labelName, Value: labelValue},
},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, is.Contains(sb.state.runConfig.Labels, labelName))
assert.Check(t, is.Equal(sb.state.runConfig.Labels[labelName], labelValue))
}
func TestFromScratch(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.Stage{
BaseName: "scratch",
}
err := initializeStage(sb, cmd)
if runtime.GOOS == "windows" && !system.LCOWSupported() {
assert.Check(t, is.Error(err, "Windows does not support FROM scratch"))
return
}
assert.NilError(t, err)
assert.Check(t, sb.state.hasFromImage())
assert.Check(t, is.Equal("", sb.state.imageID))
expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS)
assert.Check(t, is.DeepEqual([]string{expected}, sb.state.runConfig.Env))
}
func TestFromWithArg(t *testing.T) {
tag, expected := ":sometag", "expectedthisid"
getImage := func(name string) (builder.Image, builder.ROLayer, error) {
assert.Check(t, is.Equal("alpine"+tag, name))
return &mockImage{id: "expectedthisid"}, nil, nil
}
b := newBuilderWithMockBackend()
b.docker.(*MockBackend).getImageFunc = getImage
args := newBuildArgs(make(map[string]*string))
val := "sometag"
metaArg := instructions.ArgCommand{
Key: "THETAG",
Value: &val,
}
cmd := &instructions.Stage{
BaseName: "alpine:${THETAG}",
}
err := processMetaArg(metaArg, shell.NewLex('\\'), args)
sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults())
assert.NilError(t, err)
err = initializeStage(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal(expected, sb.state.imageID))
assert.Check(t, is.Equal(expected, sb.state.baseImage.ImageID()))
assert.Check(t, is.Len(sb.state.buildArgs.GetAllAllowed(), 0))
assert.Check(t, is.Len(sb.state.buildArgs.GetAllMeta(), 1))
}
func TestFromWithUndefinedArg(t *testing.T) {
tag, expected := "sometag", "expectedthisid"
getImage := func(name string) (builder.Image, builder.ROLayer, error) {
assert.Check(t, is.Equal("alpine", name))
return &mockImage{id: "expectedthisid"}, nil, nil
}
b := newBuilderWithMockBackend()
b.docker.(*MockBackend).getImageFunc = getImage
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
b.options.BuildArgs = map[string]*string{"THETAG": &tag}
cmd := &instructions.Stage{
BaseName: "alpine${THETAG}",
}
err := initializeStage(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal(expected, sb.state.imageID))
}
func TestFromMultiStageWithNamedStage(t *testing.T) {
b := newBuilderWithMockBackend()
firstFrom := &instructions.Stage{BaseName: "someimg", Name: "base"}
secondFrom := &instructions.Stage{BaseName: "base"}
previousResults := newStagesBuildResults()
firstSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults)
secondSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults)
err := initializeStage(firstSB, firstFrom)
assert.NilError(t, err)
assert.Check(t, firstSB.state.hasFromImage())
previousResults.indexed["base"] = firstSB.state.runConfig
previousResults.flat = append(previousResults.flat, firstSB.state.runConfig)
err = initializeStage(secondSB, secondFrom)
assert.NilError(t, err)
assert.Check(t, secondSB.state.hasFromImage())
}
func TestOnbuild(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.OnbuildCommand{
Expression: "ADD . /app/src",
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal("ADD . /app/src", sb.state.runConfig.OnBuild[0]))
}
func TestWorkdir(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
workingDir := "/app"
if runtime.GOOS == "windows" {
workingDir = "C:\\app"
}
cmd := &instructions.WorkdirCommand{
Path: workingDir,
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal(workingDir, sb.state.runConfig.WorkingDir))
}
func TestCmd(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
command := "./executable"
cmd := &instructions.CmdCommand{
ShellDependantCmdLine: instructions.ShellDependantCmdLine{
CmdLine: strslice.StrSlice{command},
PrependShell: true,
},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
var expectedCommand strslice.StrSlice
if runtime.GOOS == "windows" {
expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command))
} else {
expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command))
}
assert.Check(t, is.DeepEqual(expectedCommand, sb.state.runConfig.Cmd))
assert.Check(t, sb.state.cmdSet)
}
func TestHealthcheckNone(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.HealthCheckCommand{
Health: &container.HealthConfig{
Test: []string{"NONE"},
},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, sb.state.runConfig.Healthcheck != nil)
assert.Check(t, is.DeepEqual([]string{"NONE"}, sb.state.runConfig.Healthcheck.Test))
}
func TestHealthcheckCmd(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
cmd := &instructions.HealthCheckCommand{
Health: &container.HealthConfig{
Test: expectedTest,
},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, sb.state.runConfig.Healthcheck != nil)
assert.Check(t, is.DeepEqual(expectedTest, sb.state.runConfig.Healthcheck.Test))
}
func TestEntrypoint(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
entrypointCmd := "/usr/sbin/nginx"
cmd := &instructions.EntrypointCommand{
ShellDependantCmdLine: instructions.ShellDependantCmdLine{
CmdLine: strslice.StrSlice{entrypointCmd},
PrependShell: true,
},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, sb.state.runConfig.Entrypoint != nil)
var expectedEntrypoint strslice.StrSlice
if runtime.GOOS == "windows" {
expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd))
} else {
expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd))
}
assert.Check(t, is.DeepEqual(expectedEntrypoint, sb.state.runConfig.Entrypoint))
}
func TestExpose(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
exposedPort := "80"
cmd := &instructions.ExposeCommand{
Ports: []string{exposedPort},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, sb.state.runConfig.ExposedPorts != nil)
assert.Assert(t, is.Len(sb.state.runConfig.ExposedPorts, 1))
portsMapping, err := nat.ParsePortSpec(exposedPort)
assert.NilError(t, err)
assert.Check(t, is.Contains(sb.state.runConfig.ExposedPorts, portsMapping[0].Port))
}
func TestUser(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
cmd := &instructions.UserCommand{
User: "test",
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal("test", sb.state.runConfig.User))
}
func TestVolume(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
exposedVolume := "/foo"
cmd := &instructions.VolumeCommand{
Volumes: []string{exposedVolume},
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Assert(t, sb.state.runConfig.Volumes != nil)
assert.Check(t, is.Len(sb.state.runConfig.Volumes, 1))
assert.Check(t, is.Contains(sb.state.runConfig.Volumes, exposedVolume))
}
func TestStopSignal(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Windows does not support stopsignal")
return
}
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
signal := "SIGKILL"
cmd := &instructions.StopSignalCommand{
Signal: signal,
}
err := dispatch(sb, cmd)
assert.NilError(t, err)
assert.Check(t, is.Equal(signal, sb.state.runConfig.StopSignal))
}
func TestArg(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
argName := "foo"
argVal := "bar"
cmd := &instructions.ArgCommand{Key: argName, Value: &argVal}
err := dispatch(sb, cmd)
assert.NilError(t, err)
expected := map[string]string{argName: argVal}
assert.Check(t, is.DeepEqual(expected, sb.state.buildArgs.GetAllAllowed()))
}
func TestShell(t *testing.T) {
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
shellCmd := "powershell"
cmd := &instructions.ShellCommand{Shell: strslice.StrSlice{shellCmd}}
err := dispatch(sb, cmd)
assert.NilError(t, err)
expectedShell := strslice.StrSlice([]string{shellCmd})
assert.Check(t, is.DeepEqual(expectedShell, sb.state.runConfig.Shell))
}
func TestPrependEnvOnCmd(t *testing.T) {
buildArgs := newBuildArgs(nil)
buildArgs.AddArg("NO_PROXY", nil)
args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"}
cmd := []string{"foo", "bar"}
cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd)
expected := strslice.StrSlice([]string{
"|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"})
assert.Check(t, is.DeepEqual(expected, cmdWithEnv))
}
func TestRunWithBuildArgs(t *testing.T) {
b := newBuilderWithMockBackend()
args := newBuildArgs(make(map[string]*string))
args.argsFromOptions["HTTP_PROXY"] = strPtr("FOO")
b.disableCommit = false
sb := newDispatchRequest(b, '`', nil, args, newStagesBuildResults())
runConfig := &container.Config{}
origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"})
cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo"))
envVars := []string{"|1", "one=two"}
cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...))
imageCache := &mockImageCache{
getCacheFunc: func(parentID string, cfg *container.Config) (string, error) {
// Check the runConfig.Cmd sent to probeCache()
assert.Check(t, is.DeepEqual(cachedCmd, cfg.Cmd))
assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Entrypoint))
return "", nil
},
}
mockBackend := b.docker.(*MockBackend)
mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache {
return imageCache
}
b.imageProber = newImageProber(mockBackend, nil, false)
mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) {
return &mockImage{
id: "abcdef",
config: &container.Config{Cmd: origCmd},
}, nil, nil
}
mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
// Check the runConfig.Cmd sent to create()
assert.Check(t, is.DeepEqual(cmdWithShell, config.Config.Cmd))
assert.Check(t, is.Contains(config.Config.Env, "one=two"))
assert.Check(t, is.DeepEqual(strslice.StrSlice{""}, config.Config.Entrypoint))
return container.ContainerCreateCreatedBody{ID: "12345"}, nil
}
mockBackend.commitFunc = func(cfg backend.CommitConfig) (image.ID, error) {
// Check the runConfig.Cmd sent to commit()
assert.Check(t, is.DeepEqual(origCmd, cfg.Config.Cmd))
assert.Check(t, is.DeepEqual(cachedCmd, cfg.ContainerConfig.Cmd))
assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Config.Entrypoint))
return "", nil
}
from := &instructions.Stage{BaseName: "abcdef"}
err := initializeStage(sb, from)
assert.NilError(t, err)
sb.state.buildArgs.AddArg("one", strPtr("two"))
run := &instructions.RunCommand{
ShellDependantCmdLine: instructions.ShellDependantCmdLine{
CmdLine: strslice.StrSlice{"echo foo"},
PrependShell: true,
},
}
assert.NilError(t, dispatch(sb, run))
// Check that runConfig.Cmd has not been modified by run
assert.Check(t, is.DeepEqual(origCmd, sb.state.runConfig.Cmd))
}

View file

@ -0,0 +1,23 @@
// +build !windows

package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"errors"
"os"
"path/filepath"
)
// normalizeWorkdir normalizes a user requested working directory in a
// platform semantically consistent way.
func normalizeWorkdir(_ string, current string, requested string) (string, error) {
if requested == "" {
return "", errors.New("cannot normalize nothing")
}
current = filepath.FromSlash(current)
requested = filepath.FromSlash(requested)
if !filepath.IsAbs(requested) {
return filepath.Join(string(os.PathSeparator), current, requested), nil
}
return requested, nil
}

View file

@ -0,0 +1,34 @@
// +build !windows

package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"runtime"
"testing"
)
func TestNormalizeWorkdir(t *testing.T) {
testCases := []struct{ current, requested, expected, expectedError string }{
{``, ``, ``, `cannot normalize nothing`},
{``, `foo`, `/foo`, ``},
{``, `/foo`, `/foo`, ``},
{`/foo`, `bar`, `/foo/bar`, ``},
{`/foo`, `/bar`, `/bar`, ``},
}
for _, test := range testCases {
normalized, err := normalizeWorkdir(runtime.GOOS, test.current, test.requested)
if test.expectedError != "" && err == nil {
t.Fatalf("NormalizeWorkdir should return an error %s, got nil", test.expectedError)
}
if test.expectedError != "" && err.Error() != test.expectedError {
t.Fatalf("NormalizeWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error())
}
if normalized != test.expected {
t.Fatalf("NormalizeWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalized)
}
}
}

View file

@ -0,0 +1,95 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/docker/docker/pkg/system"
)
var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`)
// normalizeWorkdir normalizes a user requested working directory in a
// platform semantically consistent way.
func normalizeWorkdir(platform string, current string, requested string) (string, error) {
if platform == "" {
platform = "windows"
}
if platform == "windows" {
return normalizeWorkdirWindows(current, requested)
}
return normalizeWorkdirUnix(current, requested)
}
// normalizeWorkdirUnix normalizes a user requested working directory in a
// platform semantically consistent way.
func normalizeWorkdirUnix(current string, requested string) (string, error) {
if requested == "" {
return "", errors.New("cannot normalize nothing")
}
current = strings.Replace(current, string(os.PathSeparator), "/", -1)
requested = strings.Replace(requested, string(os.PathSeparator), "/", -1)
if !path.IsAbs(requested) {
return path.Join(`/`, current, requested), nil
}
return requested, nil
}
// normalizeWorkdirWindows normalizes a user requested working directory in a
// platform semantically consistent way.
func normalizeWorkdirWindows(current string, requested string) (string, error) {
if requested == "" {
return "", errors.New("cannot normalize nothing")
}
// `filepath.Clean` will replace "" with "." so skip in that case
if current != "" {
current = filepath.Clean(current)
}
if requested != "" {
requested = filepath.Clean(requested)
}
// If either current or requested in Windows is:
// C:
// C:.
// then an error will be thrown as the definition for the above
// refers to `current directory on drive C:`
// Since filepath.Clean() will automatically normalize the above
// to `C:.`, we only need to check the last format
if pattern.MatchString(current) {
return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current)
}
if pattern.MatchString(requested) {
return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested)
}
// Target semantics is C:\somefolder, specifically in the format:
// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
// guaranteed that `current`, if set, is consistent. This allows us to
// cope correctly with any of the following in a Dockerfile:
// WORKDIR a --> C:\a
// WORKDIR c:\\foo --> C:\foo
// WORKDIR \\foo --> C:\foo
// WORKDIR /foo --> C:\foo
// WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar
// WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar
// WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar
// WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar
if len(current) == 0 || system.IsAbs(requested) {
if (requested[0] == os.PathSeparator) ||
(len(requested) > 1 && string(requested[1]) != ":") ||
(len(requested) == 1) {
requested = filepath.Join(`C:\`, requested)
}
} else {
requested = filepath.Join(current, requested)
}
// Upper-case drive letter
return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
}

View file

@ -0,0 +1,46 @@
// +build windows

package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import "testing"
func TestNormalizeWorkdir(t *testing.T) {
tests := []struct{ platform, current, requested, expected, etext string }{
{"windows", ``, ``, ``, `cannot normalize nothing`},
{"windows", ``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`},
{"windows", ``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`},
{"windows", `c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`},
{"windows", `c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`},
{"windows", ``, `a`, `C:\a`, ``},
{"windows", ``, `c:\foo`, `C:\foo`, ``},
{"windows", ``, `c:\\foo`, `C:\foo`, ``},
{"windows", ``, `\foo`, `C:\foo`, ``},
{"windows", ``, `\\foo`, `C:\foo`, ``},
{"windows", ``, `/foo`, `C:\foo`, ``},
{"windows", ``, `C:/foo`, `C:\foo`, ``},
{"windows", `C:\foo`, `bar`, `C:\foo\bar`, ``},
{"windows", `C:\foo`, `/bar`, `C:\bar`, ``},
{"windows", `C:\foo`, `\bar`, `C:\bar`, ``},
{"linux", ``, ``, ``, `cannot normalize nothing`},
{"linux", ``, `foo`, `/foo`, ``},
{"linux", ``, `/foo`, `/foo`, ``},
{"linux", `/foo`, `bar`, `/foo/bar`, ``},
{"linux", `/foo`, `/bar`, `/bar`, ``},
{"linux", `\a`, `b\c`, `/a/b/c`, ``},
}
for _, i := range tests {
r, e := normalizeWorkdir(i.platform, i.current, i.requested)
if i.etext != "" && e == nil {
t.Fatalf("TestNormalizeWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested)
}
if i.etext != "" && e.Error() != i.etext {
t.Fatalf("TestNormalizeWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error())
}
if r != i.expected {
t.Fatalf("TestNormalizeWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r)
}
}
}

View file

@ -0,0 +1,251 @@
// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline.
//
// It incorporates a dispatch table based on the parser.Node values (see the
// parser package for more information) that are yielded from the parser itself.
// Calling newBuilder with the BuildOpts struct can be used to customize the
// experience for execution purposes only. Parsing is controlled in the parser
// package, and this division of responsibility should be respected.
//
// Please see the jump table targets for the actual invocations, most of which
// will call out to the functions in internals.go to deal with their tasks.
//
// ONBUILD is a special case, which is covered in the onbuild() func in
// dispatchers.go.
//
// The evaluator uses the concept of "steps", which are usually each processable
// line in the Dockerfile. Each step is numbered and certain actions are taken
// before and after each step, such as creating an image ID and removing temporary
// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
// includes its own set of steps (usually only one of them).
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"reflect"
"runtime"
"strconv"
"strings"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/shell"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig/opts"
"github.com/pkg/errors"
)
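// dispatch routes a single parsed instruction to its handler. It applies any
// platform check first, expands build-time variables for commands that support
// single-word expansion, and, depending on the --rm/--force-rm options, removes
// intermediate containers once the step finishes.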
func dispatch(d dispatchRequest, cmd instructions.Command) (err error) {
if c, ok := cmd.(instructions.PlatformSpecific); ok {
optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
err := c.CheckPlatform(optionsOS)
if err != nil {
return errdefs.InvalidParameter(err)
}
}
runConfigEnv := d.state.runConfig.Env
envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...)
if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok {
err := ex.Expand(func(word string) (string, error) {
return d.shlex.ProcessWord(word, envs)
})
if err != nil {
return errdefs.InvalidParameter(err)
}
}
defer func() {
if d.builder.options.ForceRemove {
d.builder.containerManager.RemoveAll(d.builder.Stdout)
return
}
if d.builder.options.Remove && err == nil {
d.builder.containerManager.RemoveAll(d.builder.Stdout)
return
}
}()
switch c := cmd.(type) {
case *instructions.EnvCommand:
return dispatchEnv(d, c)
case *instructions.MaintainerCommand:
return dispatchMaintainer(d, c)
case *instructions.LabelCommand:
return dispatchLabel(d, c)
case *instructions.AddCommand:
return dispatchAdd(d, c)
case *instructions.CopyCommand:
return dispatchCopy(d, c)
case *instructions.OnbuildCommand:
return dispatchOnbuild(d, c)
case *instructions.WorkdirCommand:
return dispatchWorkdir(d, c)
case *instructions.RunCommand:
return dispatchRun(d, c)
case *instructions.CmdCommand:
return dispatchCmd(d, c)
case *instructions.HealthCheckCommand:
return dispatchHealthcheck(d, c)
case *instructions.EntrypointCommand:
return dispatchEntrypoint(d, c)
case *instructions.ExposeCommand:
return dispatchExpose(d, c, envs)
case *instructions.UserCommand:
return dispatchUser(d, c)
case *instructions.VolumeCommand:
return dispatchVolume(d, c)
case *instructions.StopSignalCommand:
return dispatchStopSignal(d, c)
case *instructions.ArgCommand:
return dispatchArg(d, c)
case *instructions.ShellCommand:
return dispatchShell(d, c)
}
return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd))
}
// dispatchState is a data object which is modified by dispatchers
type dispatchState struct {
runConfig *container.Config
maintainer string
cmdSet bool
imageID string
baseImage builder.Image
stageName string
buildArgs *buildArgs
operatingSystem string
}
func newDispatchState(baseArgs *buildArgs) *dispatchState {
args := baseArgs.Clone()
args.ResetAllowed()
return &dispatchState{runConfig: &container.Config{}, buildArgs: args}
}
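// stagesBuildResults holds the run configs committed by completed build stages,
// indexed both by position (flat) and by lower-cased stage name (indexed), so
// later stages can reference them by name or number.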
type stagesBuildResults struct {
flat []*container.Config
indexed map[string]*container.Config
}
func newStagesBuildResults() *stagesBuildResults {
return &stagesBuildResults{
indexed: make(map[string]*container.Config),
}
}
func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) {
c, ok := r.indexed[strings.ToLower(name)]
return c, ok
}
func (r *stagesBuildResults) validateIndex(i int) error {
if i == len(r.flat) {
return errors.New("refers to current build stage")
}
if i < 0 || i > len(r.flat) {
return errors.New("index out of bounds")
}
return nil
}
func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) {
if c, ok := r.getByName(nameOrIndex); ok {
return c, nil
}
ix, err := strconv.ParseInt(nameOrIndex, 10, 0)
if err != nil {
return nil, nil
}
if err := r.validateIndex(int(ix)); err != nil {
return nil, err
}
return r.flat[ix], nil
}
func (r *stagesBuildResults) checkStageNameAvailable(name string) error {
if name != "" {
if _, ok := r.getByName(name); ok {
return errors.Errorf("%s stage name already used", name)
}
}
return nil
}
func (r *stagesBuildResults) commitStage(name string, config *container.Config) error {
if name != "" {
if _, ok := r.getByName(name); ok {
return errors.Errorf("%s stage name already used", name)
}
r.indexed[strings.ToLower(name)] = config
}
r.flat = append(r.flat, config)
return nil
}
func commitStage(state *dispatchState, stages *stagesBuildResults) error {
return stages.commitStage(state.stageName, state.runConfig)
}
type dispatchRequest struct {
state *dispatchState
shlex *shell.Lex
builder *Builder
source builder.Source
stages *stagesBuildResults
}
func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *buildArgs, stages *stagesBuildResults) dispatchRequest {
return dispatchRequest{
state: newDispatchState(buildArgs),
shlex: shell.NewLex(escapeToken),
builder: builder,
source: source,
stages: stages,
}
}
func (s *dispatchState) updateRunConfig() {
s.runConfig.Image = s.imageID
}
// hasFromImage returns true if the builder has processed a `FROM <image>` line
// (including FROM scratch, where the base image has an empty image ID)
func (s *dispatchState) hasFromImage() bool {
return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "")
}
func (s *dispatchState) beginStage(stageName string, image builder.Image) error {
s.stageName = stageName
s.imageID = image.ImageID()
s.operatingSystem = image.OperatingSystem()
if s.operatingSystem == "" { // In case it isn't set
s.operatingSystem = runtime.GOOS
}
if !system.IsOSSupported(s.operatingSystem) {
return system.ErrNotSupportedOperatingSystem
}
if image.RunConfig() != nil {
// copy avoids referencing the same instance when 2 stages have the same base
s.runConfig = copyRunConfig(image.RunConfig())
} else {
s.runConfig = &container.Config{}
}
s.baseImage = image
s.setDefaultPath()
s.runConfig.OpenStdin = false
s.runConfig.StdinOnce = false
return nil
}
// Add the default PATH to runConfig.Env if one exists for the operating system and there
// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS.
func (s *dispatchState) setDefaultPath() {
defaultPath := system.DefaultPathEnv(s.operatingSystem)
if defaultPath == "" {
return
}
envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
if _, ok := envMap["PATH"]; !ok {
s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath)
}
}

View file

@ -0,0 +1,140 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"testing"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/internal/testutil"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
type dispatchTestCase struct {
name, expectedError string
cmd instructions.Command
files map[string]string
}
func init() {
reexec.Init()
}
func initDispatchTestCases() []dispatchTestCase {
dispatchTestCases := []dispatchTestCase{
{
name: "ADD multiple files to file",
cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
"file1.txt",
"file2.txt",
"test",
}},
expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
},
{
name: "Wildcard ADD multiple files to file",
cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
"file*.txt",
"test",
}},
expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
},
{
name: "COPY multiple files to file",
cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
"file1.txt",
"file2.txt",
"test",
}},
expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
},
{
name: "ADD multiple files to file with whitespace",
cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
"test file1.txt",
"test file2.txt",
"test",
}},
expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
},
{
name: "COPY multiple files to file with whitespace",
cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
"test file1.txt",
"test file2.txt",
"test",
}},
expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
},
{
name: "COPY wildcard no files",
cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
"file*.txt",
"/tmp/",
}},
expectedError: "COPY failed: no source files were specified",
files: nil,
},
{
name: "COPY url",
cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
"https://index.docker.io/robots.txt",
"/",
}},
expectedError: "source can't be a URL for COPY",
files: nil,
}}
return dispatchTestCases
}
func TestDispatch(t *testing.T) {
testCases := initDispatchTestCases()
for _, testCase := range testCases {
executeTestCase(t, testCase)
}
}
func executeTestCase(t *testing.T, testCase dispatchTestCase) {
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
defer cleanup()
for filename, content := range testCase.files {
createTestTempFile(t, contextDir, filename, content, 0777)
}
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
if err != nil {
t.Fatalf("Error when creating tar stream: %s", err)
}
defer func() {
if err = tarStream.Close(); err != nil {
t.Fatalf("Error when closing tar stream: %s", err)
}
}()
context, err := remotecontext.FromArchive(tarStream)
if err != nil {
t.Fatalf("Error when creating tar context: %s", err)
}
defer func() {
if err = context.Close(); err != nil {
t.Fatalf("Error when closing tar context: %s", err)
}
}()
b := newBuilderWithMockBackend()
sb := newDispatchRequest(b, '`', context, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
err = dispatch(sb, testCase.cmd)
testutil.ErrorContains(t, err, testCase.expectedError)
}

View file

@ -0,0 +1,123 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"runtime"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
dockerimage "github.com/docker/docker/image"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
type getAndMountFunc func(string, bool) (builder.Image, builder.ROLayer, error)
// imageSources mounts images and provides a cache for mounted images. It tracks
// all images so they can be unmounted at the end of the build.
type imageSources struct {
byImageID map[string]*imageMount
mounts []*imageMount
getImage getAndMountFunc
}
func newImageSources(ctx context.Context, options builderOptions) *imageSources {
getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ROLayer, error) {
pullOption := backend.PullOptionNoPull
if !localOnly {
if options.Options.PullParent {
pullOption = backend.PullOptionForcePull
} else {
pullOption = backend.PullOptionPreferLocal
}
}
optionsPlatform := system.ParsePlatform(options.Options.Platform)
return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{
PullOption: pullOption,
AuthConfig: options.Options.AuthConfigs,
Output: options.ProgressWriter.Output,
OS: optionsPlatform.OS,
})
}
return &imageSources{
byImageID: make(map[string]*imageMount),
getImage: getAndMount,
}
}
func (m *imageSources) Get(idOrRef string, localOnly bool) (*imageMount, error) {
if im, ok := m.byImageID[idOrRef]; ok {
return im, nil
}
image, layer, err := m.getImage(idOrRef, localOnly)
if err != nil {
return nil, err
}
im := newImageMount(image, layer)
m.Add(im)
return im, nil
}
func (m *imageSources) Unmount() (retErr error) {
for _, im := range m.mounts {
if err := im.unmount(); err != nil {
logrus.Error(err)
retErr = err
}
}
return
}
func (m *imageSources) Add(im *imageMount) {
switch im.image {
case nil:
// set the OS for scratch images
os := runtime.GOOS
// Windows does not support scratch except for LCOW
if runtime.GOOS == "windows" {
os = "linux"
}
im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}}
default:
m.byImageID[im.image.ImageID()] = im
}
m.mounts = append(m.mounts, im)
}
// imageMount is a reference to an image that can be used as a builder.Source
type imageMount struct {
image builder.Image
source builder.Source
layer builder.ROLayer
}
func newImageMount(image builder.Image, layer builder.ROLayer) *imageMount {
im := &imageMount{image: image, layer: layer}
return im
}
func (im *imageMount) unmount() error {
if im.layer == nil {
return nil
}
if err := im.layer.Release(); err != nil {
return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID())
}
im.layer = nil
return nil
}
func (im *imageMount) Image() builder.Image {
return im.image
}
func (im *imageMount) NewRWLayer() (builder.RWLayer, error) {
return im.layer.NewRWLayer()
}
func (im *imageMount) ImageID() string {
return im.image.ImageID()
}

View file

@ -0,0 +1,63 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
"github.com/sirupsen/logrus"
)
// ImageProber exposes an Image cache to the Builder. It supports resetting a
// cache.
type ImageProber interface {
Reset()
Probe(parentID string, runConfig *container.Config) (string, error)
}
type imageProber struct {
cache builder.ImageCache
reset func() builder.ImageCache
cacheBusted bool
}
func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber {
if noCache {
return &nopProber{}
}
reset := func() builder.ImageCache {
return cacheBuilder.MakeImageCache(cacheFrom)
}
return &imageProber{cache: reset(), reset: reset}
}
func (c *imageProber) Reset() {
c.cache = c.reset()
c.cacheBusted = false
}
// Probe checks if a cache match can be found for the current build instruction.
// It returns the cached ID if there is a hit, and the empty string on a miss.
func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) {
if c.cacheBusted {
return "", nil
}
cacheID, err := c.cache.GetCache(parentID, runConfig)
if err != nil {
return "", err
}
if len(cacheID) == 0 {
logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
c.cacheBusted = true
return "", nil
}
logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
return cacheID, nil
}
type nopProber struct{}
func (c *nopProber) Reset() {}
func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) {
return "", nil
}

View file

@ -0,0 +1,183 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"fmt"
"strings"
)
// FlagType is the type of the build flag
type FlagType int
const (
boolType FlagType = iota
stringType
)
// BFlags contains all flags information for the builder
type BFlags struct {
Args []string // actual flags/args from cmd line
flags map[string]*Flag
used map[string]*Flag
Err error
}
// Flag contains all information for a flag
type Flag struct {
bf *BFlags
name string
flagType FlagType
Value string
}
// NewBFlags returns a new BFlags struct
func NewBFlags() *BFlags {
return &BFlags{
flags: make(map[string]*Flag),
used: make(map[string]*Flag),
}
}
// NewBFlagsWithArgs returns a new BFlags struct with Args set to args
func NewBFlagsWithArgs(args []string) *BFlags {
flags := NewBFlags()
flags.Args = args
return flags
}
// AddBool adds a bool flag to BFlags
// Note, any error will be generated when Parse() is called (see Parse).
func (bf *BFlags) AddBool(name string, def bool) *Flag {
flag := bf.addFlag(name, boolType)
if flag == nil {
return nil
}
if def {
flag.Value = "true"
} else {
flag.Value = "false"
}
return flag
}
// AddString adds a string flag to BFlags
// Note, any error will be generated when Parse() is called (see Parse).
func (bf *BFlags) AddString(name string, def string) *Flag {
flag := bf.addFlag(name, stringType)
if flag == nil {
return nil
}
flag.Value = def
return flag
}
// addFlag is a generic func used by the other AddXXX() funcs
// to add a new flag to the BFlags struct.
// Note, any error will be generated when Parse() is called (see Parse).
func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
if _, ok := bf.flags[name]; ok {
bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
return nil
}
newFlag := &Flag{
bf: bf,
name: name,
flagType: flagType,
}
bf.flags[name] = newFlag
return newFlag
}
// IsUsed checks if the flag is used
func (fl *Flag) IsUsed() bool {
if _, ok := fl.bf.used[fl.name]; ok {
return true
}
return false
}
// IsTrue checks if a bool flag is true
func (fl *Flag) IsTrue() bool {
if fl.flagType != boolType {
// Should never get here
panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
}
return fl.Value == "true"
}
// Parse parses and checks if the BFlags is valid.
// Any error noticed during the AddXXX() funcs will be generated/returned
// here. We do this because an error during AddXXX() is more like a
// compile time error so it doesn't matter too much when we stop our
// processing as long as we do stop it, so this allows the code
// around AddXXX() to be just:
// defFlag := AddString("description", "")
// w/o needing to add an if-statement around each one.
func (bf *BFlags) Parse() error {
// If there was an error while defining the possible flags
// go ahead and bubble it back up here since we didn't do it
// earlier in the processing
if bf.Err != nil {
return fmt.Errorf("Error setting up flags: %s", bf.Err)
}
for _, arg := range bf.Args {
if !strings.HasPrefix(arg, "--") {
return fmt.Errorf("Arg should start with -- : %s", arg)
}
if arg == "--" {
return nil
}
arg = arg[2:]
value := ""
index := strings.Index(arg, "=")
if index >= 0 {
value = arg[index+1:]
arg = arg[:index]
}
flag, ok := bf.flags[arg]
if !ok {
return fmt.Errorf("Unknown flag: %s", arg)
}
if _, ok = bf.used[arg]; ok {
return fmt.Errorf("Duplicate flag specified: %s", arg)
}
bf.used[arg] = flag
switch flag.flagType {
case boolType:
// value == "" is only ok if no "=" was specified
if index >= 0 && value == "" {
return fmt.Errorf("Missing a value on flag: %s", arg)
}
lower := strings.ToLower(value)
if lower == "" {
flag.Value = "true"
} else if lower == "true" || lower == "false" {
flag.Value = lower
} else {
return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
}
case stringType:
if index < 0 {
return fmt.Errorf("Missing a value on flag: %s", arg)
}
flag.Value = value
default:
panic("No idea what kind of flag we have! Should never get here!")
}
}
return nil
}
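// Example usage (illustrative sketch; the flag names below are made up, while the
// API calls are the ones defined in this file):
//
//	flags := NewBFlagsWithArgs([]string{"--chown=1000:1000", "--keep"})
//	chownFlag := flags.AddString("chown", "")
//	keepFlag := flags.AddBool("keep", false)
//	if err := flags.Parse(); err != nil {
//		// e.g. "Unknown flag", "Duplicate flag specified", or a missing value
//	}
//	_ = chownFlag.Value   // "1000:1000"
//	_ = keepFlag.IsTrue() // true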

View file

@ -0,0 +1,187 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"testing"
)
func TestBuilderFlags(t *testing.T) {
var expected string
var err error
// ---
bf := NewBFlags()
bf.Args = []string{}
if err := bf.Parse(); err != nil {
t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
}
// ---
bf = NewBFlags()
bf.Args = []string{"--"}
if err := bf.Parse(); err != nil {
t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
}
// ---
bf = NewBFlags()
flStr1 := bf.AddString("str1", "")
flBool1 := bf.AddBool("bool1", false)
bf.Args = []string{}
if err = bf.Parse(); err != nil {
t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
}
if flStr1.IsUsed() {
t.Fatal("Test3 - str1 was not used!")
}
if flBool1.IsUsed() {
t.Fatal("Test3 - bool1 was not used!")
}
// ---
bf = NewBFlags()
flStr1 = bf.AddString("str1", "HI")
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{}
if err = bf.Parse(); err != nil {
t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
}
if flStr1.Value != "HI" {
t.Fatal("Str1 was supposed to default to: HI")
}
if flBool1.IsTrue() {
t.Fatal("Bool1 was supposed to default to: false")
}
if flStr1.IsUsed() {
t.Fatal("Str1 was not used!")
}
if flBool1.IsUsed() {
t.Fatal("Bool1 was not used!")
}
// ---
bf = NewBFlags()
flStr1 = bf.AddString("str1", "HI")
bf.Args = []string{"--str1"}
if err = bf.Parse(); err == nil {
t.Fatalf("Test %q was supposed to fail", bf.Args)
}
// ---
bf = NewBFlags()
flStr1 = bf.AddString("str1", "HI")
bf.Args = []string{"--str1="}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
expected = ""
if flStr1.Value != expected {
t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
}
// ---
bf = NewBFlags()
flStr1 = bf.AddString("str1", "HI")
bf.Args = []string{"--str1=BYE"}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
expected = "BYE"
if flStr1.Value != expected {
t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
}
// ---
bf = NewBFlags()
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool1"}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
if !flBool1.IsTrue() {
t.Fatal("Test-b1 Bool1 was supposed to be true")
}
// ---
bf = NewBFlags()
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool1=true"}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
if !flBool1.IsTrue() {
t.Fatal("Test-b2 Bool1 was supposed to be true")
}
// ---
bf = NewBFlags()
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool1=false"}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
if flBool1.IsTrue() {
t.Fatal("Test-b3 Bool1 was supposed to be false")
}
// ---
bf = NewBFlags()
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool1=false1"}
if err = bf.Parse(); err == nil {
t.Fatalf("Test %q was supposed to fail", bf.Args)
}
// ---
bf = NewBFlags()
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool2"}
if err = bf.Parse(); err == nil {
t.Fatalf("Test %q was supposed to fail", bf.Args)
}
// ---
bf = NewBFlags()
flStr1 = bf.AddString("str1", "HI")
flBool1 = bf.AddBool("bool1", false)
bf.Args = []string{"--bool1", "--str1=BYE"}
if err = bf.Parse(); err != nil {
t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
}
if flStr1.Value != "BYE" {
t.Fatalf("Test %s, str1 should be BYE", bf.Args)
}
if !flBool1.IsTrue() {
t.Fatalf("Test %s, bool1 should be true", bf.Args)
}
}

View file

@@ -0,0 +1,396 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"errors"
"strings"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
)
// KeyValuePair represents an arbitrary named value (useful in a slice instead of map[string]string to preserve ordering)
type KeyValuePair struct {
Key string
Value string
}
func (kvp *KeyValuePair) String() string {
return kvp.Key + "=" + kvp.Value
}
// Command is implemented by every command present in a dockerfile
type Command interface {
Name() string
}
// KeyValuePairs is a slice of KeyValuePair
type KeyValuePairs []KeyValuePair
// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code)
type withNameAndCode struct {
code string
name string
}
func (c *withNameAndCode) String() string {
return c.code
}
// Name of the command
func (c *withNameAndCode) Name() string {
return c.name
}
func newWithNameAndCode(req parseRequest) withNameAndCode {
return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command}
}
// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
type SingleWordExpander func(word string) (string, error)
// SupportsSingleWordExpansion interface marks a command as supporting variable expansion
type SupportsSingleWordExpansion interface {
Expand(expander SingleWordExpander) error
}
// PlatformSpecific adds platform checks to a command
type PlatformSpecific interface {
CheckPlatform(platform string) error
}
func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) {
key, err := expander(kvp.Key)
if err != nil {
return KeyValuePair{}, err
}
value, err := expander(kvp.Value)
if err != nil {
return KeyValuePair{}, err
}
return KeyValuePair{Key: key, Value: value}, nil
}
func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error {
for i, kvp := range kvps {
newKvp, err := expandKvp(kvp, expander)
if err != nil {
return err
}
kvps[i] = newKvp
}
return nil
}
func expandSliceInPlace(values []string, expander SingleWordExpander) error {
for i, v := range values {
newValue, err := expander(v)
if err != nil {
return err
}
values[i] = newValue
}
return nil
}
// EnvCommand : ENV key1 value1 [keyN valueN...]
type EnvCommand struct {
withNameAndCode
Env KeyValuePairs // kvp slice instead of map to preserve ordering
}
// Expand variables
func (c *EnvCommand) Expand(expander SingleWordExpander) error {
return expandKvpsInPlace(c.Env, expander)
}
// MaintainerCommand : MAINTAINER maintainer_name
type MaintainerCommand struct {
withNameAndCode
Maintainer string
}
// LabelCommand : LABEL some json data describing the image
//
// Sets the label foo to bar.
//
type LabelCommand struct {
withNameAndCode
Labels KeyValuePairs // kvp slice instead of map to preserve ordering
}
// Expand variables
func (c *LabelCommand) Expand(expander SingleWordExpander) error {
return expandKvpsInPlace(c.Labels, expander)
}
// SourcesAndDest represent a list of source files and a destination
type SourcesAndDest []string
// Sources lists the source paths
func (s SourcesAndDest) Sources() []string {
res := make([]string, len(s)-1)
copy(res, s[:len(s)-1])
return res
}
// Dest path of the operation
func (s SourcesAndDest) Dest() string {
return s[len(s)-1]
}
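// Illustrative note (not part of the original source): for an instruction like
// `COPY a.txt b.txt /dst/`, the parser yields SourcesAndDest{"a.txt", "b.txt", "/dst/"},
// so Sources() returns []string{"a.txt", "b.txt"} and Dest() returns "/dst/".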
// AddCommand : ADD foo /path
//
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
// exist here. If you do not wish to have this automatic handling, use COPY.
//
type AddCommand struct {
withNameAndCode
SourcesAndDest
Chown string
}
// Expand variables
func (c *AddCommand) Expand(expander SingleWordExpander) error {
return expandSliceInPlace(c.SourcesAndDest, expander)
}
// CopyCommand : COPY foo /path
//
// Same as 'ADD' but without the tar and remote url handling.
//
type CopyCommand struct {
withNameAndCode
SourcesAndDest
From string
Chown string
}
// Expand variables
func (c *CopyCommand) Expand(expander SingleWordExpander) error {
return expandSliceInPlace(c.SourcesAndDest, expander)
}
// OnbuildCommand : ONBUILD <some other command>
type OnbuildCommand struct {
withNameAndCode
Expression string
}
// WorkdirCommand : WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
type WorkdirCommand struct {
withNameAndCode
Path string
}
// Expand variables
func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
p, err := expander(c.Path)
if err != nil {
return err
}
c.Path = p
return nil
}
// ShellDependantCmdLine represents a cmdline optionally prepended with the shell
type ShellDependantCmdLine struct {
CmdLine strslice.StrSlice
PrependShell bool
}
// RunCommand : RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
// Windows, in the event there is only one argument. The difference in processing:
//
// RUN echo hi # sh -c echo hi (Linux)
// RUN echo hi # cmd /S /C echo hi (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
type RunCommand struct {
withNameAndCode
ShellDependantCmdLine
}
// CmdCommand : CMD foo
//
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
type CmdCommand struct {
withNameAndCode
ShellDependantCmdLine
}
// HealthCheckCommand : HEALTHCHECK foo
//
// Set the default healthcheck command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
type HealthCheckCommand struct {
withNameAndCode
Health *container.HealthConfig
}
// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx
//
// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
//
// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
// is initialized at newBuilder time instead of through argument parsing.
//
type EntrypointCommand struct {
withNameAndCode
ShellDependantCmdLine
}
// ExposeCommand : EXPOSE 6667/tcp 7000/tcp
//
// Expose ports for links and port mappings. This all ends up in
// req.runConfig.ExposedPorts for runconfig.
//
type ExposeCommand struct {
withNameAndCode
Ports []string
}
// UserCommand : USER foo
//
// Set the user to 'foo' for future commands and when running the
// ENTRYPOINT/CMD at container run time.
//
type UserCommand struct {
withNameAndCode
User string
}
// Expand variables
func (c *UserCommand) Expand(expander SingleWordExpander) error {
p, err := expander(c.User)
if err != nil {
return err
}
c.User = p
return nil
}
// VolumeCommand : VOLUME /foo
//
// Expose the volume /foo for use. Will also accept the JSON array form.
//
type VolumeCommand struct {
withNameAndCode
Volumes []string
}
// Expand variables
func (c *VolumeCommand) Expand(expander SingleWordExpander) error {
return expandSliceInPlace(c.Volumes, expander)
}
// StopSignalCommand : STOPSIGNAL signal
//
// Set the signal that will be used to kill the container.
type StopSignalCommand struct {
withNameAndCode
Signal string
}
// Expand variables
func (c *StopSignalCommand) Expand(expander SingleWordExpander) error {
p, err := expander(c.Signal)
if err != nil {
return err
}
c.Signal = p
return nil
}
// CheckPlatform checks that the command is supported in the target platform
func (c *StopSignalCommand) CheckPlatform(platform string) error {
if platform == "windows" {
return errors.New("The daemon on this platform does not support the command stopsignal")
}
return nil
}
// ArgCommand : ARG name[=value]
//
// Adds the variable foo to the trusted list of variables that can be passed
// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
// Dockerfile author may optionally set a default value of this variable.
type ArgCommand struct {
withNameAndCode
Key string
Value *string
}
// Expand variables
func (c *ArgCommand) Expand(expander SingleWordExpander) error {
p, err := expander(c.Key)
if err != nil {
return err
}
c.Key = p
if c.Value != nil {
p, err = expander(*c.Value)
if err != nil {
return err
}
c.Value = &p
}
return nil
}
// ShellCommand : SHELL powershell -command
//
// Set the non-default shell to use.
type ShellCommand struct {
withNameAndCode
Shell strslice.StrSlice
}
// Stage represents a single stage in a multi-stage build
type Stage struct {
Name string
Commands []Command
BaseName string
SourceCode string
}
// AddCommand to the stage
func (s *Stage) AddCommand(cmd Command) {
// todo: validate cmd type
s.Commands = append(s.Commands, cmd)
}
// IsCurrentStage checks whether the given stage name is the current (last) stage
func IsCurrentStage(s []Stage, name string) bool {
if len(s) == 0 {
return false
}
return s[len(s)-1].Name == name
}
// CurrentStage returns the last stage in a slice
func CurrentStage(s []Stage) (*Stage, error) {
if len(s) == 0 {
return nil, errors.New("No build stage in current context")
}
return &s[len(s)-1], nil
}
// HasStage looks for the presence of a given stage name
func HasStage(s []Stage, name string) (int, bool) {
for i, stage := range s {
if stage.Name == name {
return i, true
}
}
return -1, false
}
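// Illustrative sketch (not part of the original file): after parsing a
// two-stage Dockerfile such as
//	FROM golang AS build
//	FROM alpine
// HasStage(stages, "build") returns (0, true), IsCurrentStage(stages, "build")
// returns false because the alpine stage is last, and CurrentStage(stages)
// returns that final stage.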

View file

@@ -0,0 +1,9 @@
// +build !windows

package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import "fmt"
func errNotJSON(command, _ string) error {
return fmt.Errorf("%s requires the arguments to be in JSON form", command)
}

View file

@@ -0,0 +1,27 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"fmt"
"path/filepath"
"regexp"
"strings"
)
func errNotJSON(command, original string) error {
// For Windows users, give a hint if it looks like it might contain
// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
// as JSON must be escaped. Unfortunate...
//
// Specifically looking for quote-driveletter-colon-backslash, there's no
// double backslash and a [] pair. No, this is not perfect, but it doesn't
// have to be. It's simply a hint to make life a little easier.
extra := ""
original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
!strings.Contains(original, `\\`) &&
strings.Contains(original, "[") &&
strings.Contains(original, "]") {
extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
}
return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
}

View file

@@ -0,0 +1,635 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/pkg/errors"
)
type parseRequest struct {
command string
args []string
attributes map[string]bool
flags *BFlags
original string
}
func nodeArgs(node *parser.Node) []string {
result := []string{}
for ; node.Next != nil; node = node.Next {
arg := node.Next
if len(arg.Children) == 0 {
result = append(result, arg.Value)
} else if len(arg.Children) == 1 {
//sub command
result = append(result, arg.Children[0].Value)
result = append(result, nodeArgs(arg.Children[0])...)
}
}
return result
}
func newParseRequestFromNode(node *parser.Node) parseRequest {
return parseRequest{
command: node.Value,
args: nodeArgs(node),
attributes: node.Attributes,
original: node.Original,
flags: NewBFlagsWithArgs(node.Flags),
}
}
// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
func ParseInstruction(node *parser.Node) (interface{}, error) {
req := newParseRequestFromNode(node)
switch node.Value {
case command.Env:
return parseEnv(req)
case command.Maintainer:
return parseMaintainer(req)
case command.Label:
return parseLabel(req)
case command.Add:
return parseAdd(req)
case command.Copy:
return parseCopy(req)
case command.From:
return parseFrom(req)
case command.Onbuild:
return parseOnBuild(req)
case command.Workdir:
return parseWorkdir(req)
case command.Run:
return parseRun(req)
case command.Cmd:
return parseCmd(req)
case command.Healthcheck:
return parseHealthcheck(req)
case command.Entrypoint:
return parseEntrypoint(req)
case command.Expose:
return parseExpose(req)
case command.User:
return parseUser(req)
case command.Volume:
return parseVolume(req)
case command.StopSignal:
return parseStopSignal(req)
case command.Arg:
return parseArg(req)
case command.Shell:
return parseShell(req)
}
return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine}
}
// ParseCommand converts an AST to a typed Command
func ParseCommand(node *parser.Node) (Command, error) {
s, err := ParseInstruction(node)
if err != nil {
return nil, err
}
if c, ok := s.(Command); ok {
return c, nil
}
return nil, errors.Errorf("%T is not a command type", s)
}
// UnknownInstruction represents an error occurring when a command is unresolvable
type UnknownInstruction struct {
Line int
Instruction string
}
func (e *UnknownInstruction) Error() string {
return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction))
}
// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction
func IsUnknownInstruction(err error) bool {
_, ok := err.(*UnknownInstruction)
if !ok {
var pe *parseError
if pe, ok = err.(*parseError); ok {
_, ok = pe.inner.(*UnknownInstruction)
}
}
return ok
}
type parseError struct {
inner error
node *parser.Node
}
func (e *parseError) Error() string {
return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
}
// Parse a docker file into a collection of buildable stages
func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) {
for _, n := range ast.Children {
cmd, err := ParseInstruction(n)
if err != nil {
return nil, nil, &parseError{inner: err, node: n}
}
if len(stages) == 0 {
// meta arg case
if a, isArg := cmd.(*ArgCommand); isArg {
metaArgs = append(metaArgs, *a)
continue
}
}
switch c := cmd.(type) {
case *Stage:
stages = append(stages, *c)
case Command:
stage, err := CurrentStage(stages)
if err != nil {
return nil, nil, err
}
stage.AddCommand(c)
default:
return nil, nil, errors.Errorf("%T is not a command type", cmd)
}
}
return stages, metaArgs, nil
}
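// Illustrative sketch (not part of the original file): wiring the parser
// package and Parse together, using only the APIs already imported above.
//
//	result, err := parser.Parse(strings.NewReader("FROM golang:1.10 AS build\nRUN go build ./..."))
//	if err == nil {
//		stages, metaArgs, err := Parse(result.AST)
//		// stages[0].Name == "build" and stages[0].Commands contains a *RunCommand;
//		// metaArgs would hold any ARG instructions appearing before the first FROM.
//		_, _, _ = stages, metaArgs, err
//	}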
func parseKvps(args []string, cmdName string) (KeyValuePairs, error) {
if len(args) == 0 {
return nil, errAtLeastOneArgument(cmdName)
}
if len(args)%2 != 0 {
// should never get here, but just in case
return nil, errTooManyArguments(cmdName)
}
var res KeyValuePairs
for j := 0; j < len(args); j += 2 {
if len(args[j]) == 0 {
return nil, errBlankCommandNames(cmdName)
}
name := args[j]
value := args[j+1]
res = append(res, KeyValuePair{Key: name, Value: value})
}
return res, nil
}
func parseEnv(req parseRequest) (*EnvCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
envs, err := parseKvps(req.args, "ENV")
if err != nil {
return nil, err
}
return &EnvCommand{
Env: envs,
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseMaintainer(req parseRequest) (*MaintainerCommand, error) {
if len(req.args) != 1 {
return nil, errExactlyOneArgument("MAINTAINER")
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &MaintainerCommand{
Maintainer: req.args[0],
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseLabel(req parseRequest) (*LabelCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
labels, err := parseKvps(req.args, "LABEL")
if err != nil {
return nil, err
}
return &LabelCommand{
Labels: labels,
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseAdd(req parseRequest) (*AddCommand, error) {
if len(req.args) < 2 {
return nil, errNoDestinationArgument("ADD")
}
flChown := req.flags.AddString("chown", "")
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &AddCommand{
SourcesAndDest: SourcesAndDest(req.args),
withNameAndCode: newWithNameAndCode(req),
Chown: flChown.Value,
}, nil
}
func parseCopy(req parseRequest) (*CopyCommand, error) {
if len(req.args) < 2 {
return nil, errNoDestinationArgument("COPY")
}
flChown := req.flags.AddString("chown", "")
flFrom := req.flags.AddString("from", "")
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &CopyCommand{
SourcesAndDest: SourcesAndDest(req.args),
From: flFrom.Value,
withNameAndCode: newWithNameAndCode(req),
Chown: flChown.Value,
}, nil
}
func parseFrom(req parseRequest) (*Stage, error) {
stageName, err := parseBuildStageName(req.args)
if err != nil {
return nil, err
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
code := strings.TrimSpace(req.original)
return &Stage{
BaseName: req.args[0],
Name: stageName,
SourceCode: code,
Commands: []Command{},
}, nil
}
func parseBuildStageName(args []string) (string, error) {
stageName := ""
switch {
case len(args) == 3 && strings.EqualFold(args[1], "as"):
stageName = strings.ToLower(args[2])
if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
}
case len(args) != 1:
return "", errors.New("FROM requires either one or three arguments")
}
return stageName, nil
}
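// Illustrative note (not part of the original file): for `FROM busybox AS builder`
// args is ["busybox", "AS", "builder"] and the returned stage name is "builder";
// a name that starts with a digit, e.g. "2ndstage", fails the regexp check above.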
func parseOnBuild(req parseRequest) (*OnbuildCommand, error) {
if len(req.args) == 0 {
return nil, errAtLeastOneArgument("ONBUILD")
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0]))
switch strings.ToUpper(triggerInstruction) {
case "ONBUILD":
return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
case "MAINTAINER", "FROM":
return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
}
original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "")
return &OnbuildCommand{
Expression: original,
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
if len(req.args) != 1 {
return nil, errExactlyOneArgument("WORKDIR")
}
err := req.flags.Parse()
if err != nil {
return nil, err
}
return &WorkdirCommand{
Path: req.args[0],
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine {
args := handleJSONArgs(req.args, req.attributes)
cmd := strslice.StrSlice(args)
if emptyAsNil && len(cmd) == 0 {
cmd = nil
}
return ShellDependantCmdLine{
CmdLine: cmd,
PrependShell: !req.attributes["json"],
}
}
func parseRun(req parseRequest) (*RunCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &RunCommand{
ShellDependantCmdLine: parseShellDependentCommand(req, false),
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseCmd(req parseRequest) (*CmdCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &CmdCommand{
ShellDependantCmdLine: parseShellDependentCommand(req, false),
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
cmd := &EntrypointCommand{
ShellDependantCmdLine: parseShellDependentCommand(req, true),
withNameAndCode: newWithNameAndCode(req),
}
return cmd, nil
}
// parseOptInterval(flag) is the duration of flag.Value, or 0 if
// empty. An error is reported if the value is given and less than minimum duration.
func parseOptInterval(f *Flag) (time.Duration, error) {
s := f.Value
if s == "" {
return 0, nil
}
d, err := time.ParseDuration(s)
if err != nil {
return 0, err
}
if d < container.MinimumDuration {
return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
}
return d, nil
}
func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) {
if len(req.args) == 0 {
return nil, errAtLeastOneArgument("HEALTHCHECK")
}
cmd := &HealthCheckCommand{
withNameAndCode: newWithNameAndCode(req),
}
typ := strings.ToUpper(req.args[0])
args := req.args[1:]
if typ == "NONE" {
if len(args) != 0 {
return nil, errors.New("HEALTHCHECK NONE takes no arguments")
}
test := strslice.StrSlice{typ}
cmd.Health = &container.HealthConfig{
Test: test,
}
} else {
healthcheck := container.HealthConfig{}
flInterval := req.flags.AddString("interval", "")
flTimeout := req.flags.AddString("timeout", "")
flStartPeriod := req.flags.AddString("start-period", "")
flRetries := req.flags.AddString("retries", "")
if err := req.flags.Parse(); err != nil {
return nil, err
}
switch typ {
case "CMD":
cmdSlice := handleJSONArgs(args, req.attributes)
if len(cmdSlice) == 0 {
return nil, errors.New("Missing command after HEALTHCHECK CMD")
}
if !req.attributes["json"] {
typ = "CMD-SHELL"
}
healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
default:
return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
}
interval, err := parseOptInterval(flInterval)
if err != nil {
return nil, err
}
healthcheck.Interval = interval
timeout, err := parseOptInterval(flTimeout)
if err != nil {
return nil, err
}
healthcheck.Timeout = timeout
startPeriod, err := parseOptInterval(flStartPeriod)
if err != nil {
return nil, err
}
healthcheck.StartPeriod = startPeriod
if flRetries.Value != "" {
retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
if err != nil {
return nil, err
}
if retries < 1 {
return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries)
}
healthcheck.Retries = int(retries)
} else {
healthcheck.Retries = 0
}
cmd.Health = &healthcheck
}
return cmd, nil
}
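// Illustrative note (not part of the original file): a shell-form instruction
// such as `HEALTHCHECK --interval=30s --retries=3 CMD curl -f http://localhost/`
// produces Health.Test == ["CMD-SHELL", "curl -f http://localhost/"],
// Health.Interval == 30s and Health.Retries == 3; the exec (JSON) form keeps
// the plain "CMD" prefix instead of "CMD-SHELL".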
func parseExpose(req parseRequest) (*ExposeCommand, error) {
portsTab := req.args
if len(req.args) == 0 {
return nil, errAtLeastOneArgument("EXPOSE")
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
sort.Strings(portsTab)
return &ExposeCommand{
Ports: portsTab,
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseUser(req parseRequest) (*UserCommand, error) {
if len(req.args) != 1 {
return nil, errExactlyOneArgument("USER")
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
return &UserCommand{
User: req.args[0],
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseVolume(req parseRequest) (*VolumeCommand, error) {
if len(req.args) == 0 {
return nil, errAtLeastOneArgument("VOLUME")
}
if err := req.flags.Parse(); err != nil {
return nil, err
}
cmd := &VolumeCommand{
withNameAndCode: newWithNameAndCode(req),
}
for _, v := range req.args {
v = strings.TrimSpace(v)
if v == "" {
return nil, errors.New("VOLUME specified can not be an empty string")
}
cmd.Volumes = append(cmd.Volumes, v)
}
return cmd, nil
}
func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
if len(req.args) != 1 {
return nil, errExactlyOneArgument("STOPSIGNAL")
}
sig := req.args[0]
cmd := &StopSignalCommand{
Signal: sig,
withNameAndCode: newWithNameAndCode(req),
}
return cmd, nil
}
func parseArg(req parseRequest) (*ArgCommand, error) {
if len(req.args) != 1 {
return nil, errExactlyOneArgument("ARG")
}
var (
name string
newValue *string
)
arg := req.args[0]
// 'arg' can just be a name or name-value pair. Note that this is different
// from 'env' that handles the split of name and value at the parser level.
// The reason for doing it differently for 'arg' is that we support just
// defining an arg and not assign it a value (while 'env' always expects a
// name-value pair). If possible, it will be good to harmonize the two.
if strings.Contains(arg, "=") {
parts := strings.SplitN(arg, "=", 2)
if len(parts[0]) == 0 {
return nil, errBlankCommandNames("ARG")
}
name = parts[0]
newValue = &parts[1]
} else {
name = arg
}
return &ArgCommand{
Key: name,
Value: newValue,
withNameAndCode: newWithNameAndCode(req),
}, nil
}
func parseShell(req parseRequest) (*ShellCommand, error) {
if err := req.flags.Parse(); err != nil {
return nil, err
}
shellSlice := handleJSONArgs(req.args, req.attributes)
switch {
case len(shellSlice) == 0:
// SHELL []
return nil, errAtLeastOneArgument("SHELL")
case req.attributes["json"]:
// SHELL ["powershell", "-command"]
return &ShellCommand{
Shell: strslice.StrSlice(shellSlice),
withNameAndCode: newWithNameAndCode(req),
}, nil
default:
// SHELL powershell -command - not JSON
return nil, errNotJSON("SHELL", req.original)
}
}
func errAtLeastOneArgument(command string) error {
return errors.Errorf("%s requires at least one argument", command)
}
func errExactlyOneArgument(command string) error {
return errors.Errorf("%s requires exactly one argument", command)
}
func errNoDestinationArgument(command string) error {
return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command)
}
func errBlankCommandNames(command string) error {
return errors.Errorf("%s names can not be blank", command)
}
func errTooManyArguments(command string) error {
return errors.Errorf("Bad input to %s, too many arguments", command)
}

View file

@@ -0,0 +1,200 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import (
"strings"
"testing"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/internal/testutil"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestCommandsExactlyOneArgument(t *testing.T) {
commands := []string{
"MAINTAINER",
"WORKDIR",
"USER",
"STOPSIGNAL",
}
for _, command := range commands {
ast, err := parser.Parse(strings.NewReader(command))
assert.NilError(t, err)
_, err = ParseInstruction(ast.AST.Children[0])
assert.Check(t, is.Error(err, errExactlyOneArgument(command).Error()))
}
}
func TestCommandsAtLeastOneArgument(t *testing.T) {
commands := []string{
"ENV",
"LABEL",
"ONBUILD",
"HEALTHCHECK",
"EXPOSE",
"VOLUME",
}
for _, command := range commands {
ast, err := parser.Parse(strings.NewReader(command))
assert.NilError(t, err)
_, err = ParseInstruction(ast.AST.Children[0])
assert.Check(t, is.Error(err, errAtLeastOneArgument(command).Error()))
}
}
func TestCommandsNoDestinationArgument(t *testing.T) {
commands := []string{
"ADD",
"COPY",
}
for _, command := range commands {
ast, err := parser.Parse(strings.NewReader(command + " arg1"))
assert.NilError(t, err)
_, err = ParseInstruction(ast.AST.Children[0])
assert.Check(t, is.Error(err, errNoDestinationArgument(command).Error()))
}
}
func TestCommandsTooManyArguments(t *testing.T) {
commands := []string{
"ENV",
"LABEL",
}
for _, command := range commands {
node := &parser.Node{
Original: command + "arg1 arg2 arg3",
Value: strings.ToLower(command),
Next: &parser.Node{
Value: "arg1",
Next: &parser.Node{
Value: "arg2",
Next: &parser.Node{
Value: "arg3",
},
},
},
}
_, err := ParseInstruction(node)
assert.Check(t, is.Error(err, errTooManyArguments(command).Error()))
}
}
func TestCommandsBlankNames(t *testing.T) {
commands := []string{
"ENV",
"LABEL",
}
for _, command := range commands {
node := &parser.Node{
Original: command + " =arg2",
Value: strings.ToLower(command),
Next: &parser.Node{
Value: "",
Next: &parser.Node{
Value: "arg2",
},
},
}
_, err := ParseInstruction(node)
assert.Check(t, is.Error(err, errBlankCommandNames(command).Error()))
}
}
func TestHealthCheckCmd(t *testing.T) {
node := &parser.Node{
Value: command.Healthcheck,
Next: &parser.Node{
Value: "CMD",
Next: &parser.Node{
Value: "hello",
Next: &parser.Node{
Value: "world",
},
},
},
}
cmd, err := ParseInstruction(node)
assert.Check(t, err)
hc, ok := cmd.(*HealthCheckCommand)
assert.Check(t, ok)
expected := []string{"CMD-SHELL", "hello world"}
assert.Check(t, is.DeepEqual(expected, hc.Health.Test))
}
func TestParseOptInterval(t *testing.T) {
flInterval := &Flag{
name: "interval",
flagType: stringType,
Value: "50ns",
}
_, err := parseOptInterval(flInterval)
testutil.ErrorContains(t, err, "cannot be less than 1ms")
flInterval.Value = "1ms"
_, err = parseOptInterval(flInterval)
assert.NilError(t, err)
}
func TestErrorCases(t *testing.T) {
cases := []struct {
name string
dockerfile string
expectedError string
}{
{
name: "copyEmptyWhitespace",
dockerfile: `COPY
quux \
bar`,
expectedError: "COPY requires at least two arguments",
},
{
name: "ONBUILD forbidden FROM",
dockerfile: "ONBUILD FROM scratch",
expectedError: "FROM isn't allowed as an ONBUILD trigger",
},
{
name: "ONBUILD forbidden MAINTAINER",
dockerfile: "ONBUILD MAINTAINER docker.io",
expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
},
{
name: "ARG two arguments",
dockerfile: "ARG foo bar",
expectedError: "ARG requires exactly one argument",
},
{
name: "MAINTAINER unknown flag",
dockerfile: "MAINTAINER --boo joe@example.com",
expectedError: "Unknown flag: boo",
},
{
name: "Chaining ONBUILD",
dockerfile: `ONBUILD ONBUILD RUN touch foobar`,
expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
},
{
name: "Invalid instruction",
dockerfile: `foo bar`,
expectedError: "unknown instruction: FOO",
},
}
for _, c := range cases {
r := strings.NewReader(c.dockerfile)
ast, err := parser.Parse(r)
if err != nil {
t.Fatalf("Error when parsing Dockerfile: %s", err)
}
n := ast.AST.Children[0]
_, err = ParseInstruction(n)
testutil.ErrorContains(t, err, c.expectedError)
}
}

View file

@@ -0,0 +1,19 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import "strings"
// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile
// for exec form it returns untouched args slice
// for shell form it returns concatenated args as the first element of a slice
func handleJSONArgs(args []string, attributes map[string]bool) []string {
if len(args) == 0 {
return []string{}
}
if attributes != nil && attributes["json"] {
return args
}
// literal string command, not an exec array
return []string{strings.Join(args, " ")}
}

View file

@@ -0,0 +1,65 @@
package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
import "testing"
type testCase struct {
name string
args []string
attributes map[string]bool
expected []string
}
func initTestCases() []testCase {
testCases := []testCase{}
testCases = append(testCases, testCase{
name: "empty args",
args: []string{},
attributes: make(map[string]bool),
expected: []string{},
})
jsonAttributes := make(map[string]bool)
jsonAttributes["json"] = true
testCases = append(testCases, testCase{
name: "json attribute with one element",
args: []string{"foo"},
attributes: jsonAttributes,
expected: []string{"foo"},
})
testCases = append(testCases, testCase{
name: "json attribute with two elements",
args: []string{"foo", "bar"},
attributes: jsonAttributes,
expected: []string{"foo", "bar"},
})
testCases = append(testCases, testCase{
name: "no attributes",
args: []string{"foo", "bar"},
attributes: nil,
expected: []string{"foo bar"},
})
return testCases
}
func TestHandleJSONArgs(t *testing.T) {
testCases := initTestCases()
for _, test := range testCases {
arguments := handleJSONArgs(test.args, test.attributes)
if len(arguments) != len(test.expected) {
t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments))
}
for i := range test.expected {
if arguments[i] != test.expected[i] {
t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i])
}
}
}
}

View file

@@ -0,0 +1,488 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
)
// Archiver defines an interface for copying files from one destination to
// another using Tar/Untar.
type Archiver interface {
TarUntar(src, dst string) error
UntarPath(src, dst string) error
CopyWithTar(src, dst string) error
CopyFileWithTar(src, dst string) error
IDMappings() *idtools.IDMappings
}
// The builder will use the following interfaces if the container fs implements
// these for optimized copies to and from the container.
type extractor interface {
ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}
type archiver interface {
ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}
// helper functions to get tar/untar func
func untarFunc(i interface{}) containerfs.UntarFunc {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive
}
return chrootarchive.Untar
}
func tarFunc(i interface{}) containerfs.TarFunc {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath
}
return archive.TarWithOptions
}
func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver {
t, u := tarFunc(src), untarFunc(dst)
return &containerfs.Archiver{
SrcDriver: src,
DstDriver: dst,
Tar: t,
Untar: u,
IDMappingsVar: b.idMappings,
}
}
func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
if b.disableCommit {
return nil
}
if !dispatchState.hasFromImage() {
return errors.New("Please provide a source image with `from` prior to commit")
}
optionsPlatform := system.ParsePlatform(b.options.Platform)
runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, optionsPlatform.OS))
hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd)
if err != nil || hit {
return err
}
id, err := b.create(runConfigWithCommentCmd)
if err != nil {
return err
}
return b.commitContainer(dispatchState, id, runConfigWithCommentCmd)
}
func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error {
if b.disableCommit {
return nil
}
commitCfg := backend.CommitConfig{
Author: dispatchState.maintainer,
// TODO: this copy should be done by Commit()
Config: copyRunConfig(dispatchState.runConfig),
ContainerConfig: containerConfig,
ContainerID: id,
}
imageID, err := b.docker.CommitBuildStep(commitCfg)
dispatchState.imageID = string(imageID)
return err
}
func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error {
newLayer, err := layer.Commit()
if err != nil {
return err
}
// add an image mount without an image so the layer is properly unmounted
// if there is an error before we can add the full mount with image
b.imageSources.Add(newImageMount(nil, newLayer))
parentImage, ok := parent.(*image.Image)
if !ok {
return errors.Errorf("unexpected image type")
}
newImage := image.NewChildImage(parentImage, image.ChildConfig{
Author: state.maintainer,
ContainerConfig: runConfig,
DiffID: newLayer.DiffID(),
Config: copyRunConfig(state.runConfig),
}, parentImage.OS)
// TODO: it seems strange to marshal this here instead of just passing in the
// image struct
config, err := newImage.MarshalJSON()
if err != nil {
return errors.Wrap(err, "failed to encode image config")
}
exportedImage, err := b.docker.CreateImage(config, state.imageID)
if err != nil {
return errors.Wrapf(err, "failed to export image")
}
state.imageID = exportedImage.ImageID()
b.imageSources.Add(newImageMount(exportedImage, newLayer))
return nil
}
func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error {
srcHash := getSourceHashFromInfos(inst.infos)
var chownComment string
if inst.chownStr != "" {
chownComment = fmt.Sprintf("--chown=%s", inst.chownStr)
}
commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest)
// TODO: should this have been using origPaths instead of srcHash in the comment?
optionsPlatform := system.ParsePlatform(b.options.Platform)
runConfigWithCommentCmd := copyRunConfig(
state.runConfig,
withCmdCommentString(commentStr, optionsPlatform.OS))
hit, err := b.probeCache(state, runConfigWithCommentCmd)
if err != nil || hit {
return err
}
imageMount, err := b.imageSources.Get(state.imageID, true)
if err != nil {
return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
}
rwLayer, err := imageMount.NewRWLayer()
if err != nil {
return err
}
defer rwLayer.Release()
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, b.options.Platform)
if err != nil {
return err
}
chownPair := b.idMappings.RootPair()
// if a chown was requested, perform the steps to get the uid, gid
// translated (if necessary because of user namespaces), and replace
// the root pair with the chown pair for copy operations
if inst.chownStr != "" {
chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings)
if err != nil {
return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
}
}
for _, info := range inst.infos {
opts := copyFileOptions{
decompress: inst.allowLocalDecompression,
archiver: b.getArchiver(info.root, destInfo.root),
chownPair: chownPair,
}
if err := performCopyForInfo(destInfo, info, opts); err != nil {
return errors.Wrapf(err, "failed to copy files")
}
}
return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
}
func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) {
// Twiddle the destination when it's a relative path - meaning, make it
// relative to the WORKINGDIR
dest, err := normalizeDest(workingDir, inst.dest, platform)
if err != nil {
return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
}
return copyInfo{root: rwLayer.Root(), path: dest}, nil
}
// normalizeDest normalises the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normalizeDest(workingDir, requested string, platform string) (string, error) {
dest := fromSlash(requested, platform)
endsInSlash := strings.HasSuffix(dest, string(separator(platform)))
if platform != "windows" {
if !path.IsAbs(requested) {
dest = path.Join("/", filepath.ToSlash(workingDir), dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += "/"
}
}
return dest, nil
}
// We are guaranteed that the working directory is already consistent,
// However, Windows also has, for now, the limitation that ADD/COPY can
// only be done to the system drive, not any drives that might be present
// as a result of a bind mount.
//
// So... if the path requested is Linux-style absolute (/foo or \\foo),
// we assume it is the system drive. If it is a Windows-style absolute
// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
// strip any configured working directories drive letter so that it
// can be subsequently legitimately converted to a Windows volume-style
// pathname.
// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
// we only want to validate where the DriveColon part has been supplied.
if filepath.IsAbs(dest) {
if strings.ToUpper(string(dest[0])) != "C" {
return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
}
dest = dest[2:] // Strip the drive letter
}
// Cannot handle relative where WorkingDir is not the system drive.
if len(workingDir) > 0 {
if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
}
if !system.IsAbs(dest) {
if string(workingDir[0]) != "C" {
return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
}
dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
// Make sure we preserve any trailing slash
if endsInSlash {
dest += string(os.PathSeparator)
}
}
}
return dest, nil
}
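// Illustrative examples (not part of the original file), assuming a Linux build:
// normalizeDest("/app", "bin/", "linux") returns "/app/bin/" (relative paths are
// joined onto the working directory, preserving the trailing slash), while
// normalizeDest("/app", "/abs", "linux") returns "/abs" unchanged.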
// For backwards compat, if there's just one info then use it as the
// cache look-up string, otherwise hash 'em all into one
func getSourceHashFromInfos(infos []copyInfo) string {
if len(infos) == 1 {
return infos[0].hash
}
var hashs []string
for _, info := range infos {
hashs = append(hashs, info.hash)
}
return hashStringSlice("multi", hashs)
}
func hashStringSlice(prefix string, slice []string) string {
hasher := sha256.New()
hasher.Write([]byte(strings.Join(slice, ",")))
return prefix + ":" + hex.EncodeToString(hasher.Sum(nil))
}
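// Illustrative note (not part of the original file): hashStringSlice("multi",
// []string{"h1", "h2"}) returns "multi:" followed by the hex-encoded SHA-256 of
// "h1,h2"; getSourceHashFromInfos falls back to this form whenever a COPY/ADD
// has more than one source.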
type runConfigModifier func(*container.Config)
func withCmd(cmd []string) runConfigModifier {
return func(runConfig *container.Config) {
runConfig.Cmd = cmd
}
}
// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
// why there are two almost identical versions of this.
func withCmdComment(comment string, platform string) runConfigModifier {
return func(runConfig *container.Config) {
runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment)
}
}
// withCmdCommentString exists to maintain compatibility with older versions.
// A few instructions (workdir, copy, add) used a nop comment that is a single arg
// where as all the other instructions used a two arg comment string. This
// function implements the single arg version.
func withCmdCommentString(comment string, platform string) runConfigModifier {
return func(runConfig *container.Config) {
runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment)
}
}
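// Illustrative note (assumption: the default Linux shell is ["/bin/sh", "-c"]):
// withCmdComment("WORKDIR /app", "linux") sets Cmd to
// ["/bin/sh", "-c", "#(nop) ", "WORKDIR /app"], while the single-argument
// variant withCmdCommentString produces ["/bin/sh", "-c", "#(nop) WORKDIR /app"].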
func withEnv(env []string) runConfigModifier {
return func(runConfig *container.Config) {
runConfig.Env = env
}
}
// withEntrypointOverride sets an entrypoint on runConfig if the command is
// not empty. The entrypoint is left unmodified if command is empty.
//
// The dockerfile RUN instruction expect to run without an entrypoint
// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
// will change a []string{""} entrypoint to nil, so we probe the cache with the
// nil entrypoint.
func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
return func(runConfig *container.Config) {
if len(cmd) > 0 {
runConfig.Entrypoint = entrypoint
}
}
}
func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
copy := *runConfig
copy.Cmd = copyStringSlice(runConfig.Cmd)
copy.Env = copyStringSlice(runConfig.Env)
copy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
copy.OnBuild = copyStringSlice(runConfig.OnBuild)
copy.Shell = copyStringSlice(runConfig.Shell)
if copy.Volumes != nil {
copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
for k, v := range runConfig.Volumes {
copy.Volumes[k] = v
}
}
if copy.ExposedPorts != nil {
copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
for k, v := range runConfig.ExposedPorts {
copy.ExposedPorts[k] = v
}
}
if copy.Labels != nil {
copy.Labels = make(map[string]string, len(runConfig.Labels))
for k, v := range runConfig.Labels {
copy.Labels[k] = v
}
}
for _, modifier := range modifiers {
modifier(&copy)
}
return &copy
}
func copyStringSlice(orig []string) []string {
if orig == nil {
return nil
}
return append([]string{}, orig...)
}
// getShell is a helper function which gets the right shell for prefixing the
// shell-form of RUN, ENTRYPOINT and CMD instructions
func getShell(c *container.Config, os string) []string {
if 0 == len(c.Shell) {
return append([]string{}, defaultShellForOS(os)[:]...)
}
return append([]string{}, c.Shell[:]...)
}
func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig)
if cachedID == "" || err != nil {
return false, err
}
fmt.Fprint(b.Stdout, " ---> Using cache\n")
dispatchState.imageID = cachedID
return true, nil
}
var defaultLogConfig = container.LogConfig{Type: "none"}
func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) {
if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit {
return "", err
}
// Set a log config to override any default value set on the daemon
hostConfig := &container.HostConfig{LogConfig: defaultLogConfig}
container, err := b.containerManager.Create(runConfig, hostConfig)
return container.ID, err
}
func (b *Builder) create(runConfig *container.Config) (string, error) {
hostConfig := hostConfigFromOptions(b.options)
container, err := b.containerManager.Create(runConfig, hostConfig)
if err != nil {
return "", err
}
// TODO: could this be moved into containerManager.Create() ?
for _, warning := range container.Warnings {
fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
}
fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID))
return container.ID, nil
}
func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig {
resources := container.Resources{
CgroupParent: options.CgroupParent,
CPUShares: options.CPUShares,
CPUPeriod: options.CPUPeriod,
CPUQuota: options.CPUQuota,
CpusetCpus: options.CPUSetCPUs,
CpusetMems: options.CPUSetMems,
Memory: options.Memory,
MemorySwap: options.MemorySwap,
Ulimits: options.Ulimits,
}
hc := &container.HostConfig{
SecurityOpt: options.SecurityOpt,
Isolation: options.Isolation,
ShmSize: options.ShmSize,
Resources: resources,
NetworkMode: container.NetworkMode(options.NetworkMode),
// Set a log config to override any default value set on the daemon
LogConfig: defaultLogConfig,
ExtraHosts: options.ExtraHosts,
}
// For WCOW, the default of 20GB hard-coded in the platform
// is too small for builder scenarios where many users are
// using RUN statements to install large amounts of data.
// Use 127GB as that's the default size of a VHD in Hyper-V.
if runtime.GOOS == "windows" && options.Platform == "windows" {
hc.StorageOpt = make(map[string]string)
hc.StorageOpt["size"] = "127GB"
}
return hc
}
// fromSlash works like filepath.FromSlash but with a given OS platform field
func fromSlash(path, platform string) string {
if platform == "windows" {
return strings.Replace(path, "/", "\\", -1)
}
return path
}
// separator returns an OS path separator for the given OS platform
func separator(platform string) byte {
if platform == "windows" {
return '\\'
}
return '/'
}

View file

@@ -0,0 +1,88 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"path/filepath"
"strconv"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/symlink"
lcUser "github.com/opencontainers/runc/libcontainer/user"
"github.com/pkg/errors"
)
func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) {
var userStr, grpStr string
parts := strings.Split(chown, ":")
if len(parts) > 2 {
return idtools.IDPair{}, errors.New("invalid chown string format: " + chown)
}
if len(parts) == 1 {
// if no group specified, use the user spec as group as well
userStr, grpStr = parts[0], parts[0]
} else {
userStr, grpStr = parts[0], parts[1]
}
passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath)
if err != nil {
return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs")
}
groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath)
if err != nil {
return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs")
}
uid, err := lookupUser(userStr, passwdPath)
if err != nil {
return idtools.IDPair{}, errors.Wrapf(err, "can't find uid for user "+userStr)
}
gid, err := lookupGroup(grpStr, groupPath)
if err != nil {
return idtools.IDPair{}, errors.Wrapf(err, "can't find gid for group "+grpStr)
}
// convert as necessary because of user namespaces
chownPair, err := idMappings.ToHost(idtools.IDPair{UID: uid, GID: gid})
if err != nil {
return idtools.IDPair{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping")
}
return chownPair, nil
}
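// Illustrative note (not part of the original file): with empty ID mappings,
// parseChownFlag("1000:1000", rootfs, idMappings) resolves to
// idtools.IDPair{UID: 1000, GID: 1000} without consulting the passwd/group
// files; a symbolic spec such as "www-data" is looked up in the container's
// /etc/passwd and /etc/group first and then translated through the
// user-namespace mappings.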
func lookupUser(userStr, filepath string) (int, error) {
// if the string is actually a uid integer, parse to int and return
// as we don't need to translate with the help of files
uid, err := strconv.Atoi(userStr)
if err == nil {
return uid, nil
}
users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool {
return u.Name == userStr
})
if err != nil {
return 0, err
}
if len(users) == 0 {
return 0, errors.New("no such user: " + userStr)
}
return users[0].Uid, nil
}
func lookupGroup(groupStr, filepath string) (int, error) {
// if the string is actually a gid integer, parse to int and return
// as we don't need to translate with the help of files
gid, err := strconv.Atoi(groupStr)
if err == nil {
return gid, nil
}
groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool {
return g.Name == groupStr
})
if err != nil {
return 0, err
}
if len(groups) == 0 {
return 0, errors.New("no such group: " + groupStr)
}
return groups[0].Gid, nil
}

View file

@@ -0,0 +1,138 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"os"
"path/filepath"
"testing"
"github.com/docker/docker/pkg/idtools"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestChownFlagParsing(t *testing.T) {
testFiles := map[string]string{
"passwd": `root:x:0:0::/bin:/bin/false
bin:x:1:1::/bin:/bin/false
wwwwww:x:21:33::/bin:/bin/false
unicorn:x:1001:1002::/bin:/bin/false
`,
"group": `root:x:0:
bin:x:1:
wwwwww:x:33:
unicorn:x:1002:
somegrp:x:5555:
othergrp:x:6666:
`,
}
// test mappings for validating use of maps
idMaps := []idtools.IDMap{
{
ContainerID: 0,
HostID: 100000,
Size: 65536,
},
}
remapped := idtools.NewIDMappingsFromMaps(idMaps, idMaps)
unmapped := &idtools.IDMappings{}
contextDir, cleanup := createTestTempDir(t, "", "builder-chown-parse-test")
defer cleanup()
if err := os.Mkdir(filepath.Join(contextDir, "etc"), 0755); err != nil {
t.Fatalf("error creating test directory: %v", err)
}
for filename, content := range testFiles {
createTestTempFile(t, filepath.Join(contextDir, "etc"), filename, content, 0644)
}
// positive tests
for _, testcase := range []struct {
name string
chownStr string
idMapping *idtools.IDMappings
expected idtools.IDPair
}{
{
name: "UIDNoMap",
chownStr: "1",
idMapping: unmapped,
expected: idtools.IDPair{UID: 1, GID: 1},
},
{
name: "UIDGIDNoMap",
chownStr: "0:1",
idMapping: unmapped,
expected: idtools.IDPair{UID: 0, GID: 1},
},
{
name: "UIDWithMap",
chownStr: "0",
idMapping: remapped,
expected: idtools.IDPair{UID: 100000, GID: 100000},
},
{
name: "UIDGIDWithMap",
chownStr: "1:33",
idMapping: remapped,
expected: idtools.IDPair{UID: 100001, GID: 100033},
},
{
name: "UserNoMap",
chownStr: "bin:5555",
idMapping: unmapped,
expected: idtools.IDPair{UID: 1, GID: 5555},
},
{
name: "GroupWithMap",
chownStr: "0:unicorn",
idMapping: remapped,
expected: idtools.IDPair{UID: 100000, GID: 101002},
},
{
name: "UserOnlyWithMap",
chownStr: "unicorn",
idMapping: remapped,
expected: idtools.IDPair{UID: 101001, GID: 101002},
},
} {
t.Run(testcase.name, func(t *testing.T) {
idPair, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping)
assert.NilError(t, err, "Failed to parse chown flag: %q", testcase.chownStr)
assert.Check(t, is.DeepEqual(testcase.expected, idPair), "chown flag mapping failure")
})
}
// error tests
for _, testcase := range []struct {
name string
chownStr string
idMapping *idtools.IDMappings
descr string
}{
{
name: "BadChownFlagFormat",
chownStr: "bob:1:555",
idMapping: unmapped,
descr: "invalid chown string format: bob:1:555",
},
{
name: "UserNoExist",
chownStr: "bob",
idMapping: unmapped,
descr: "can't find uid for user bob: no such user: bob",
},
{
name: "GroupNoExist",
chownStr: "root:bob",
idMapping: unmapped,
descr: "can't find gid for group bob: no such group: bob",
},
} {
t.Run(testcase.name, func(t *testing.T) {
_, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping)
assert.Check(t, is.Error(err, testcase.descr), "Expected error string doesn't match")
})
}
}

View file

@@ -0,0 +1,170 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"fmt"
"runtime"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/go-connections/nat"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestEmptyDockerfile(t *testing.T) {
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
defer cleanup()
createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777)
readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty")
}
func TestSymlinkDockerfile(t *testing.T) {
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
defer cleanup()
createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd")
// The reason the error is "Cannot locate specified Dockerfile" is because
// in the builder, the symlink is resolved within the context, therefore
// Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
// a nonexistent file.
expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName)
readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError)
}
func TestDockerfileOutsideTheBuildContext(t *testing.T) {
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
defer cleanup()
expectedError := "Forbidden path outside the build context: ../../Dockerfile ()"
readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError)
}
func TestNonExistingDockerfile(t *testing.T) {
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
defer cleanup()
expectedError := "Cannot locate specified Dockerfile: Dockerfile"
readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError)
}
func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) {
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
assert.NilError(t, err)
defer func() {
if err = tarStream.Close(); err != nil {
t.Fatalf("Error when closing tar stream: %s", err)
}
}()
if dockerfilePath == "" { // handled in BuildWithContext
dockerfilePath = builder.DefaultDockerfileName
}
config := backend.BuildConfig{
Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath},
Source: tarStream,
}
_, _, err = remotecontext.Detect(config)
assert.Check(t, is.Error(err, expectedError))
}
func TestCopyRunConfig(t *testing.T) {
defaultEnv := []string{"foo=1"}
defaultCmd := []string{"old"}
var testcases = []struct {
doc string
modifiers []runConfigModifier
expected *container.Config
}{
{
doc: "Set the command",
modifiers: []runConfigModifier{withCmd([]string{"new"})},
expected: &container.Config{
Cmd: []string{"new"},
Env: defaultEnv,
},
},
{
doc: "Set the command to a comment",
modifiers: []runConfigModifier{withCmdComment("comment", runtime.GOOS)},
expected: &container.Config{
Cmd: append(defaultShellForOS(runtime.GOOS), "#(nop) ", "comment"),
Env: defaultEnv,
},
},
{
doc: "Set the command and env",
modifiers: []runConfigModifier{
withCmd([]string{"new"}),
withEnv([]string{"one", "two"}),
},
expected: &container.Config{
Cmd: []string{"new"},
Env: []string{"one", "two"},
},
},
}
for _, testcase := range testcases {
runConfig := &container.Config{
Cmd: defaultCmd,
Env: defaultEnv,
}
runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...)
assert.Check(t, is.DeepEqual(testcase.expected, runConfigCopy), testcase.doc)
// Assert the original was not modified
assert.Check(t, runConfig != runConfigCopy, testcase.doc)
}
}
func fullMutableRunConfig() *container.Config {
return &container.Config{
Cmd: []string{"command", "arg1"},
Env: []string{"env1=foo", "env2=bar"},
ExposedPorts: nat.PortSet{
"1000/tcp": {},
"1001/tcp": {},
},
Volumes: map[string]struct{}{
"one": {},
"two": {},
},
Entrypoint: []string{"entry", "arg1"},
OnBuild: []string{"first", "next"},
Labels: map[string]string{
"label1": "value1",
"label2": "value2",
},
Shell: []string{"shell", "-c"},
}
}
func TestDeepCopyRunConfig(t *testing.T) {
runConfig := fullMutableRunConfig()
copy := copyRunConfig(runConfig)
assert.Check(t, is.DeepEqual(fullMutableRunConfig(), copy))
copy.Cmd[1] = "arg2"
copy.Env[1] = "env2=new"
copy.ExposedPorts["10002"] = struct{}{}
copy.Volumes["three"] = struct{}{}
copy.Entrypoint[1] = "arg2"
copy.OnBuild[0] = "start"
copy.Labels["label3"] = "value3"
copy.Shell[0] = "sh"
assert.Check(t, is.DeepEqual(fullMutableRunConfig(), runConfig))
}

View file

@ -0,0 +1,7 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import "github.com/docker/docker/pkg/idtools"
func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) {
return idMappings.RootPair(), nil
}

View file

@ -0,0 +1,54 @@
// +build windows

package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"fmt"
"testing"
"github.com/docker/docker/internal/testutil"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestNormalizeDest(t *testing.T) {
tests := []struct{ current, requested, expected, etext string }{
{``, `D:\`, ``, `Windows does not support destinations not on the system drive (C:)`},
{``, `e:/`, ``, `Windows does not support destinations not on the system drive (C:)`},
{`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`},
{`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`},
{`C`, ``, ``, `Current WorkingDir C is not platform consistent`},
{`D:\`, `.`, ``, "Windows does not support relative paths when WORKDIR is not the system drive"},
{``, `D`, `D`, ``},
{``, `./a1`, `.\a1`, ``},
{``, `.\b1`, `.\b1`, ``},
{``, `/`, `\`, ``},
{``, `\`, `\`, ``},
{``, `c:/`, `\`, ``},
{``, `c:\`, `\`, ``},
{``, `.`, `.`, ``},
{`C:\wdd`, `./a1`, `\wdd\a1`, ``},
{`C:\wde`, `.\b1`, `\wde\b1`, ``},
{`C:\wdf`, `/`, `\`, ``},
{`C:\wdg`, `\`, `\`, ``},
{`C:\wdh`, `c:/`, `\`, ``},
{`C:\wdi`, `c:\`, `\`, ``},
{`C:\wdj`, `.`, `\wdj`, ``},
{`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``},
{`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``},
{`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``},
{`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``},
}
for _, testcase := range tests {
msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested)
actual, err := normalizeDest(testcase.current, testcase.requested, "windows")
if testcase.etext == "" {
if !assert.Check(t, err, msg) {
continue
}
assert.Check(t, is.Equal(testcase.expected, actual), msg)
} else {
testutil.ErrorContains(t, err, testcase.etext)
}
}
}

View file

@ -0,0 +1,44 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"github.com/docker/go-metrics"
)
var (
buildsTriggered metrics.Counter
buildsFailed metrics.LabeledCounter
)
// Build metrics prometheus messages; these values must be initialized before
// use. See the example below in the "builds_failed" metric definition.
const (
metricsDockerfileSyntaxError = "dockerfile_syntax_error"
metricsDockerfileEmptyError = "dockerfile_empty_error"
metricsCommandNotSupportedError = "command_not_supported_error"
metricsErrorProcessingCommandsError = "error_processing_commands_error"
metricsBuildTargetNotReachableError = "build_target_not_reachable_error"
metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error"
metricsUnknownInstructionError = "unknown_instruction_error"
metricsBuildCanceled = "build_canceled"
)
func init() {
buildMetrics := metrics.NewNamespace("builder", "", nil)
buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds")
buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason")
for _, r := range []string{
metricsDockerfileSyntaxError,
metricsDockerfileEmptyError,
metricsCommandNotSupportedError,
metricsErrorProcessingCommandsError,
metricsBuildTargetNotReachableError,
metricsMissingOnbuildArgumentsError,
metricsUnknownInstructionError,
metricsBuildCanceled,
} {
buildsFailed.WithValues(r)
}
metrics.Register(buildMetrics)
}
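// Example usage (sketch): a caller is expected to bump buildsTriggered when a
// build starts and buildsFailed with one of the reason constants above when
// it fails, e.g.:
//
//	buildsTriggered.Inc()
//	buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc()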

View file

@ -0,0 +1,148 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"encoding/json"
"io"
"runtime"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)
// MockBackend implements the builder.Backend interface for unit testing
type MockBackend struct {
containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
commitFunc func(backend.CommitConfig) (image.ID, error)
getImageFunc func(string) (builder.Image, builder.ROLayer, error)
makeImageCacheFunc func(cacheFrom []string) builder.ImageCache
}
func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error {
return nil
}
func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
if m.containerCreateFunc != nil {
return m.containerCreateFunc(config)
}
return container.ContainerCreateCreatedBody{}, nil
}
func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) error {
return nil
}
func (m *MockBackend) CommitBuildStep(c backend.CommitConfig) (image.ID, error) {
if m.commitFunc != nil {
return m.commitFunc(c)
}
return "", nil
}
func (m *MockBackend) ContainerKill(containerID string, sig uint64) error {
return nil
}
func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error {
return nil
}
func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) {
return nil, nil
}
func (m *MockBackend) ContainerCreateWorkdir(containerID string) error {
return nil
}
func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error {
return nil
}
func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
if m.getImageFunc != nil {
return m.getImageFunc(refOrID)
}
return &mockImage{id: "theid"}, &mockLayer{}, nil
}
func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache {
if m.makeImageCacheFunc != nil {
return m.makeImageCacheFunc(cacheFrom)
}
return nil
}
func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) {
return nil, nil
}
type mockImage struct {
id string
config *container.Config
}
func (i *mockImage) ImageID() string {
return i.id
}
func (i *mockImage) RunConfig() *container.Config {
return i.config
}
func (i *mockImage) OperatingSystem() string {
return runtime.GOOS
}
func (i *mockImage) MarshalJSON() ([]byte, error) {
type rawImage mockImage
return json.Marshal(rawImage(*i))
}
type mockImageCache struct {
getCacheFunc func(parentID string, cfg *container.Config) (string, error)
}
func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) {
if mic.getCacheFunc != nil {
return mic.getCacheFunc(parentID, cfg)
}
return "", nil
}
type mockLayer struct{}
func (l *mockLayer) Release() error {
return nil
}
func (l *mockLayer) NewRWLayer() (builder.RWLayer, error) {
return &mockRWLayer{}, nil
}
func (l *mockLayer) DiffID() layer.DiffID {
return layer.DiffID("abcdef")
}
type mockRWLayer struct {
}
func (l *mockRWLayer) Release() error {
return nil
}
func (l *mockRWLayer) Commit() (builder.ROLayer, error) {
return nil, nil
}
func (l *mockRWLayer) Root() containerfs.ContainerFS {
return nil
}

View file

@ -0,0 +1,32 @@
package main
import (
"fmt"
"os"
"github.com/docker/docker/builder/dockerfile/parser"
)
func main() {
var f *os.File
var err error
if len(os.Args) < 2 {
fmt.Println("please supply filename(s)")
os.Exit(1)
}
for _, fn := range os.Args[1:] {
f, err = os.Open(fn)
if err != nil {
panic(err)
}
defer f.Close()
result, err := parser.Parse(f)
if err != nil {
panic(err)
}
fmt.Println(result.AST.Dump())
}
}

View file

@ -0,0 +1,59 @@
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
import (
"testing"
)
var invalidJSONArraysOfStrings = []string{
`["a",42,"b"]`,
`["a",123.456,"b"]`,
`["a",{},"b"]`,
`["a",{"c": "d"},"b"]`,
`["a",["c"],"b"]`,
`["a",true,"b"]`,
`["a",false,"b"]`,
`["a",null,"b"]`,
}
var validJSONArraysOfStrings = map[string][]string{
`[]`: {},
`[""]`: {""},
`["a"]`: {"a"},
`["a","b"]`: {"a", "b"},
`[ "a", "b" ]`: {"a", "b"},
`[ "a", "b" ]`: {"a", "b"},
` [ "a", "b" ] `: {"a", "b"},
`["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
}
func TestJSONArraysOfStrings(t *testing.T) {
for json, expected := range validJSONArraysOfStrings {
d := NewDefaultDirective()
if node, _, err := parseJSON(json, d); err != nil {
t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
} else {
i := 0
for node != nil {
if i >= len(expected) {
t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
}
if node.Value != expected[i] {
t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
}
node = node.Next
i++
}
if i != len(expected) {
t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
}
}
}
for _, json := range invalidJSONArraysOfStrings {
d := NewDefaultDirective()
if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray {
t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
}
}
}

View file

@ -0,0 +1,399 @@
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"unicode"
"unicode/utf8"
"github.com/docker/docker/builder/dockerfile/command"
)
var (
errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
)
const (
commandLabel = "LABEL"
)
// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
return &Node{}, nil, nil
}
// used for onbuild. Could potentially be used for anything that represents a
// statement with sub-statements.
//
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
//
func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
child, err := newNodeFromLine(rest, d)
if err != nil {
return nil, nil, err
}
return &Node{Children: []*Node{child}}, nil, nil
}
// helper to parse words (i.e. space-delimited or quoted strings) in a statement.
// The quotes are preserved as part of this function and they are stripped later
// as part of processWords().
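// For example, parseWords(`foo bar "abc xyz"`, d) yields
// []string{"foo", "bar", `"abc xyz"`}, with the quotes still attached.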
func parseWords(rest string, d *Directive) []string {
const (
inSpaces = iota // looking for start of a word
inWord
inQuote
)
words := []string{}
phase := inSpaces
word := ""
quote := '\000'
blankOK := false
var ch rune
var chWidth int
for pos := 0; pos <= len(rest); pos += chWidth {
if pos != len(rest) {
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
if phase == inSpaces { // Looking for start of word
if pos == len(rest) { // end of input
break
}
if unicode.IsSpace(ch) { // skip spaces
continue
}
phase = inWord // found it, fall through
}
if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
if blankOK || len(word) > 0 {
words = append(words, word)
}
break
}
if phase == inWord {
if unicode.IsSpace(ch) {
phase = inSpaces
if blankOK || len(word) > 0 {
words = append(words, word)
}
word = ""
blankOK = false
continue
}
if ch == '\'' || ch == '"' {
quote = ch
blankOK = true
phase = inQuote
}
if ch == d.escapeToken {
if pos+chWidth == len(rest) {
continue // just skip an escape token at end of line
}
// If we're not quoted and we see an escape token, then always just
// add the escape token plus the char to the word, even if the char
// is a quote.
word += string(ch)
pos += chWidth
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
word += string(ch)
continue
}
if phase == inQuote {
if ch == quote {
phase = inWord
}
// The escape token is special except for ' quotes - can't escape anything for '
if ch == d.escapeToken && quote != '\'' {
if pos+chWidth == len(rest) {
phase = inWord
continue // just skip the escape token at end
}
pos += chWidth
word += string(ch)
ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
}
word += string(ch)
}
}
return words
}
// parse environment-like statements. Note that this does *not* handle
// variable interpolation, which will be handled in the evaluator.
func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
// This is kind of tricky because we need to support the old
// variant: KEY name value
// as well as the new one: KEY name=value ...
// The trigger to know which one is being used will be whether we hit
// a space or = first. space ==> old, "=" ==> new
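// For example (sketch): rest "FOO bar" takes the old path and yields the pair
// FOO -> bar, while rest "FOO=bar BAZ=qux" takes the new path and yields the
// chained pairs FOO -> bar, BAZ -> qux.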
words := parseWords(rest, d)
if len(words) == 0 {
return nil, nil
}
// Old format (KEY name value)
if !strings.Contains(words[0], "=") {
parts := tokenWhitespace.Split(rest, 2)
if len(parts) < 2 {
return nil, fmt.Errorf(key + " must have two arguments")
}
return newKeyValueNode(parts[0], parts[1]), nil
}
var rootNode *Node
var prevNode *Node
for _, word := range words {
if !strings.Contains(word, "=") {
return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
}
parts := strings.SplitN(word, "=", 2)
node := newKeyValueNode(parts[0], parts[1])
rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
}
return rootNode, nil
}
func newKeyValueNode(key, value string) *Node {
return &Node{
Value: key,
Next: &Node{Value: value},
}
}
func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
if rootNode == nil {
rootNode = node
}
if prevNode != nil {
prevNode.Next = node
}
prevNode = node.Next
return rootNode, prevNode
}
func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
node, err := parseNameVal(rest, "ENV", d)
return node, nil, err
}
func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
node, err := parseNameVal(rest, commandLabel, d)
return node, nil, err
}
// NodeFromLabels returns a Node for the injected labels
func NodeFromLabels(labels map[string]string) *Node {
keys := []string{}
for key := range labels {
keys = append(keys, key)
}
// Sort the label to have a repeatable order
sort.Strings(keys)
labelPairs := []string{}
var rootNode *Node
var prevNode *Node
for _, key := range keys {
value := labels[key]
labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
// Value must be single quoted to prevent env variable expansion
// See https://github.com/docker/docker/issues/26027
node := newKeyValueNode(key, "'"+value+"'")
rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
}
return &Node{
Value: command.Label,
Original: commandLabel + " " + strings.Join(labelPairs, " "),
Next: rootNode,
}
}
// parses a statement containing one or more keyword definition(s) and/or
// value assignments, like `name1 name2= name3="" name4=value`.
// Note that this is a stricter format than the old format of assignment,
// allowed by parseNameVal(), in a way that this only allows assignment of the
// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
// In addition, a keyword definition alone is of the form `keyword` like `name1`
// above. And the assignments `name2=` and `name3=""` are equivalent and
// assign an empty value to the respective keywords.
func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
words := parseWords(rest, d)
if len(words) == 0 {
return nil, nil, nil
}
var (
rootnode *Node
prevNode *Node
)
for i, word := range words {
node := &Node{}
node.Value = word
if i == 0 {
rootnode = node
} else {
prevNode.Next = node
}
prevNode = node
}
return rootnode, nil, nil
}
// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
node := &Node{}
rootnode := node
prevnode := node
for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
prevnode = node
node.Value = str
node.Next = &Node{}
node = node.Next
}
// XXX to get around regexp.Split *always* providing an empty string at the
// end due to how our loop is constructed, nil out the last node in the
// chain.
prevnode.Next = nil
return rootnode, nil, nil
}
// parseString just wraps the string in quotes and returns a working node.
func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
n := &Node{}
n.Value = rest
return n, nil, nil
}
// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
if !strings.HasPrefix(rest, "[") {
return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
}
var myJSON []interface{}
if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
return nil, nil, err
}
var top, prev *Node
for _, str := range myJSON {
s, ok := str.(string)
if !ok {
return nil, nil, errDockerfileNotStringArray
}
node := &Node{Value: s}
if prev == nil {
top = node
} else {
prev.Next = node
}
prev = node
}
return top, map[string]bool{"json": true}, nil
}
// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, returns the unparsed argument as a single
// node.
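// For example, `["echo", "hi"]` is parsed as a JSON array into two string
// nodes, while `echo hi` becomes a single node whose Value is "echo hi".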
func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
if rest == "" {
return nil, nil, nil
}
node, attrs, err := parseJSON(rest, d)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileNotStringArray {
return nil, nil, err
}
node = &Node{}
node.Value = rest
return node, nil, nil
}
// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
// delimited string.
func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
node, attrs, err := parseJSON(rest, d)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileNotStringArray {
return nil, nil, err
}
return parseStringsWhitespaceDelimited(rest, d)
}
// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
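// For example (sketch), rest `CMD curl -f http://localhost/` produces a node
// with Value "CMD" whose Next holds the command parsed by parseMaybeJSON.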
func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
// Find end of first argument
var sep int
for ; sep < len(rest); sep++ {
if unicode.IsSpace(rune(rest[sep])) {
break
}
}
next := sep
for ; next < len(rest); next++ {
if !unicode.IsSpace(rune(rest[next])) {
break
}
}
if sep == 0 {
return nil, nil, nil
}
typ := rest[:sep]
cmd, attrs, err := parseMaybeJSON(rest[next:], d)
if err != nil {
return nil, nil, err
}
return &Node{Value: typ, Next: cmd}, attrs, err
}

View file

@ -0,0 +1,77 @@
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func TestParseNameValOldFormat(t *testing.T) {
directive := Directive{}
node, err := parseNameVal("foo bar", "LABEL", &directive)
assert.Check(t, err)
expected := &Node{
Value: "foo",
Next: &Node{Value: "bar"},
}
assert.DeepEqual(t, expected, node, cmpNodeOpt)
}
var cmpNodeOpt = cmp.AllowUnexported(Node{})
func TestParseNameValNewFormat(t *testing.T) {
directive := Directive{}
node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive)
assert.Check(t, err)
expected := &Node{
Value: "foo",
Next: &Node{
Value: "bar",
Next: &Node{
Value: "thing",
Next: &Node{
Value: "star",
},
},
},
}
assert.DeepEqual(t, expected, node, cmpNodeOpt)
}
func TestNodeFromLabels(t *testing.T) {
labels := map[string]string{
"foo": "bar",
"weird": "first' second",
}
expected := &Node{
Value: "label",
Original: `LABEL "foo"='bar' "weird"='first' second'`,
Next: &Node{
Value: "foo",
Next: &Node{
Value: "'bar'",
Next: &Node{
Value: "weird",
Next: &Node{
Value: "'first' second'",
},
},
},
},
}
node := NodeFromLabels(labels)
assert.DeepEqual(t, expected, node, cmpNodeOpt)
}
func TestParseNameValWithoutVal(t *testing.T) {
directive := Directive{}
// In Config.Env, a variable without `=` is removed from the environment. (#31634)
// However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922)
_, err := parseNameVal("foo", "ENV", &directive)
assert.Check(t, is.ErrorContains(err, ""), "ENV must have two arguments")
}

View file

@ -0,0 +1,369 @@
// Package parser implements a parser and parse tree dumper for Dockerfiles.
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"runtime"
"strconv"
"strings"
"unicode"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
)
// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
Value string // actual content
Next *Node // the next item in the current sexp
Children []*Node // the children of this sexp
Attributes map[string]bool // special attributes for this node
Original string // original line used before parsing
Flags []string // only top Node should have this set
StartLine int // the line in the original dockerfile where the node begins
endLine int // the line in the original dockerfile where the node ends
}
// Dump dumps the AST defined by `node` as a list of sexps.
// Returns a string suitable for printing.
func (node *Node) Dump() string {
str := ""
str += node.Value
if len(node.Flags) > 0 {
str += fmt.Sprintf(" %q", node.Flags)
}
for _, n := range node.Children {
str += "(" + n.Dump() + ")\n"
}
for n := node.Next; n != nil; n = n.Next {
if len(n.Children) > 0 {
str += " " + n.Dump()
} else {
str += " " + strconv.Quote(n.Value)
}
}
return strings.TrimSpace(str)
}
func (node *Node) lines(start, end int) {
node.StartLine = start
node.endLine = end
}
// AddChild adds a new child node, and updates line information
func (node *Node) AddChild(child *Node, startLine, endLine int) {
child.lines(startLine, endLine)
if node.StartLine < 0 {
node.StartLine = startLine
}
node.endLine = endLine
node.Children = append(node.Children, child)
}
var (
dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error)
tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`)
tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
tokenComment = regexp.MustCompile(`^#.*$`)
)
// DefaultEscapeToken is the default escape token
const DefaultEscapeToken = '\\'
// Directive is the structure used during a build run to hold the state of
// parsing directives.
type Directive struct {
escapeToken rune // Current escape token
platformToken string // Current platform token
lineContinuationRegex *regexp.Regexp // Current line continuation regex
processingComplete bool // Whether we are done looking for directives
escapeSeen bool // Whether the escape directive has been seen
platformSeen bool // Whether the platform directive has been seen
}
// setEscapeToken sets the default token for escaping characters in a Dockerfile.
func (d *Directive) setEscapeToken(s string) error {
if s != "`" && s != "\\" {
return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
}
d.escapeToken = rune(s[0])
d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
return nil
}
// setPlatformToken sets the default platform for pulling images in a Dockerfile.
func (d *Directive) setPlatformToken(s string) error {
s = strings.ToLower(s)
valid := []string{runtime.GOOS}
if system.LCOWSupported() {
valid = append(valid, "linux")
}
for _, item := range valid {
if s == item {
d.platformToken = s
return nil
}
}
return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
}
// possibleParserDirective looks for one or more parser directives '# escape=<char>' and
// '# platform=<string>'. Parser directives must precede any builder instruction
// or other comments, and cannot be repeated.
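// For example, a Dockerfile whose first line is "# escape=`" switches the
// escape token from the default backslash to a backtick before any
// instruction is parsed.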
func (d *Directive) possibleParserDirective(line string) error {
if d.processingComplete {
return nil
}
tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
if len(tecMatch) != 0 {
for i, n := range tokenEscapeCommand.SubexpNames() {
if n == "escapechar" {
if d.escapeSeen {
return errors.New("only one escape parser directive can be used")
}
d.escapeSeen = true
return d.setEscapeToken(tecMatch[i])
}
}
}
// Only recognise a platform token if LCOW is supported
if system.LCOWSupported() {
tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
if len(tpcMatch) != 0 {
for i, n := range tokenPlatformCommand.SubexpNames() {
if n == "platform" {
if d.platformSeen {
return errors.New("only one platform parser directive can be used")
}
d.platformSeen = true
return d.setPlatformToken(tpcMatch[i])
}
}
}
}
d.processingComplete = true
return nil
}
// NewDefaultDirective returns a new Directive with the default escape token
func NewDefaultDirective() *Directive {
directive := Directive{}
directive.setEscapeToken(string(DefaultEscapeToken))
return &directive
}
func init() {
// Dispatch Table. see line_parsers.go for the parse functions.
// The command is parsed and mapped to the line parser. The line parser
// receives the arguments but not the command, and returns an AST after
// reformulating the arguments according to the rules in the parser
// functions. Errors are propagated up by Parse() and the resulting AST can
// be incorporated directly into the existing AST as a next.
dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
command.Add: parseMaybeJSONToList,
command.Arg: parseNameOrNameVal,
command.Cmd: parseMaybeJSON,
command.Copy: parseMaybeJSONToList,
command.Entrypoint: parseMaybeJSON,
command.Env: parseEnv,
command.Expose: parseStringsWhitespaceDelimited,
command.From: parseStringsWhitespaceDelimited,
command.Healthcheck: parseHealthConfig,
command.Label: parseLabel,
command.Maintainer: parseString,
command.Onbuild: parseSubCommand,
command.Run: parseMaybeJSON,
command.Shell: parseMaybeJSON,
command.StopSignal: parseString,
command.User: parseString,
command.Volume: parseMaybeJSONToList,
command.Workdir: parseString,
}
}
// newNodeFromLine splits the line into parts, and dispatches to a function
// based on the command and command arguments. A Node is created from the
// result of the dispatch.
func newNodeFromLine(line string, directive *Directive) (*Node, error) {
cmd, flags, args, err := splitCommand(line)
if err != nil {
return nil, err
}
fn := dispatch[cmd]
// Ignore invalid Dockerfile instructions
if fn == nil {
fn = parseIgnore
}
next, attrs, err := fn(args, directive)
if err != nil {
return nil, err
}
return &Node{
Value: cmd,
Original: line,
Flags: flags,
Next: next,
Attributes: attrs,
}, nil
}
// Result is the result of parsing a Dockerfile
type Result struct {
AST *Node
EscapeToken rune
// TODO @jhowardmsft - see https://github.com/moby/moby/issues/34617
// This next field will be removed in a future update for LCOW support.
OS string
Warnings []string
}
// PrintWarnings to the writer
func (r *Result) PrintWarnings(out io.Writer) {
if len(r.Warnings) == 0 {
return
}
fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
}
// Parse reads lines from a Reader, parses the lines into an AST and returns
// the AST and escape token
func Parse(rwc io.Reader) (*Result, error) {
d := NewDefaultDirective()
currentLine := 0
root := &Node{StartLine: -1}
scanner := bufio.NewScanner(rwc)
warnings := []string{}
var err error
for scanner.Scan() {
bytesRead := scanner.Bytes()
if currentLine == 0 {
// First line, strip the byte-order-marker if present
bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
}
bytesRead, err = processLine(d, bytesRead, true)
if err != nil {
return nil, err
}
currentLine++
startLine := currentLine
line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
if isEndOfLine && line == "" {
continue
}
var hasEmptyContinuationLine bool
for !isEndOfLine && scanner.Scan() {
bytesRead, err := processLine(d, scanner.Bytes(), false)
if err != nil {
return nil, err
}
currentLine++
if isComment(scanner.Bytes()) {
// original line was a comment (processLine strips comments)
continue
}
if isEmptyContinuationLine(bytesRead) {
hasEmptyContinuationLine = true
continue
}
continuationLine := string(bytesRead)
continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
line += continuationLine
}
if hasEmptyContinuationLine {
warning := "[WARNING]: Empty continuation line found in:\n " + line
warnings = append(warnings, warning)
}
child, err := newNodeFromLine(line, d)
if err != nil {
return nil, err
}
root.AddChild(child, startLine, currentLine)
}
if len(warnings) > 0 {
warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
}
return &Result{
AST: root,
Warnings: warnings,
EscapeToken: d.escapeToken,
OS: d.platformToken,
}, handleScannerError(scanner.Err())
}
func trimComments(src []byte) []byte {
return tokenComment.ReplaceAll(src, []byte{})
}
func trimWhitespace(src []byte) []byte {
return bytes.TrimLeftFunc(src, unicode.IsSpace)
}
func isComment(line []byte) bool {
return tokenComment.Match(trimWhitespace(line))
}
func isEmptyContinuationLine(line []byte) bool {
return len(trimWhitespace(line)) == 0
}
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
func trimContinuationCharacter(line string, d *Directive) (string, bool) {
if d.lineContinuationRegex.MatchString(line) {
line = d.lineContinuationRegex.ReplaceAllString(line, "")
return line, false
}
return line, true
}
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
// to preserve whitespace on continuation lines. Why is that done?
func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
if stripLeftWhitespace {
token = trimWhitespace(token)
}
return trimComments(token), d.possibleParserDirective(string(token))
}
func handleScannerError(err error) error {
switch err {
case bufio.ErrTooLong:
return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1)
default:
return err
}
}

View file

@ -0,0 +1,174 @@
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
const testDir = "testfiles"
const negativeTestDir = "testfiles-negative"
const testFileLineInfo = "testfile-line/Dockerfile"
func getDirs(t *testing.T, dir string) []string {
f, err := os.Open(dir)
assert.NilError(t, err)
defer f.Close()
dirs, err := f.Readdirnames(0)
assert.NilError(t, err)
return dirs
}
func TestParseErrorCases(t *testing.T) {
for _, dir := range getDirs(t, negativeTestDir) {
dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")
df, err := os.Open(dockerfile)
assert.NilError(t, err, dockerfile)
defer df.Close()
_, err = Parse(df)
assert.Check(t, is.ErrorContains(err, ""), dockerfile)
}
}
func TestParseCases(t *testing.T) {
for _, dir := range getDirs(t, testDir) {
dockerfile := filepath.Join(testDir, dir, "Dockerfile")
resultfile := filepath.Join(testDir, dir, "result")
df, err := os.Open(dockerfile)
assert.NilError(t, err, dockerfile)
defer df.Close()
result, err := Parse(df)
assert.NilError(t, err, dockerfile)
content, err := ioutil.ReadFile(resultfile)
assert.NilError(t, err, resultfile)
if runtime.GOOS == "windows" {
// CRLF --> CR to match Unix behavior
content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
}
assert.Check(t, is.Equal(result.AST.Dump()+"\n", string(content)), "In "+dockerfile)
}
}
func TestParseWords(t *testing.T) {
tests := []map[string][]string{
{
"input": {"foo"},
"expect": {"foo"},
},
{
"input": {"foo bar"},
"expect": {"foo", "bar"},
},
{
"input": {"foo\\ bar"},
"expect": {"foo\\ bar"},
},
{
"input": {"foo=bar"},
"expect": {"foo=bar"},
},
{
"input": {"foo bar 'abc xyz'"},
"expect": {"foo", "bar", "'abc xyz'"},
},
{
"input": {`foo bar "abc xyz"`},
"expect": {"foo", "bar", `"abc xyz"`},
},
{
"input": {"àöû"},
"expect": {"àöû"},
},
{
"input": {`föo bàr "âbc xÿz"`},
"expect": {"föo", "bàr", `"âbc xÿz"`},
},
}
for _, test := range tests {
words := parseWords(test["input"][0], NewDefaultDirective())
assert.Check(t, is.DeepEqual(test["expect"], words))
}
}
func TestParseIncludesLineNumbers(t *testing.T) {
df, err := os.Open(testFileLineInfo)
assert.NilError(t, err)
defer df.Close()
result, err := Parse(df)
assert.NilError(t, err)
ast := result.AST
assert.Check(t, is.Equal(5, ast.StartLine))
assert.Check(t, is.Equal(31, ast.endLine))
assert.Check(t, is.Len(ast.Children, 3))
expected := [][]int{
{5, 5},
{11, 12},
{17, 31},
}
for i, child := range ast.Children {
msg := fmt.Sprintf("Child %d", i)
assert.Check(t, is.DeepEqual(expected[i], []int{child.StartLine, child.endLine}), msg)
}
}
func TestParseWarnsOnEmptyContinuationLine(t *testing.T) {
dockerfile := bytes.NewBufferString(`
FROM alpine:3.6
RUN something \
following \
more
RUN another \
thing
RUN non-indented \
# this is a comment
after-comment
RUN indented \
# this is an indented comment
comment
`)
result, err := Parse(dockerfile)
assert.NilError(t, err)
warnings := result.Warnings
assert.Check(t, is.Len(warnings, 3))
assert.Check(t, is.Contains(warnings[0], "Empty continuation line found in"))
assert.Check(t, is.Contains(warnings[0], "RUN something following more"))
assert.Check(t, is.Contains(warnings[1], "RUN another thing"))
assert.Check(t, is.Contains(warnings[2], "will become errors in a future release"))
}
func TestParseReturnsScannerErrors(t *testing.T) {
label := strings.Repeat("a", bufio.MaxScanTokenSize)
dockerfile := strings.NewReader(fmt.Sprintf(`
FROM image
LABEL test=%s
`, label))
_, err := Parse(dockerfile)
assert.Check(t, is.Error(err, "dockerfile line greater than max allowed size of 65535"))
}

View file

@ -0,0 +1,118 @@
package parser // import "github.com/docker/docker/builder/dockerfile/parser"
import (
"strings"
"unicode"
)
// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
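// For example (sketch), splitCommand(`COPY --from=builder /src /dst`) returns
// cmd "copy", flags []string{"--from=builder"}, and args "/src /dst".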
func splitCommand(line string) (string, []string, string, error) {
var args string
var flags []string
// Make sure we get the same results irrespective of leading/trailing spaces
cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
cmd := strings.ToLower(cmdline[0])
if len(cmdline) == 2 {
var err error
args, flags, err = extractBuilderFlags(cmdline[1])
if err != nil {
return "", nil, "", err
}
}
return cmd, flags, strings.TrimSpace(args), nil
}
func extractBuilderFlags(line string) (string, []string, error) {
// Parses the BuilderFlags and returns the remaining part of the line
const (
inSpaces = iota // looking for start of a word
inWord
inQuote
)
words := []string{}
phase := inSpaces
word := ""
quote := '\000'
blankOK := false
var ch rune
for pos := 0; pos <= len(line); pos++ {
if pos != len(line) {
ch = rune(line[pos])
}
if phase == inSpaces { // Looking for start of word
if pos == len(line) { // end of input
break
}
if unicode.IsSpace(ch) { // skip spaces
continue
}
// Only keep going if the next word starts with --
if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
return line[pos:], words, nil
}
phase = inWord // found something with "--", fall through
}
if (phase == inWord || phase == inQuote) && (pos == len(line)) {
if word != "--" && (blankOK || len(word) > 0) {
words = append(words, word)
}
break
}
if phase == inWord {
if unicode.IsSpace(ch) {
phase = inSpaces
if word == "--" {
return line[pos:], words, nil
}
if blankOK || len(word) > 0 {
words = append(words, word)
}
word = ""
blankOK = false
continue
}
if ch == '\'' || ch == '"' {
quote = ch
blankOK = true
phase = inQuote
continue
}
if ch == '\\' {
if pos+1 == len(line) {
continue // just skip \ at end
}
pos++
ch = rune(line[pos])
}
word += string(ch)
continue
}
if phase == inQuote {
if ch == quote {
phase = inWord
continue
}
if ch == '\\' {
if pos+1 == len(line) {
phase = inWord
continue // just skip \ at end
}
pos++
ch = rune(line[pos])
}
word += string(ch)
}
}
return "", words, nil
}

View file

@ -0,0 +1,35 @@
# ESCAPE=\
FROM brimstone/ubuntu:14.04
# TORUN -v /var/run/docker.sock:/var/run/docker.sock
ENV GOPATH \
/go
# Install the packages we need, clean up after them and us
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& go get -v github.com/brimstone/consuldock \
&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH

View file

@ -0,0 +1,3 @@
FROM busybox
ENV PATH

View file

@ -0,0 +1 @@
CMD [ "echo", [ "nested json" ] ]

View file

@ -0,0 +1,11 @@
FROM ubuntu:14.04
LABEL maintainer Seongyeol Lim <seongyeol37@gmail.com>
COPY . /go/src/github.com/docker/docker
ADD . /
ADD null /
COPY nullfile /tmp
ADD [ "vimrc", "/tmp" ]
COPY [ "bashrc", "/tmp" ]
COPY [ "test file", "/tmp" ]
ADD [ "test file", "/tmp/test file" ]

View file

@ -0,0 +1,10 @@
(from "ubuntu:14.04")
(label "maintainer" "Seongyeol Lim <seongyeol37@gmail.com>")
(copy "." "/go/src/github.com/docker/docker")
(add "." "/")
(add "null" "/")
(copy "nullfile" "/tmp")
(add "vimrc" "/tmp")
(copy "bashrc" "/tmp")
(copy "test file" "/tmp")
(add "test file" "/tmp/test file")

View file

@ -0,0 +1,26 @@
#escape=\
FROM brimstone/ubuntu:14.04
LABEL maintainer brimstone@the.narro.ws
# TORUN -v /var/run/docker.sock:/var/run/docker.sock
ENV GOPATH /go
# Set our command
ENTRYPOINT ["/usr/local/bin/consuldock"]
# Install the packages we need, clean up after them and us
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& go get -v github.com/brimstone/consuldock \
&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH

View file

@ -0,0 +1,5 @@
(from "brimstone/ubuntu:14.04")
(label "maintainer" "brimstone@the.narro.ws")
(env "GOPATH" "/go")
(entrypoint "/usr/local/bin/consuldock")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")

View file

@ -0,0 +1,52 @@
FROM brimstone/ubuntu:14.04
CMD []
ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
EXPOSE 8500 8600 8400 8301 8302
RUN apt-get update \
&& apt-get install -y unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists
RUN cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.*
ENV GOPATH /go
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& go get -v github.com/hashicorp/consul \
&& mv $GOPATH/bin/consul /usr/bin/consul \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH

View file

@ -0,0 +1,9 @@
(from "brimstone/ubuntu:14.04")
(cmd)
(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
(expose "8500" "8600" "8400" "8301" "8302")
(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists")
(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
(env "GOPATH" "/go")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")

View file

@ -0,0 +1,3 @@
FROM alpine:3.5
RUN something \

View file

@ -0,0 +1,2 @@
(from "alpine:3.5")
(run "something")

View file

@ -0,0 +1,36 @@
FROM ubuntu:14.04
RUN echo hello\
world\
goodnight \
moon\
light\
ning
RUN echo hello \
world
RUN echo hello \
world
RUN echo hello \
goodbye\
frog
RUN echo hello \
world
RUN echo hi \
\
world \
\
good\
\
night
RUN echo goodbye\
frog
RUN echo good\
bye\
frog
RUN echo hello \
# this is a comment
# this is a comment with a blank line surrounding it
this is some more useful stuff

View file

@ -0,0 +1,10 @@
(from "ubuntu:14.04")
(run "echo hello world goodnight moon lightning")
(run "echo hello world")
(run "echo hello world")
(run "echo hello goodbyefrog")
(run "echo hello world")
(run "echo hi world goodnight")
(run "echo goodbyefrog")
(run "echo goodbyefrog")
(run "echo hello this is some more useful stuff")

View file

@ -0,0 +1,54 @@
FROM cpuguy83/ubuntu
ENV NAGIOS_HOME /opt/nagios
ENV NAGIOS_USER nagios
ENV NAGIOS_GROUP nagios
ENV NAGIOS_CMDUSER nagios
ENV NAGIOS_CMDGROUP nagios
ENV NAGIOSADMIN_USER nagiosadmin
ENV NAGIOSADMIN_PASS nagios
ENV APACHE_RUN_USER nagios
ENV APACHE_RUN_GROUP nagios
ENV NAGIOS_TIMEZONE UTC
RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )
ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default
RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf
RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
RUN cp /etc/services /var/spool/postfix/etc/
RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
ADD nagios.init /etc/sv/nagios/run
ADD apache.init /etc/sv/apache/run
ADD postfix.init /etc/sv/postfix/run
ADD postfix.stop /etc/sv/postfix/finish
ADD start.sh /usr/local/bin/start_nagios
ENV APACHE_LOCK_DIR /var/run
ENV APACHE_LOG_DIR /var/log/apache2
EXPOSE 80
VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]
CMD ["/usr/local/bin/start_nagios"]

View file

@ -0,0 +1,40 @@
(from "cpuguy83/ubuntu")
(env "NAGIOS_HOME" "/opt/nagios")
(env "NAGIOS_USER" "nagios")
(env "NAGIOS_GROUP" "nagios")
(env "NAGIOS_CMDUSER" "nagios")
(env "NAGIOS_CMDGROUP" "nagios")
(env "NAGIOSADMIN_USER" "nagiosadmin")
(env "NAGIOSADMIN_PASS" "nagios")
(env "APACHE_RUN_USER" "nagios")
(env "APACHE_RUN_GROUP" "nagios")
(env "NAGIOS_TIMEZONE" "UTC")
(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
(run "cp /etc/services /var/spool/postfix/etc/")
(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
(add "nagios.init" "/etc/sv/nagios/run")
(add "apache.init" "/etc/sv/apache/run")
(add "postfix.init" "/etc/sv/postfix/run")
(add "postfix.stop" "/etc/sv/postfix/finish")
(add "start.sh" "/usr/local/bin/start_nagios")
(env "APACHE_LOCK_DIR" "/var/run")
(env "APACHE_LOG_DIR" "/var/log/apache2")
(expose "80")
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
(cmd "/usr/local/bin/start_nagios")

View file

@ -0,0 +1,102 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM ubuntu:14.04
LABEL maintainer Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
apt-utils \
aufs-tools \
automake \
btrfs-tools \
build-essential \
curl \
dpkg-sig \
git \
iptables \
libapparmor-dev \
libcap-dev \
mercurial \
pandoc \
parallel \
reprepro \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
--no-install-recommends
# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 darwin/386 \
freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
RUN go get golang.org/x/tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

View file

@ -0,0 +1,24 @@
(from "ubuntu:14.04")
(label "maintainer" "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends")
(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
(env "PATH" "/usr/local/go/bin:$PATH")
(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm")
(env "GOARM" "5")
(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
(run "go get golang.org/x/tools/cmd/cover")
(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
(run "git config --global user.email 'docker-dummy@example.com'")
(run "groupadd -r docker")
(run "useradd --create-home --gid docker unprivilegeduser")
(volume "/var/lib/docker")
(workdir "/go/src/github.com/docker/docker")
(env "DOCKER_BUILDTAGS" "apparmor selinux")
(entrypoint "hack/dind")
(copy "." "/go/src/github.com/docker/docker")

View file

@ -0,0 +1,23 @@
FROM ubuntu
ENV name value
ENV name=value
ENV name=value name2=value2
ENV name="value value1"
ENV name=value\ value2
ENV name="value'quote space'value2"
ENV name='value"double quote"value2'
ENV name=value\ value2 name2=value2\ value3
ENV name="a\"b"
ENV name="a\'b"
ENV name='a\'b'
ENV name='a\'b''
ENV name='a\"b'
ENV name="''"
# don't put anything after the next line - it must be the last line of the
# Dockerfile and it must end with \
ENV name=value \
name1=value1 \
name2="value2a \
value2b" \
name3="value3a\n\"value3b\"" \
name4="value4a\\nvalue4b" \

View file

@ -0,0 +1,16 @@
(from "ubuntu")
(env "name" "value")
(env "name" "value")
(env "name" "value" "name2" "value2")
(env "name" "\"value value1\"")
(env "name" "value\\ value2")
(env "name" "\"value'quote space'value2\"")
(env "name" "'value\"double quote\"value2'")
(env "name" "value\\ value2" "name2" "value2\\ value3")
(env "name" "\"a\\\"b\"")
(env "name" "\"a\\'b\"")
(env "name" "'a\\'b'")
(env "name" "'a\\'b''")
(env "name" "'a\\\"b'")
(env "name" "\"''\"")
(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"")

View file

@ -0,0 +1,9 @@
# A comment comes first, so the parser should not look for a parser directive
# on the following line. The escape directive below is therefore ignored, and
# the default backslash line continuation applies.
# escape = `
FROM image
LABEL maintainer foo@bar.com
ENV GOPATH \
\go

View file

@ -0,0 +1,3 @@
(from "image")
(label "maintainer" "foo@bar.com")
(env "GOPATH" "\\go")

View file

@ -0,0 +1,7 @@
# escape = ``
# There is no blank line after the directives. This still succeeds, but goes
# against best practices.
FROM image
LABEL maintainer foo@bar.com
ENV GOPATH `
\go

View file

@ -0,0 +1,3 @@
(from "image")
(label "maintainer" "foo@bar.com")
(env "GOPATH" "\\go")

View file

@ -0,0 +1,6 @@
#escape = `
FROM image
LABEL maintainer foo@bar.com
ENV GOPATH `
\go

View file

@ -0,0 +1,3 @@
(from "image")
(label "maintainer" "foo@bar.com")
(env "GOPATH" "\\go")

View file

@ -0,0 +1,14 @@
FROM ubuntu:14.04
LABEL maintainer Erik \\Hollensbe <erik@hollensbe.org>\"
RUN apt-get \update && \
apt-get \"install znc -y
ADD \conf\\" /.znc
RUN foo \
bar \
baz
CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]

View file

@ -0,0 +1,6 @@
(from "ubuntu:14.04")
(label "maintainer" "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
(run "apt-get \\update && apt-get \\\"install znc -y")
(add "\\conf\\\\\"" "/.znc")
(run "foo bar baz")
(cmd "/usr\\\"/bin/znc" "-f" "-r")

View file

@ -0,0 +1,10 @@
FROM scratch
COPY foo /tmp/
COPY --user=me foo /tmp/
COPY --doit=true foo /tmp/
COPY --user=me --doit=true foo /tmp/
COPY --doit=true -- foo /tmp/
COPY -- foo /tmp/
CMD --doit [ "a", "b" ]
CMD --doit=true -- [ "a", "b" ]
CMD --doit -- [ ]

View file

@ -0,0 +1,10 @@
(from "scratch")
(copy "foo" "/tmp/")
(copy ["--user=me"] "foo" "/tmp/")
(copy ["--doit=true"] "foo" "/tmp/")
(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
(copy ["--doit=true"] "foo" "/tmp/")
(copy "foo" "/tmp/")
(cmd ["--doit"] "a" "b")
(cmd ["--doit=true"] "a" "b")
(cmd ["--doit"])

View file

@ -0,0 +1,10 @@
FROM debian
ADD check.sh main.sh /app/
CMD /app/main.sh
HEALTHCHECK
HEALTHCHECK --interval=5s --timeout=3s --retries=3 \
CMD /app/check.sh --quiet
HEALTHCHECK CMD
HEALTHCHECK CMD a b
HEALTHCHECK --timeout=3s CMD ["foo"]
HEALTHCHECK CONNECT TCP 7000

View file

@ -0,0 +1,9 @@
(from "debian")
(add "check.sh" "main.sh" "/app/")
(cmd "/app/main.sh")
(healthcheck)
(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet")
(healthcheck "CMD")
(healthcheck "CMD" "a b")
(healthcheck ["--timeout=3s"] "CMD" "foo")
(healthcheck "CONNECT" "TCP 7000")

View file

@ -0,0 +1,15 @@
FROM ubuntu:14.04
RUN apt-get update && apt-get install wget -y
RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
RUN dpkg -i influxdb_latest_amd64.deb
RUN rm -r /opt/influxdb/shared
VOLUME /opt/influxdb/shared
CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml
EXPOSE 8083
EXPOSE 8086
EXPOSE 8090
EXPOSE 8099

View file

@ -0,0 +1,11 @@
(from "ubuntu:14.04")
(run "apt-get update && apt-get install wget -y")
(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
(run "dpkg -i influxdb_latest_amd64.deb")
(run "rm -r /opt/influxdb/shared")
(volume "/opt/influxdb/shared")
(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
(expose "8083")
(expose "8086")
(expose "8090")
(expose "8099")

View file

@ -0,0 +1 @@
CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"

View file

@ -0,0 +1 @@
(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")

View file

@ -0,0 +1 @@
CMD '["echo", "Well, JSON in a string is JSON too?"]'

View file

@ -0,0 +1 @@
(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")

View file

@ -0,0 +1 @@
CMD ['echo','single quotes are invalid JSON']

View file

@ -0,0 +1 @@
(cmd "['echo','single quotes are invalid JSON']")

View file

@ -0,0 +1 @@
CMD ["echo", "Please, close the brackets when you're done"

View file

@ -0,0 +1 @@
(cmd "[\"echo\", \"Please, close the brackets when you're done\"")

View file

@ -0,0 +1 @@
CMD ["echo", "look ma, no quote!]

View file

@ -0,0 +1 @@
(cmd "[\"echo\", \"look ma, no quote!]")

View file

@ -0,0 +1,8 @@
CMD []
CMD [""]
CMD ["a"]
CMD ["a","b"]
CMD [ "a", "b" ]
CMD [ "a", "b" ]
CMD [ "a", "b" ]
CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]

View file

@ -0,0 +1,8 @@
(cmd)
(cmd "")
(cmd "a")
(cmd "a" "b")
(cmd "a" "b")
(cmd "a" "b")
(cmd "a" "b")
(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00")

View file

@ -0,0 +1,7 @@
FROM ubuntu:14.04
LABEL maintainer James Turnbull "james@example.com"
ENV REFRESHED_AT 2014-06-01
RUN apt-get update
RUN apt-get -y install redis-server redis-tools
EXPOSE 6379
ENTRYPOINT [ "/usr/bin/redis-server" ]

View file

@ -0,0 +1,7 @@
(from "ubuntu:14.04")
(label "maintainer" "James Turnbull \"james@example.com\"")
(env "REFRESHED_AT" "2014-06-01")
(run "apt-get update")
(run "apt-get -y install redis-server redis-tools")
(expose "6379")
(entrypoint "/usr/bin/redis-server")

View file

@ -0,0 +1,48 @@
FROM busybox:buildroot-2014.02
LABEL maintainer docker <docker@docker.io>
ONBUILD RUN ["echo", "test"]
ONBUILD RUN echo test
ONBUILD COPY . /
# RUN Commands \
# linebreak in comment \
RUN ["ls", "-la"]
RUN ["echo", "'1234'"]
RUN echo "1234"
RUN echo 1234
RUN echo '1234' && \
echo "456" && \
echo 789
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /test /test2 /test3/test
# ENV \
ENV SCUBA 1 DUBA 3
ENV SCUBA "1 DUBA 3"
# CMD \
CMD ["echo", "test"]
CMD echo test
CMD echo "test"
CMD echo 'test'
CMD echo 'test' | wc -
#EXPOSE\
EXPOSE 3000
EXPOSE 9000 5000 6000
USER docker
USER docker:root
VOLUME ["/test"]
VOLUME ["/test", "/test2"]
VOLUME /test3
WORKDIR /test
ADD . /
COPY . copy

View file

@ -0,0 +1,29 @@
(from "busybox:buildroot-2014.02")
(label "maintainer" "docker <docker@docker.io>")
(onbuild (run "echo" "test"))
(onbuild (run "echo test"))
(onbuild (copy "." "/"))
(run "ls" "-la")
(run "echo" "'1234'")
(run "echo \"1234\"")
(run "echo 1234")
(run "echo '1234' && echo \"456\" && echo 789")
(run "sh -c 'echo root:testpass > /tmp/passwd'")
(run "mkdir -p /test /test2 /test3/test")
(env "SCUBA" "1 DUBA 3")
(env "SCUBA" "\"1 DUBA 3\"")
(cmd "echo" "test")
(cmd "echo test")
(cmd "echo \"test\"")
(cmd "echo 'test'")
(cmd "echo 'test' | wc -")
(expose "3000")
(expose "9000" "5000" "6000")
(user "docker")
(user "docker:root")
(volume "/test")
(volume "/test" "/test2")
(volume "/test3")
(workdir "/test")
(add "." "/")
(copy "." "copy")

View file

@ -0,0 +1,16 @@
FROM ubuntu:14.04
RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
ADD .muttrc /
ADD .offlineimaprc /
ADD .tmux.conf /
ADD mutt /.mutt
ADD vim /.vim
ADD vimrc /.vimrc
ADD crontab /etc/crontab
RUN chmod 644 /etc/crontab
RUN mkdir /Mail
RUN mkdir /.offlineimap
RUN echo "export TERM=screen-256color" >/.zshenv
CMD setsid cron; tmux -2

View file

@ -0,0 +1,14 @@
(from "ubuntu:14.04")
(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
(add ".muttrc" "/")
(add ".offlineimaprc" "/")
(add ".tmux.conf" "/")
(add "mutt" "/.mutt")
(add "vim" "/.vim")
(add "vimrc" "/.vimrc")
(add "crontab" "/etc/crontab")
(run "chmod 644 /etc/crontab")
(run "mkdir /Mail")
(run "mkdir /.offlineimap")
(run "echo \"export TERM=screen-256color\" >/.zshenv")
(cmd "setsid cron; tmux -2")

View file

@ -0,0 +1,3 @@
FROM foo
VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs

Some files were not shown because too many files have changed in this diff.