Merge branch 'master' of github.com:docker/docker into debug

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)
Dan Walsh 2014-11-25 14:09:19 -05:00
Parents bce9ed0e4c 7ebcdad030
Commit 61586414ca
114 changed files with 2592 additions and 479 deletions


@ -1,5 +1,21 @@
# Changelog # Changelog
## 1.3.2 (2014-11-20)
#### Security
- Fix tar breakout vulnerability
* Extractions are now sandboxed chroot
- Security options are no longer committed to images
#### Runtime
- Fix deadlock in `docker ps -f exited=1`
- Fix a bug when `--volumes-from` references a container that failed to start
#### Registry
+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
- Skip the experimental registry v2 API when mirroring is enabled
## 1.3.1 (2014-10-28) ## 1.3.1 (2014-10-28)
#### Security #### Security
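The CIDR support for `--insecure-registry` noted in the Registry section above presumably means the daemon now checks whether a registry endpoint's address falls inside a configured network instead of matching hostnames literally. The registry code itself is not part of this diff, so the following is only a minimal sketch of that kind of check using the standard library; the helper name and the hard-coded CIDR list are illustrative, not Docker's actual implementation.

    package main

    import (
        "fmt"
        "net"
    )

    // insecureCIDRs would be populated from repeated --insecure-registry flags.
    var insecureCIDRs = []string{"10.1.0.0/16", "127.0.0.0/8"}

    // isInsecureRegistry reports whether host resolves to an address inside any
    // configured insecure CIDR. Purely illustrative; error handling is minimal.
    func isInsecureRegistry(host string) bool {
        ips, err := net.LookupIP(host)
        if err != nil {
            return false
        }
        for _, cidr := range insecureCIDRs {
            _, network, err := net.ParseCIDR(cidr)
            if err != nil {
                continue
            }
            for _, ip := range ips {
                if network.Contains(ip) {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        fmt.Println(isInsecureRegistry("localhost"))            // true: 127.0.0.1 is in 127.0.0.0/8
        fmt.Println(isInsecureRegistry("registry-1.docker.io")) // false for a public registry (or on lookup failure)
    }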


@ -172,7 +172,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`. absolute majority of the maintainers of `registry/`.
For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) For more details see [MAINTAINERS.md](project/MAINTAINERS.md)
### Sign your work ### Sign your work


@ -1,23 +1,39 @@
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
# env vars passed through directly to Docker's build scripts
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
# `docs/sources/contributing/devenvironment.md` and `project/PACKAGERS.md` have some limited documentation of some of these
DOCKER_ENVS := \
-e BUILDFLAGS \
-e DOCKER_CLIENTONLY \
-e DOCKER_EXECDRIVER \
-e DOCKER_GRAPHDRIVER \
-e TESTDIRS \
-e TESTFLAGS \
-e TIMEOUT
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BINDDIR=. shell` or `make BINDDIR= test` # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
# (default to no bind mount if DOCKER_HOST is set) # (default to no bind mount if DOCKER_HOST is set)
BINDDIR := $(if $(DOCKER_HOST),,bundles) BINDDIR := $(if $(DOCKER_HOST),,bundles)
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
# to allow `make DOCSPORT=9000 docs` # to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000 DOCSPORT := 8000
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
DOCKER_ENVS := -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS \
-e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER \
-e DOCKER_CLIENTONLY
DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
# to allow `make DOCSDIR=docs docs-shell`
DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET
# for some docs workarounds (see below in "docs-build" target)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
default: binary default: binary


@ -1 +1 @@
1.3.1-dev 1.3.2-dev


@ -38,6 +38,7 @@ import (
"github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/pkg/units" "github.com/docker/docker/pkg/units"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
@ -47,6 +48,10 @@ const (
tarHeaderSize = 512 tarHeaderSize = 512
) )
var (
acceptedImageFilterTags = map[string]struct{}{"dangling": {}}
)
func (cli *DockerCli) CmdHelp(args ...string) error { func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 1 { if len(args) > 1 {
method, exists := cli.getMethod(args[:2]...) method, exists := cli.getMethod(args[:2]...)
@ -77,6 +82,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
if err := cmd.Parse(args); err != nil { if err := cmd.Parse(args); err != nil {
return nil return nil
} }
@ -110,13 +116,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
} else { } else {
context = ioutil.NopCloser(buf) context = ioutil.NopCloser(buf)
} }
} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { } else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
isRemote = true isRemote = true
} else { } else {
root := cmd.Arg(0) root := cmd.Arg(0)
if utils.IsGIT(root) { if urlutil.IsGitURL(root) {
remoteURL := cmd.Arg(0) remoteURL := cmd.Arg(0)
if !utils.ValidGitTransport(remoteURL) { if !urlutil.IsGitTransport(remoteURL) {
remoteURL = "https://" + remoteURL remoteURL = "https://" + remoteURL
} }
@ -213,6 +219,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
v.Set("forcerm", "1") v.Set("forcerm", "1")
} }
if *pull {
v.Set("pull", "1")
}
cli.LoadConfigFile() cli.LoadConfigFile()
headers := http.Header(make(map[string][]string)) headers := http.Header(make(map[string][]string))
@ -508,6 +517,12 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
if remoteInfo.Exists("MemTotal") { if remoteInfo.Exists("MemTotal") {
fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal"))))
} }
if remoteInfo.Exists("Name") {
fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name"))
}
if remoteInfo.Exists("ID") {
fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID"))
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
if remoteInfo.Exists("Debug") { if remoteInfo.Exists("Debug") {
@ -548,6 +563,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
} }
if remoteInfo.Exists("Labels") {
fmt.Fprintln(cli.out, "Labels:")
for _, attribute := range remoteInfo.GetList("Labels") {
fmt.Fprintf(cli.out, " %s\n", attribute)
}
}
return nil return nil
} }
@ -1336,6 +1358,12 @@ func (cli *DockerCli) CmdImages(args ...string) error {
} }
} }
for name := range imageFilterArgs {
if _, ok := acceptedImageFilterTags[name]; !ok {
return fmt.Errorf("Invalid filter '%s'", name)
}
}
matchName := cmd.Arg(0) matchName := cmd.Arg(0)
// FIXME: --viz and --tree are deprecated. Remove them in a future version. // FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree { if *flViz || *flTree {
@ -2145,7 +2173,11 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false)
//if image not found try to pull it //if image not found try to pull it
if statusCode == 404 { if statusCode == 404 {
fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) repo, tag := parsers.ParseRepositoryTag(config.Image)
if tag == "" {
tag = graph.DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag)
// we don't want to write to stdout anything apart from container.ID // we don't want to write to stdout anything apart from container.ID
if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
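The "Unable to find image" message above now splits the requested image into repository and tag and falls back to graph.DEFAULTTAG when no tag was given, so `docker run busybox` reports `busybox:latest`. As a rough, self-contained illustration of that split (the real parsers.ParseRepositoryTag lives in pkg/parsers; the helper below only approximates it, including leaving registry ports such as `host:5000/img` alone):

    package main

    import (
        "fmt"
        "strings"
    )

    // splitRepoTag is a simplified stand-in for parsers.ParseRepositoryTag:
    // split on the last ':' unless what follows contains a '/', in which case
    // the ':' belongs to a registry port, not a tag.
    func splitRepoTag(image string) (repo, tag string) {
        i := strings.LastIndex(image, ":")
        if i < 0 || strings.Contains(image[i+1:], "/") {
            return image, ""
        }
        return image[:i], image[i+1:]
    }

    func main() {
        for _, img := range []string{"busybox", "busybox:1.0", "myreg:5000/busybox"} {
            repo, tag := splitRepoTag(img)
            if tag == "" {
                tag = "latest" // what graph.DEFAULTTAG is expected to hold
            }
            fmt.Printf("Unable to find image '%s:%s' locally\n", repo, tag)
        }
    }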


@ -3,12 +3,15 @@ package api
import ( import (
"fmt" "fmt"
"mime" "mime"
"os"
"path"
"strings" "strings"
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/version" "github.com/docker/docker/pkg/version"
"github.com/docker/docker/vendor/src/github.com/docker/libtrust"
) )
const ( const (
@ -47,3 +50,25 @@ func MatchesContentType(contentType, expectedType string) bool {
} }
return err == nil && mimetype == expectedType return err == nil && mimetype == expectedType
} }
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := os.MkdirAll(path.Dir(trustKeyPath), 0700)
if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
if err == libtrust.ErrKeyFileDoesNotExist {
trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("Error generating key: %s", err)
}
if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
return nil, fmt.Errorf("Error saving key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("Error loading key file: %s", err)
}
return trustKey, nil
}
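The daemon changes further down use this helper to derive a stable identifier from the generated key (trustKey.PublicKey().KeyID() becomes daemon.ID). A minimal caller, assuming only the functions visible in this diff, might look like the sketch below; the key path is just an example value.

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/api"
    )

    func main() {
        // Example path only; the real daemon reads config.TrustKeyPath
        // (see daemonCfg.TrustKeyPath further down in this diff).
        trustKey, err := api.LoadOrCreateTrustKey("/etc/docker/key.json")
        if err != nil {
            log.Fatal(err)
        }
        // KeyID() is what NewDaemonFromDirectory stores as daemon.ID below.
        fmt.Println("daemon ID:", trustKey.PublicKey().KeyID())
    }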


@ -1016,6 +1016,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
} else { } else {
job.Setenv("rm", r.FormValue("rm")) job.Setenv("rm", r.FormValue("rm"))
} }
if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") {
job.Setenv("pull", "1")
}
job.Stdin.Add(r.Body) job.Stdin.Add(r.Body)
job.Setenv("remote", r.FormValue("remote")) job.Setenv("remote", r.FormValue("remote"))
job.Setenv("t", r.FormValue("t")) job.Setenv("t", r.FormValue("t"))


@ -31,21 +31,39 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
// in the dockerfile available from the next statement on via ${foo}. // in the dockerfile available from the next statement on via ${foo}.
// //
func env(b *Builder, args []string, attributes map[string]bool, original string) error { func env(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 2 { if len(args) == 0 {
return fmt.Errorf("ENV accepts two arguments") return fmt.Errorf("ENV is missing arguments")
} }
fullEnv := fmt.Sprintf("%s=%s", args[0], args[1]) if len(args)%2 != 0 {
// should never get here, but just in case
return fmt.Errorf("Bad input to ENV, too many args")
}
for i, envVar := range b.Config.Env { commitStr := "ENV"
envParts := strings.SplitN(envVar, "=", 2)
if args[0] == envParts[0] { for j := 0; j < len(args); j++ {
b.Config.Env[i] = fullEnv // name ==> args[j]
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) // value ==> args[j+1]
newVar := args[j] + "=" + args[j+1] + ""
commitStr += " " + newVar
gotOne := false
for i, envVar := range b.Config.Env {
envParts := strings.SplitN(envVar, "=", 2)
if envParts[0] == args[j] {
b.Config.Env[i] = newVar
gotOne = true
break
}
} }
if !gotOne {
b.Config.Env = append(b.Config.Env, newVar)
}
j++
} }
b.Config.Env = append(b.Config.Env, fullEnv)
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) return b.commit("", b.Config.Cmd, commitStr)
} }
// MAINTAINER some text <maybe@an.email.address> // MAINTAINER some text <maybe@an.email.address>
@ -97,6 +115,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
name := args[0] name := args[0]
image, err := b.Daemon.Repositories().LookupImage(name) image, err := b.Daemon.Repositories().LookupImage(name)
if b.Pull {
image, err = b.pullImage(name)
if err != nil {
return err
}
}
if err != nil { if err != nil {
if b.Daemon.Graph().IsNotExist(err) { if b.Daemon.Graph().IsNotExist(err) {
image, err = b.pullImage(name) image, err = b.pullImage(name)
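The reworked env() above folds any number of name/value pairs into b.Config.Env, replacing an existing entry with the same name and appending otherwise. A standalone sketch of just that merge step (the function name here is illustrative, not part of the builder's API):

    package main

    import (
        "fmt"
        "strings"
    )

    // mergeEnv mirrors the loop in env(): for each name=value pair, overwrite an
    // existing entry with the same name or append a new one.
    func mergeEnv(env []string, pairs ...string) []string {
        for j := 0; j+1 < len(pairs); j += 2 {
            newVar := pairs[j] + "=" + pairs[j+1]
            replaced := false
            for i, envVar := range env {
                if strings.SplitN(envVar, "=", 2)[0] == pairs[j] {
                    env[i] = newVar
                    replaced = true
                    break
                }
            }
            if !replaced {
                env = append(env, newVar)
            }
        }
        return env
    }

    func main() {
        env := []string{"PATH=/usr/bin"}
        // Equivalent of: ENV PATH=/usr/local/bin:/usr/bin LANG=C
        env = mergeEnv(env, "PATH", "/usr/local/bin:/usr/bin", "LANG", "C")
        fmt.Println(env) // [PATH=/usr/local/bin:/usr/bin LANG=C]
    }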


@ -90,6 +90,7 @@ type Builder struct {
// controls how images and containers are handled between steps. // controls how images and containers are handled between steps.
Remove bool Remove bool
ForceRemove bool ForceRemove bool
Pull bool
AuthConfig *registry.AuthConfig AuthConfig *registry.AuthConfig
AuthConfigFile *registry.ConfigFile AuthConfigFile *registry.ConfigFile


@ -24,10 +24,12 @@ import (
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon"
imagepkg "github.com/docker/docker/image" imagepkg "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -46,7 +48,8 @@ func (b *Builder) readContext(context io.Reader) error {
if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil { if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
return err return err
} }
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
return err return err
} }
@ -215,7 +218,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
origPath = strings.TrimPrefix(origPath, "./") origPath = strings.TrimPrefix(origPath, "./")
// In the remote/URL case, download it and gen its hashcode // In the remote/URL case, download it and gen its hashcode
if utils.IsURL(origPath) { if urlutil.IsURL(origPath) {
if !allowRemote { if !allowRemote {
return fmt.Errorf("Source can't be a URL for %s", cmdName) return fmt.Errorf("Source can't be a URL for %s", cmdName)
} }
@ -627,7 +630,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
} }
// try to successfully untar the orig // try to successfully untar the orig
if err := archive.UntarPath(origPath, tarDest); err == nil { if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
return nil return nil
} else if err != io.EOF { } else if err != io.EOF {
log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
@ -637,7 +640,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
return err return err
} }
if err := archive.CopyWithTar(origPath, destPath); err != nil { if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
return err return err
} }
@ -650,7 +653,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec
} }
func copyAsDirectory(source, destination string, destinationExists bool) error { func copyAsDirectory(source, destination string, destinationExists bool) error {
if err := archive.CopyWithTar(source, destination); err != nil { if err := chrootarchive.CopyWithTar(source, destination); err != nil {
return err return err
} }
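The repeated swap of archive.Untar/UntarPath/CopyWithTar for their chrootarchive counterparts in this file (and in the graph drivers below) is the tar-breakout fix listed in the 1.3.2 changelog: extraction happens inside a chroot of the destination, so `..` components and absolute symlink targets cannot escape it. pkg/chrootarchive actually re-execs a helper process before chrooting (hence the reexec.Init calls added to the tests further down); the in-process version here is only a simplified sketch of the idea, not Docker's implementation, and it needs root to run.

    package main

    import (
        "archive/tar"
        "io"
        "log"
        "os"
        "path/filepath"
        "syscall"
    )

    // untarInChroot extracts r under dest after chrooting into it, so no header
    // name or symlink target can resolve outside dest. Real code does this in a
    // re-exec'd child because chroot affects the whole process.
    func untarInChroot(r io.Reader, dest string) error {
        if err := syscall.Chroot(dest); err != nil {
            return err
        }
        if err := os.Chdir("/"); err != nil {
            return err
        }
        tr := tar.NewReader(r)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
            // Inside the chroot "/" is dest, so cleaning the name is enough.
            name := filepath.Join("/", hdr.Name)
            switch hdr.Typeflag {
            case tar.TypeDir:
                if err := os.MkdirAll(name, os.FileMode(hdr.Mode)); err != nil {
                    return err
                }
            case tar.TypeReg:
                f, err := os.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.FileMode(hdr.Mode))
                if err != nil {
                    return err
                }
                _, err = io.Copy(f, tr)
                f.Close()
                if err != nil {
                    return err
                }
            case tar.TypeSymlink:
                // Even a link to "/etc/passwd" now points inside dest.
                if err := os.Symlink(hdr.Linkname, name); err != nil {
                    return err
                }
            }
        }
    }

    func main() {
        if len(os.Args) != 3 {
            log.Fatal("usage: untar-chroot <tarball> <destdir>")
        }
        f, err := os.Open(os.Args[1])
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        if err := untarInChroot(f, os.Args[2]); err != nil {
            log.Fatal(err)
        }
    }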


@ -11,6 +11,7 @@ import (
"github.com/docker/docker/graph" "github.com/docker/docker/graph"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/docker/registry" "github.com/docker/docker/registry"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -35,6 +36,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
noCache = job.GetenvBool("nocache") noCache = job.GetenvBool("nocache")
rm = job.GetenvBool("rm") rm = job.GetenvBool("rm")
forceRm = job.GetenvBool("forcerm") forceRm = job.GetenvBool("forcerm")
pull = job.GetenvBool("pull")
authConfig = &registry.AuthConfig{} authConfig = &registry.AuthConfig{}
configFile = &registry.ConfigFile{} configFile = &registry.ConfigFile{}
tag string tag string
@ -57,8 +59,8 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
if remoteURL == "" { if remoteURL == "" {
context = ioutil.NopCloser(job.Stdin) context = ioutil.NopCloser(job.Stdin)
} else if utils.IsGIT(remoteURL) { } else if urlutil.IsGitURL(remoteURL) {
if !utils.ValidGitTransport(remoteURL) { if !urlutil.IsGitTransport(remoteURL) {
remoteURL = "https://" + remoteURL remoteURL = "https://" + remoteURL
} }
root, err := ioutil.TempDir("", "docker-build-git") root, err := ioutil.TempDir("", "docker-build-git")
@ -76,7 +78,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
return job.Error(err) return job.Error(err)
} }
context = c context = c
} else if utils.IsURL(remoteURL) { } else if urlutil.IsURL(remoteURL) {
f, err := utils.Download(remoteURL) f, err := utils.Download(remoteURL)
if err != nil { if err != nil {
return job.Error(err) return job.Error(err)
@ -111,6 +113,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
UtilizeCache: !noCache, UtilizeCache: !noCache,
Remove: rm, Remove: rm,
ForceRemove: forceRm, ForceRemove: forceRm,
Pull: pull,
OutOld: job.Stdout, OutOld: job.Stdout,
StreamFormatter: sf, StreamFormatter: sf,
AuthConfig: authConfig, AuthConfig: authConfig,


@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"unicode"
) )
var ( var (
@ -41,17 +42,139 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) {
// parse environment like statements. Note that this does *not* handle // parse environment like statements. Note that this does *not* handle
// variable interpolation, which will be handled in the evaluator. // variable interpolation, which will be handled in the evaluator.
func parseEnv(rest string) (*Node, map[string]bool, error) { func parseEnv(rest string) (*Node, map[string]bool, error) {
node := &Node{} // This is kind of tricky because we need to support the old
rootnode := node // variant: ENV name value
strs := TOKEN_WHITESPACE.Split(rest, 2) // as well as the new one: ENV name=value ...
// The trigger to know which one is being used will be whether we hit
// a space or = first. space ==> old, "=" ==> new
if len(strs) < 2 { const (
return nil, nil, fmt.Errorf("ENV must have two arguments") inSpaces = iota // looking for start of a word
inWord
inQuote
)
words := []string{}
phase := inSpaces
word := ""
quote := '\000'
blankOK := false
var ch rune
for pos := 0; pos <= len(rest); pos++ {
if pos != len(rest) {
ch = rune(rest[pos])
}
if phase == inSpaces { // Looking for start of word
if pos == len(rest) { // end of input
break
}
if unicode.IsSpace(ch) { // skip spaces
continue
}
phase = inWord // found it, fall thru
}
if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
if blankOK || len(word) > 0 {
words = append(words, word)
}
break
}
if phase == inWord {
if unicode.IsSpace(ch) {
phase = inSpaces
if blankOK || len(word) > 0 {
words = append(words, word)
// Look for = and if no there assume
// we're doing the old stuff and
// just read the rest of the line
if !strings.Contains(word, "=") {
word = strings.TrimSpace(rest[pos:])
words = append(words, word)
break
}
}
word = ""
blankOK = false
continue
}
if ch == '\'' || ch == '"' {
quote = ch
blankOK = true
phase = inQuote
continue
}
if ch == '\\' {
if pos+1 == len(rest) {
continue // just skip \ at end
}
pos++
ch = rune(rest[pos])
}
word += string(ch)
continue
}
if phase == inQuote {
if ch == quote {
phase = inWord
continue
}
if ch == '\\' {
if pos+1 == len(rest) {
phase = inWord
continue // just skip \ at end
}
pos++
ch = rune(rest[pos])
}
word += string(ch)
}
} }
node.Value = strs[0] if len(words) == 0 {
node.Next = &Node{} return nil, nil, fmt.Errorf("ENV must have some arguments")
node.Next.Value = strs[1] }
// Old format (ENV name value)
var rootnode *Node
if !strings.Contains(words[0], "=") {
node := &Node{}
rootnode = node
strs := TOKEN_WHITESPACE.Split(rest, 2)
if len(strs) < 2 {
return nil, nil, fmt.Errorf("ENV must have two arguments")
}
node.Value = strs[0]
node.Next = &Node{}
node.Next.Value = strs[1]
} else {
var prevNode *Node
for i, word := range words {
if !strings.Contains(word, "=") {
return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
}
parts := strings.SplitN(word, "=", 2)
name := &Node{}
value := &Node{}
name.Next = value
name.Value = parts[0]
value.Value = parts[1]
if i == 0 {
rootnode = name
} else {
prevNode.Next = name
}
prevNode = value
}
}
return rootnode, nil, nil return rootnode, nil, nil
} }


@ -125,6 +125,12 @@ func Parse(rwc io.Reader) (*Node, error) {
break break
} }
} }
if child == nil && line != "" {
line, child, err = parseLine(line)
if err != nil {
return nil, err
}
}
} }
if child != nil { if child != nil {


@ -1,3 +1,3 @@
FROM busybox FROM busybox
ENV PATH=PATH ENV PATH

builder/parser/testfiles/env/Dockerfile (vendored, new file, 15 additions)

@ -0,0 +1,15 @@
FROM ubuntu
ENV name value
ENV name=value
ENV name=value name2=value2
ENV name="value value1"
ENV name=value\ value2
ENV name="value'quote space'value2"
ENV name='value"double quote"value2'
ENV name=value\ value2 name2=value2\ value3
ENV name=value \
name1=value1 \
name2="value2a \
value2b" \
name3="value3a\n\"value3b\"" \
name4="value4a\\nvalue4b" \

builder/parser/testfiles/env/result (vendored, new file, 10 additions)

@ -0,0 +1,10 @@
(from "ubuntu")
(env "name" "value")
(env "name" "value")
(env "name" "value" "name2" "value2")
(env "name" "value value1")
(env "name" "value value2")
(env "name" "value'quote space'value2")
(env "name" "value\"double quote\"value2")
(env "name" "value value2" "name2" "value2 value3")
(env "name" "value" "name1" "value1" "name2" "value2a value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b")


@ -1,8 +1,8 @@
#!bash #!/bin/bash
# #
# bash completion file for core docker commands # bash completion file for core docker commands
# #
# This script provides supports completion of: # This script provides completion of:
# - commands and their options # - commands and their options
# - container ids and names # - container ids and names
# - image repos and tags # - image repos and tags
@ -11,9 +11,9 @@
# To enable the completions either: # To enable the completions either:
# - place this file in /etc/bash_completion.d # - place this file in /etc/bash_completion.d
# or # or
# - copy this file and add the line below to your .bashrc after # - copy this file to e.g. ~/.docker-completion.sh and add the line
# bash completion features are loaded # below to your .bashrc after bash completion features are loaded
# . docker.bash # . ~/.docker-completion.sh
# #
# Note: # Note:
# Currently, the completions will not work if the docker daemon is not # Currently, the completions will not work if the docker daemon is not
@ -99,13 +99,60 @@ __docker_pos_first_nonflag() {
echo $counter echo $counter
} }
__docker_resolve_hostname() {
command -v host >/dev/null 2>&1 || return
COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
}
__docker_capabilities() {
# The list of capabilities is defined in types.go, ALL was added manually.
COMPREPLY=( $( compgen -W "
ALL
AUDIT_CONTROL
AUDIT_WRITE
BLOCK_SUSPEND
CHOWN
DAC_OVERRIDE
DAC_READ_SEARCH
FOWNER
FSETID
IPC_LOCK
IPC_OWNER
KILL
LEASE
LINUX_IMMUTABLE
MAC_ADMIN
MAC_OVERRIDE
MKNOD
NET_ADMIN
NET_BIND_SERVICE
NET_BROADCAST
NET_RAW
SETFCAP
SETGID
SETPCAP
SETUID
SYS_ADMIN
SYS_BOOT
SYS_CHROOT
SYSLOG
SYS_MODULE
SYS_NICE
SYS_PACCT
SYS_PTRACE
SYS_RAWIO
SYS_RESOURCE
SYS_TIME
SYS_TTY_CONFIG
WAKE_ALARM
" -- "$cur" ) )
}
_docker_docker() { _docker_docker() {
case "$prev" in case "$prev" in
-H) -H)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -138,8 +185,6 @@ _docker_build() {
__docker_image_repos_and_tags __docker_image_repos_and_tags
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -160,8 +205,6 @@ _docker_commit() {
-m|--message|-a|--author|--run) -m|--message|-a|--author|--run)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -222,7 +265,7 @@ _docker_create() {
__docker_containers_all __docker_containers_all
return return
;; ;;
-v|--volume) -v|--volume|--device)
case "$cur" in case "$cur" in
*:*) *:*)
# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@ -255,19 +298,72 @@ _docker_create() {
esac esac
return return
;; ;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) --add-host)
case "$cur" in
*:)
__docker_resolve_hostname
return
;;
esac
;;
--cap-add|--cap-drop)
__docker_capabilities
return return
;; ;;
*) --net)
case "$cur" in
container:*)
local cur=${cur#*:}
__docker_containers_all
;;
*)
COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
if [ "${COMPREPLY[*]}" = "container:" ] ; then
compopt -o nospace
fi
;;
esac
return
;;
--restart)
case "$cur" in
on-failure:*)
;;
*)
COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
;;
esac
return
;;
--security-opt)
case "$cur" in
label:*:*)
;;
label:*)
local cur=${cur##*:}
COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
if [ "${COMPREPLY[*]}" != "disable" ] ; then
compopt -o nospace
fi
;;
*)
COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
compopt -o nospace
;;
esac
return
;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
return
;; ;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
;; ;;
*) *)
local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
if [ $cword -eq $counter ]; then if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids __docker_image_repos_and_tags_and_ids
@ -288,16 +384,12 @@ _docker_events() {
--since) --since)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
;; ;;
*)
;;
esac esac
} }
@ -376,8 +468,6 @@ _docker_inspect() {
-f|--format) -f|--format)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -403,16 +493,12 @@ _docker_login() {
-u|--username|-p|--password|-e|--email) -u|--username|-p|--password|-e|--email)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
;; ;;
*)
;;
esac esac
} }
@ -452,16 +538,12 @@ _docker_ps() {
-n) -n)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
;; ;;
*)
;;
esac esac
} }
@ -470,8 +552,6 @@ _docker_pull() {
-t|--tag) -t|--tag)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -499,8 +579,6 @@ _docker_restart() {
-t|--time) -t|--time)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -520,7 +598,6 @@ _docker_rm() {
return return
;; ;;
*) *)
local force=
for arg in "${COMP_WORDS[@]}"; do for arg in "${COMP_WORDS[@]}"; do
case "$arg" in case "$arg" in
-f|--force) -f|--force)
@ -553,7 +630,7 @@ _docker_run() {
__docker_containers_all __docker_containers_all
return return
;; ;;
-v|--volume) -v|--volume|--device)
case "$cur" in case "$cur" in
*:*) *:*)
# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@ -586,20 +663,72 @@ _docker_run() {
esac esac
return return
;; ;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) --add-host)
case "$cur" in
*:)
__docker_resolve_hostname
return
;;
esac
;;
--cap-add|--cap-drop)
__docker_capabilities
return return
;; ;;
*) --net)
case "$cur" in
container:*)
local cur=${cur#*:}
__docker_containers_all
;;
*)
COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") )
if [ "${COMPREPLY[*]}" = "container:" ] ; then
compopt -o nospace
fi
;;
esac
return
;;
--restart)
case "$cur" in
on-failure:*)
;;
*)
COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") )
;;
esac
return
;;
--security-opt)
case "$cur" in
label:*:*)
;;
label:*)
local cur=${cur##*:}
COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") )
if [ "${COMPREPLY[*]}" != "disable" ] ; then
compopt -o nospace
fi
;;
*)
COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") )
compopt -o nospace
;;
esac
return
;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search)
return
;; ;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) )
;; ;;
*) *)
local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart')
local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt')
if [ $cword -eq $counter ]; then if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids __docker_image_repos_and_tags_and_ids
@ -620,16 +749,12 @@ _docker_search() {
-s|--stars) -s|--stars)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
;; ;;
*)
;;
esac esac
} }
@ -649,8 +774,6 @@ _docker_stop() {
-t|--time) -t|--time)
return return
;; ;;
*)
;;
esac esac
case "$cur" in case "$cur" in
@ -752,7 +875,7 @@ _docker() {
local cur prev words cword local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword _get_comp_words_by_ref -n : cur prev words cword
local command='docker' local command='docker' cpos=0
local counter=1 local counter=1
while [ $counter -lt $cword ]; do while [ $counter -lt $cword ]; do
case "${words[$counter]}" in case "${words[$counter]}" in


@ -177,7 +177,9 @@ __docker_commands () {
if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
&& ! _retrieve_cache docker_subcommands; && ! _retrieve_cache docker_subcommands;
then then
_docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:}) local -a lines
lines=(${(f)"$(_call_program commands docker 2>&1)"})
_docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
_docker_subcommands=($_docker_subcommands 'help:Show help for a command') _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
_store_cache docker_subcommands _docker_subcommands _store_cache docker_subcommands _docker_subcommands
fi fi


@ -15,9 +15,12 @@ done
suite="$1" suite="$1"
shift shift
# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ...
: ${DEBOOTSTRAP:=debootstrap}
( (
set -x set -x
debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@"
) )
# now for some Docker-specific tweaks # now for some Docker-specific tweaks


@ -40,6 +40,8 @@ type Config struct {
DisableNetwork bool DisableNetwork bool
EnableSelinuxSupport bool EnableSelinuxSupport bool
Context map[string][]string Context map[string][]string
TrustKeyPath string
Labels []string
} }
// InstallFlags adds command-line options to the top-level flag parser for // InstallFlags adds command-line options to the top-level flag parser for
@ -68,6 +70,7 @@ func (config *Config) InstallFlags() {
opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror")
opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)")
// Localhost is by default considered as an insecure registry // Localhost is by default considered as an insecure registry
// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).


@ -83,8 +83,8 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
return nil, nil, err return nil, nil, err
} }
if hostConfig != nil && config.SecurityOpt == nil { if hostConfig != nil && hostConfig.SecurityOpt == nil {
config.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode) hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }


@ -15,6 +15,7 @@ import (
"github.com/docker/libcontainer/label" "github.com/docker/libcontainer/label"
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
"github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/execdrivers"
"github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/daemon/execdriver/lxc"
@ -83,6 +84,7 @@ func (c *contStore) List() []*Container {
} }
type Daemon struct { type Daemon struct {
ID string
repository string repository string
sysInitPath string sysInitPath string
containers *contStore containers *contStore
@ -529,10 +531,10 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string)
return entrypoint, args return entrypoint, args
} }
func parseSecurityOpt(container *Container, config *runconfig.Config) error { func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
var ( var (
label_opts []string labelOpts []string
err error err error
) )
for _, opt := range config.SecurityOpt { for _, opt := range config.SecurityOpt {
@ -542,7 +544,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error {
} }
switch con[0] { switch con[0] {
case "label": case "label":
label_opts = append(label_opts, con[1]) labelOpts = append(labelOpts, con[1])
case "apparmor": case "apparmor":
container.AppArmorProfile = con[1] container.AppArmorProfile = con[1]
default: default:
@ -550,7 +552,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error {
} }
} }
container.ProcessLabel, container.MountLabel, err = label.InitLabels(label_opts) container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err return err
} }
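parseSecurityOpt now takes its options from HostConfig (they are applied at container start via setHostConfig, shown below) and understands two prefixes, `label` and `apparmor`, which is also what the bash completion added in this merge suggests. A small standalone sketch of that splitting, with illustrative option values; the exact split is elided from the hunk above, so this assumes a simple "name:value" form:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        opts := []string{
            "label:user:system_u",     // collected and handed to label.InitLabels
            "apparmor:docker-default", // becomes container.AppArmorProfile
            "bogus",                   // rejected: no "name:value" form
        }
        for _, opt := range opts {
            con := strings.SplitN(opt, ":", 2)
            if len(con) != 2 {
                fmt.Printf("invalid --security-opt: %q\n", opt)
                continue
            }
            switch con[0] {
            case "label":
                fmt.Printf("label option: %q\n", con[1])
            case "apparmor":
                fmt.Printf("apparmor profile: %q\n", con[1])
            default:
                fmt.Printf("invalid --security-opt: %q\n", opt)
            }
        }
    }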
@ -584,7 +586,6 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
execCommands: newExecStore(), execCommands: newExecStore(),
} }
container.root = daemon.containerRoot(container.ID) container.root = daemon.containerRoot(container.ID)
err = parseSecurityOpt(container, config)
return container, err return container, err
} }
@ -893,7 +894,13 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
return nil, err return nil, err
} }
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
daemon := &Daemon{ daemon := &Daemon{
ID: trustKey.PublicKey().KeyID(),
repository: daemonRepo, repository: daemonRepo,
containers: &contStore{s: make(map[string]*Container)}, containers: &contStore{s: make(map[string]*Container)},
execCommands: newExecStore(), execCommands: newExecStore(),


@ -8,7 +8,7 @@ import (
func TestParseSecurityOpt(t *testing.T) { func TestParseSecurityOpt(t *testing.T) {
container := &Container{} container := &Container{}
config := &runconfig.Config{} config := &runconfig.HostConfig{}
// test apparmor // test apparmor
config.SecurityOpt = []string{"apparmor:test_profile"} config.SecurityOpt = []string{"apparmor:test_profile"}


@ -122,8 +122,6 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
processConfig := execdriver.ProcessConfig{ processConfig := execdriver.ProcessConfig{
Privileged: config.Privileged,
User: config.User,
Tty: config.Tty, Tty: config.Tty,
Entrypoint: entrypoint, Entrypoint: entrypoint,
Arguments: args, Arguments: args,


@ -33,6 +33,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
mountpk "github.com/docker/docker/pkg/mount" mountpk "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
"github.com/docker/libcontainer/label" "github.com/docker/libcontainer/label"
@ -305,7 +306,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
} }
func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
} }
// DiffSize calculates the changes between the specified id // DiffSize calculates the changes between the specified id


@ -4,18 +4,25 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive"
"io/ioutil" "io/ioutil"
"os" "os"
"path" "path"
"testing" "testing"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
) )
var ( var (
tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") tmpOuter = path.Join(os.TempDir(), "aufs-tests")
tmp = path.Join(tmpOuter, "aufs")
) )
func init() {
reexec.Init()
}
func testInit(dir string, t *testing.T) graphdriver.Driver { func testInit(dir string, t *testing.T) graphdriver.Driver {
d, err := Init(dir, nil) d, err := Init(dir, nil)
if err != nil { if err != nil {
@ -640,8 +647,8 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
t.Fatal(err) t.Fatal(err)
} }
d := testInit(mountPath, t).(*Driver)
defer os.RemoveAll(mountPath) defer os.RemoveAll(mountPath)
d := testInit(mountPath, t).(*Driver)
defer d.Cleanup() defer d.Cleanup()
var last string var last string
var expected int var expected int
@ -662,24 +669,24 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
if err := d.Create(current, parent); err != nil { if err := d.Create(current, parent); err != nil {
t.Logf("Current layer %d", i) t.Logf("Current layer %d", i)
t.Fatal(err) t.Error(err)
} }
point, err := d.Get(current, "") point, err := d.Get(current, "")
if err != nil { if err != nil {
t.Logf("Current layer %d", i) t.Logf("Current layer %d", i)
t.Fatal(err) t.Error(err)
} }
f, err := os.Create(path.Join(point, current)) f, err := os.Create(path.Join(point, current))
if err != nil { if err != nil {
t.Logf("Current layer %d", i) t.Logf("Current layer %d", i)
t.Fatal(err) t.Error(err)
} }
f.Close() f.Close()
if i%10 == 0 { if i%10 == 0 {
if err := os.Remove(path.Join(point, parent)); err != nil { if err := os.Remove(path.Join(point, parent)); err != nil {
t.Logf("Current layer %d", i) t.Logf("Current layer %d", i)
t.Fatal(err) t.Error(err)
} }
expected-- expected--
} }
@ -689,28 +696,30 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) {
// Perform the actual mount for the top most image // Perform the actual mount for the top most image
point, err := d.Get(last, "") point, err := d.Get(last, "")
if err != nil { if err != nil {
t.Fatal(err) t.Error(err)
} }
files, err := ioutil.ReadDir(point) files, err := ioutil.ReadDir(point)
if err != nil { if err != nil {
t.Fatal(err) t.Error(err)
} }
if len(files) != expected { if len(files) != expected {
t.Fatalf("Expected %d got %d", expected, len(files)) t.Errorf("Expected %d got %d", expected, len(files))
} }
} }
func TestMountMoreThan42Layers(t *testing.T) { func TestMountMoreThan42Layers(t *testing.T) {
os.RemoveAll(tmpOuter)
testMountMoreThan42Layers(t, tmp) testMountMoreThan42Layers(t, tmp)
} }
func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
tmp := "aufs-tests" defer os.RemoveAll(tmpOuter)
zeroes := "0"
for { for {
// This finds a mount path so that when combined into aufs mount options // This finds a mount path so that when combined into aufs mount options
// 4096 byte boundary would be in between the paths or in permission // 4096 byte boundary would be in between the paths or in permission
// section. For '/tmp' it will use '/tmp/aufs-tests00000000/aufs' // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs'
mountPath := path.Join(os.TempDir(), tmp, "aufs") mountPath := path.Join(tmpOuter, zeroes, "aufs")
pathLength := 77 + len(mountPath) pathLength := 77 + len(mountPath)
if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 {
@ -718,6 +727,6 @@ func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
testMountMoreThan42Layers(t, mountPath) testMountMoreThan42Layers(t, mountPath)
return return
} }
tmp += "0" zeroes += "0"
} }
} }


@ -13,6 +13,9 @@ func init() {
DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultDataLoopbackSize = 300 * 1024 * 1024
DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
DefaultBaseFsSize = 300 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024
if err := graphtest.InitLoopbacks(); err != nil {
panic(err)
}
} }
// This avoids creating a new driver for each test if all tests are run // This avoids creating a new driver for each test if all tests are run


@ -8,6 +8,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
@ -122,7 +123,7 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea
start := time.Now().UTC() start := time.Now().UTC()
log.Debugf("Start untar layer") log.Debugf("Start untar layer")
if err = archive.ApplyLayer(layerFs, diff); err != nil { if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
return return
} }
log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())


@ -1,6 +1,7 @@
package graphtest package graphtest
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path" "path"
@ -20,6 +21,46 @@ type Driver struct {
refCount int refCount int
} }
// InitLoopbacks ensures that the loopback devices are properly created within
// the system running the device mapper tests.
func InitLoopbacks() error {
stat_t, err := getBaseLoopStats()
if err != nil {
return err
}
// create at least 8 loopback files, ya, that is a good number
for i := 0; i < 8; i++ {
loopPath := fmt.Sprintf("/dev/loop%d", i)
// only create new loopback files if they don't exist
if _, err := os.Stat(loopPath); err != nil {
if mkerr := syscall.Mknod(loopPath,
uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
return mkerr
}
os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
}
}
return nil
}
// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the
// loop0 device on the system. If it does not exist we assume 0,0,0660 for the
// stat data
func getBaseLoopStats() (*syscall.Stat_t, error) {
loop0, err := os.Stat("/dev/loop0")
if err != nil {
if os.IsNotExist(err) {
return &syscall.Stat_t{
Uid: 0,
Gid: 0,
Mode: 0660,
}, nil
}
return nil, err
}
return loop0.Sys().(*syscall.Stat_t), nil
}
func newDriver(t *testing.T, name string) *Driver { func newDriver(t *testing.T, name string) *Driver {
root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-")
if err != nil { if err != nil {


@ -129,6 +129,7 @@ func supportsOverlayfs() error {
return nil return nil
} }
} }
log.Error("'overlayfs' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlayfs support loaded.")
return graphdriver.ErrNotSupported return graphdriver.ErrNotSupported
} }


@ -8,7 +8,7 @@ import (
"path" "path"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/libcontainer/label" "github.com/docker/libcontainer/label"
) )
@ -66,7 +66,7 @@ func (d *Driver) Create(id, parent string) error {
if err != nil { if err != nil {
return fmt.Errorf("%s: %s", parent, err) return fmt.Errorf("%s: %s", parent, err)
} }
if err := archive.CopyWithTar(parentDir, dir); err != nil { if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil {
return err return err
} }
return nil return nil


@ -1,10 +1,17 @@
package vfs package vfs
import ( import (
"github.com/docker/docker/daemon/graphdriver/graphtest"
"testing" "testing"
"github.com/docker/docker/daemon/graphdriver/graphtest"
"github.com/docker/docker/pkg/reexec"
) )
func init() {
reexec.Init()
}
// This avoids creating a new driver for each test if all tests are run // This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestVfsSetup and TestVfsTeardown // Make sure to put new tests between TestVfsSetup and TestVfsTeardown
func TestVfsSetup(t *testing.T) { func TestVfsSetup(t *testing.T) {


@ -56,6 +56,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
return job.Error(err) return job.Error(err)
} }
v := &engine.Env{} v := &engine.Env{}
v.Set("ID", daemon.ID)
v.SetInt("Containers", len(daemon.List())) v.SetInt("Containers", len(daemon.List()))
v.SetInt("Images", imgcount) v.SetInt("Images", imgcount)
v.Set("Driver", daemon.GraphDriver().String()) v.Set("Driver", daemon.GraphDriver().String())
@ -75,6 +76,10 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
v.Set("InitPath", initPath) v.Set("InitPath", initPath)
v.SetInt("NCPU", runtime.NumCPU()) v.SetInt("NCPU", runtime.NumCPU())
v.SetInt64("MemTotal", meminfo.MemTotal) v.SetInt64("MemTotal", meminfo.MemTotal)
if hostname, err := os.Hostname(); err == nil {
v.Set("Name", hostname)
}
v.SetList("Labels", daemon.Config().Labels)
if _, err := v.WriteTo(job.Stdout); err != nil { if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err) return job.Error(err)
} }
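With these additions `docker info` gains ID, Name, and Labels fields; the client-side counterpart earlier in this diff reads them back with remoteInfo.Get/GetList. A toy round trip through engine.Env, using only the methods that appear in this diff (the exact serialization behind WriteTo is assumed, not shown here), with made-up label values:

    package main

    import (
        "log"
        "os"

        "github.com/docker/docker/engine"
    )

    func main() {
        v := &engine.Env{}
        v.Set("ID", "ABCD:EFGH") // stands in for daemon.ID, derived from the trust key
        if hostname, err := os.Hostname(); err == nil {
            v.Set("Name", hostname)
        }
        v.SetList("Labels", []string{"storage=ssd", "region=us-east-1"})

        if v.Exists("Labels") {
            log.Println("Labels:")
            for _, label := range v.GetList("Labels") {
                log.Println(" ", label)
            }
        }
        if _, err := v.WriteTo(os.Stdout); err != nil {
            log.Fatal(err)
        }
    }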


@ -47,6 +47,7 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
out.Set("ProcessLabel", container.ProcessLabel) out.Set("ProcessLabel", container.ProcessLabel)
out.SetJson("Volumes", container.Volumes) out.SetJson("Volumes", container.Volumes)
out.SetJson("VolumesRW", container.VolumesRW) out.SetJson("VolumesRW", container.VolumesRW)
out.SetJson("AppArmorProfile", container.AppArmorProfile)
if children, err := daemon.Children(container.Name); err == nil { if children, err := daemon.Children(container.Name); err == nil {
for linkAlias, child := range children { for linkAlias, child := range children {


@ -195,7 +195,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil {
return fmt.Errorf("Unable to enable network bridge NAT: %s", err) return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables postrouting: %s", output) return &iptables.ChainError{Chain: "POSTROUTING", Output: output}
} }
} }
} }
@ -236,7 +236,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil {
return fmt.Errorf("Unable to allow outgoing packets: %s", err) return fmt.Errorf("Unable to allow outgoing packets: %s", err)
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables allow outgoing: %s", output) return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output}
} }
} }
@ -247,15 +247,15 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil {
return fmt.Errorf("Unable to allow incoming packets: %s", err) return fmt.Errorf("Unable to allow incoming packets: %s", err)
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables allow incoming: %s", output) return &iptables.ChainError{Chain: "FORWARD incoming", Output: output}
} }
} }
return nil return nil
} }
// configureBridge attempts to create and configure a network bridge interface named `ifaceName` on the host // configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host
// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges // If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges
// If the bridge `ifaceName` already exists, it will only perform the IP address association with the existing // If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
// bridge (fixes issue #8444) // bridge (fixes issue #8444)
// If an address which doesn't conflict with existing interfaces can't be found, an error is returned. // If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
func configureBridge(bridgeIP string) error { func configureBridge(bridgeIP string) error {
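The raw fmt.Errorf calls for unexpected iptables output are replaced by a structured iptables.ChainError, which lets callers see which chain produced the spurious output. Its definition is not part of this diff; a plausible, purely hypothetical reconstruction, assuming it simply carries the chain name and the raw output, would be:

    package iptables

    import "fmt"

    // ChainError is a hypothetical reconstruction of the type used above; the
    // real one lives in pkg/iptables and may differ in detail.
    type ChainError struct {
        Chain  string
        Output []byte
    }

    func (e ChainError) Error() string {
        return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output))
    }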


@ -145,7 +145,7 @@ func (p *proxyCommand) Start() error {
select { select {
case err := <-errchan: case err := <-errchan:
return err return err
case <-time.After(1 * time.Second): case <-time.After(16 * time.Second):
return fmt.Errorf("Timed out proxy starting the userland proxy") return fmt.Errorf("Timed out proxy starting the userland proxy")
} }
} }


@ -44,6 +44,9 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
} }
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
if err := parseSecurityOpt(container, hostConfig); err != nil {
return err
}
// Validate the HostConfig binds. Make sure that: // Validate the HostConfig binds. Make sure that:
// the source exists // the source exists
for _, bind := range hostConfig.Binds { for _, bind := range hostConfig.Binds {
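The added `parseSecurityOpt` call moves security-option handling into container start, so options are validated from the host configuration rather than trusted from image metadata (part of the CVE-2014-6408 fix described in the release notes below). A rough sketch of parsing `--security-opt` values of the `label:...` / `apparmor:...` form (illustrative only; not the daemon's actual parseSecurityOpt):

```
package secopt

import (
	"fmt"
	"strings"
)

// parseSecurityOpts splits each "kind:value" option and routes it to the
// matching setting, rejecting anything it does not recognise.
func parseSecurityOpts(opts []string) (labelOpts []string, apparmorProfile string, err error) {
	for _, opt := range opts {
		parts := strings.SplitN(opt, ":", 2)
		if len(parts) != 2 {
			return nil, "", fmt.Errorf("invalid --security-opt: %q", opt)
		}
		switch parts[0] {
		case "label":
			labelOpts = append(labelOpts, parts[1])
		case "apparmor":
			apparmorProfile = parts[1]
		default:
			return nil, "", fmt.Errorf("invalid --security-opt: %q", opt)
		}
	}
	return labelOpts, apparmorProfile, nil
}
```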

Просмотреть файл

@ -12,7 +12,7 @@ import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/volumes" "github.com/docker/docker/volumes"
) )
@ -320,7 +320,7 @@ func copyExistingContents(source, destination string) error {
if len(srcList) == 0 { if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume // If the source volume is empty copy files from the root into the volume
if err := archive.CopyWithTar(source, destination); err != nil { if err := chrootarchive.CopyWithTar(source, destination); err != nil {
return err return err
} }
} }
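Swapping `archive.CopyWithTar` for `chrootarchive.CopyWithTar` is part of the image-extraction fix (CVE-2014-6407) described in the release notes below: extraction now happens with the process chrooted into the destination, so crafted `../` entries or absolute symlinks in an archive cannot escape it. A rough sketch of the idea (assumption: the real package re-executes a helper process via `pkg/reexec` before chrooting, because chroot affects the whole process, not just one goroutine):

```
package chrootexample

import (
	"io"
	"syscall"

	"github.com/docker/docker/pkg/archive"
)

// untarInChroot sketches the chrootarchive idea: make dest the process
// root before extracting, so no path in the archive can point outside it.
func untarInChroot(tarStream io.Reader, dest string) error {
	if err := syscall.Chroot(dest); err != nil {
		return err
	}
	if err := syscall.Chdir("/"); err != nil {
		return err
	}
	// From here on, "/" is dest; any "../" in the archive stays inside it.
	return archive.Untar(tarStream, "/", &archive.TarOptions{})
}
```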

Просмотреть файл

@ -34,6 +34,8 @@ func mainDaemon() {
eng := engine.New() eng := engine.New()
signal.Trap(eng.Shutdown) signal.Trap(eng.Shutdown)
daemonCfg.TrustKeyPath = *flTrustKey
// Load builtins // Load builtins
if err := builtins.Register(eng); err != nil { if err := builtins.Register(eng); err != nil {
log.Fatal(err) log.Fatal(err)

Просмотреть файл

@ -83,9 +83,14 @@ func main() {
) )
tlsConfig.InsecureSkipVerify = true tlsConfig.InsecureSkipVerify = true
// Regardless of whether the user sets it to true or false, if they
// specify --tlsverify at all then we need to turn on tls
if flag.IsSet("-tlsverify") {
*flTls = true
}
// If we should verify the server, we need to load a trusted ca // If we should verify the server, we need to load a trusted ca
if *flTlsVerify { if *flTlsVerify {
*flTls = true
certPool := x509.NewCertPool() certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(*flCa) file, err := ioutil.ReadFile(*flCa)
if err != nil { if err != nil {
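The new check answers a question the flag's value alone cannot: was `--tlsverify` passed at all? Docker's `mflag` exposes that as `IsSet`; with the standard library the same information comes from `flag.Visit`, which only walks flags that were explicitly set. A small illustration of that pattern (standard `flag` package, not Docker's code):

```
package tlsexample

import "flag"

// wasSet reports whether the named flag appeared on the command line,
// regardless of the value it was given; flag.Visit only sees set flags.
func wasSet(fs *flag.FlagSet, name string) bool {
	set := false
	fs.Visit(func(f *flag.Flag) {
		if f.Name == name {
			set = true
		}
	})
	return set
}
```

With this in place, `--tlsverify=false` still forces TLS on, which is exactly what the `flag.IsSet("-tlsverify")` branch above implements.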

Просмотреть файл

@ -35,7 +35,7 @@ var (
flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify flag")
flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
// these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs

Просмотреть файл

@ -68,6 +68,9 @@ unix://[/path/to/socket] to use.
**-l**, **--log-level**="*debug*|*info*|*error*|*fatal*" **-l**, **--log-level**="*debug*|*info*|*error*|*fatal*"
Set the logging level. Default is `info`. Set the logging level. Default is `info`.
**--label**="[]"
Set key=value labels to the daemon (displayed in `docker info`)
**--mtu**=VALUE **--mtu**=VALUE
Set the containers network mtu. Default is `1500`. Set the containers network mtu. Default is `1500`.

Просмотреть файл

@ -49,8 +49,9 @@ You can still call an old version of the API using
`GET /info` `GET /info`
**New!** **New!**
`info` now returns the number of CPUs available on the machine (`NCPU`) and `info` now returns the number of CPUs available on the machine (`NCPU`),
total memory available (`MemTotal`). total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and
a list of daemon labels (`Labels`).
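For API clients the new values are simply extra keys in the `GET /info` response. A minimal sketch of reading them over a TCP endpoint (the address below is a placeholder; daemons are more commonly reached over their Unix socket):

```
package infoexample

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// info lists only the fields called out in this API revision.
type info struct {
	NCPU     int
	MemTotal int64
	Name     string
	ID       string
	Labels   []string
}

func printDaemonIdentity(addr string) error {
	resp, err := http.Get(addr + "/info") // e.g. addr = "http://127.0.0.1:2375"
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var i info
	if err := json.NewDecoder(resp.Body).Decode(&i); err != nil {
		return err
	}
	fmt.Printf("%s (%s): %d CPUs, %d bytes RAM, labels %v\n",
		i.Name, i.ID, i.NCPU, i.MemTotal, i.Labels)
	return nil
}
```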
`POST /containers/create` `POST /containers/create`

Просмотреть файл

@ -524,6 +524,7 @@ Start the container `id`
HTTP/1.1 204 No Content HTTP/1.1 204 No Content
Json Parameters: Json Parameters:
- **Binds** – A list of volume bindings for this container. Each volume - **Binds** – A list of volume bindings for this container. Each volume
binding is a string of the form `container_path` (to create a new binding is a string of the form `container_path` (to create a new
volume for the container), `host_path:container_path` (to bind-mount volume for the container), `host_path:container_path` (to bind-mount
@ -1560,7 +1561,6 @@ Sets up an exec instance in a running container `id`
"Cmd":[ "Cmd":[
"date" "date"
], ],
"Container":"e90e34656806",
} }
**Example response**: **Example response**:
@ -1574,7 +1574,12 @@ Sets up an exec instance in a running container `id`
Json Parameters: Json Parameters:
- **execConfig** – exec configuration. - **AttachStdin** - Boolean value, attaches to stdin of the exec command.
- **AttachStdout** - Boolean value, attaches to stdout of the exec command.
- **AttachStderr** - Boolean value, attaches to stderr of the exec command.
- **Tty** - Boolean value to allocate a pseudo-TTY
- **Cmd** - Command to run specified as a string or an array of strings.
Status Codes: Status Codes:
@ -1585,8 +1590,9 @@ Status Codes:
`POST /exec/(id)/start` `POST /exec/(id)/start`
Starts a previously set up exec instance `id`. If `detach` is true, this API returns after Starts a previously set up exec instance `id`. If `detach` is true, this API
starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. returns after starting the `exec` command. Otherwise, this API sets up an
interactive session with the `exec` command.
**Example request**: **Example request**:
@ -1607,7 +1613,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session
Json Parameters: Json Parameters:
- **execConfig** – exec configuration. - **Detach** - Detach from the exec command
- **Tty** - Boolean value to allocate a pseudo-TTY
Status Codes: Status Codes:

Просмотреть файл

@ -1156,6 +1156,7 @@ Query Parameters:
the resulting image in case of success the resulting image in case of success
- **q** – suppress verbose build output - **q** – suppress verbose build output
- **nocache** – do not use the cache when building the image - **nocache** – do not use the cache when building the image
- **pull** - attempt to pull the image even if an older image exists locally
- **rm** - remove intermediate containers after a successful build (default behavior) - **rm** - remove intermediate containers after a successful build (default behavior)
- **forcerm** - always remove intermediate containers (includes rm) - **forcerm** - always remove intermediate containers (includes rm)
@ -1220,6 +1221,8 @@ Display system-wide information
"KernelVersion":"3.12.0-1-amd64" "KernelVersion":"3.12.0-1-amd64"
"NCPU":1, "NCPU":1,
"MemTotal":2099236864, "MemTotal":2099236864,
"Name":"prod-server-42",
"ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
"Debug":false, "Debug":false,
"NFd": 11, "NFd": 11,
"NGoroutines":21, "NGoroutines":21,
@ -1228,7 +1231,8 @@ Display system-wide information
"IndexServerAddress":["https://index.docker.io/v1/"], "IndexServerAddress":["https://index.docker.io/v1/"],
"MemoryLimit":true, "MemoryLimit":true,
"SwapLimit":false, "SwapLimit":false,
"IPv4Forwarding":true "IPv4Forwarding":true,
"Labels":["storage=ssd"]
} }
Status Codes: Status Codes:
@ -1511,7 +1515,6 @@ Sets up an exec instance in a running container `id`
"Cmd":[ "Cmd":[
"date" "date"
], ],
"Container":"e90e34656806",
} }
**Example response**: **Example response**:
@ -1525,7 +1528,12 @@ Sets up an exec instance in a running container `id`
Json Parameters: Json Parameters:
- **execConfig** – exec configuration. - **AttachStdin** - Boolean value, attaches to stdin of the exec command.
- **AttachStdout** - Boolean value, attaches to stdout of the exec command.
- **AttachStderr** - Boolean value, attaches to stderr of the exec command.
- **Tty** - Boolean value to allocate a pseudo-TTY
- **Cmd** - Command to run specified as a string or an array of strings.
Status Codes: Status Codes:
@ -1536,8 +1544,9 @@ Status Codes:
`POST /exec/(id)/start` `POST /exec/(id)/start`
Starts a previously set up exec instance `id`. If `detach` is true, this API returns after Starts a previously set up exec instance `id`. If `detach` is true, this API
starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. returns after starting the `exec` command. Otherwise, this API sets up an
interactive session with the `exec` command.
**Example request**: **Example request**:
@ -1558,7 +1567,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session
Json Parameters: Json Parameters:
- **execConfig** – exec configuration. - **Detach** - Detach from the exec command
- **Tty** - Boolean value to allocate a pseudo-TTY
Status Codes: Status Codes:
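Both API revisions now spell out the exec payloads field by field instead of referring to an opaque `execConfig`. As an illustration of how a client might drive the two endpoints with those fields (the endpoint address is a placeholder, and the create call is assumed to return the exec `Id` as in the example responses above):

```
package execexample

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// execCreate mirrors the documented parameters of POST /containers/(id)/exec.
type execCreate struct {
	AttachStdin  bool
	AttachStdout bool
	AttachStderr bool
	Tty          bool
	Cmd          []string
}

// execStart mirrors the documented parameters of POST /exec/(id)/start.
type execStart struct {
	Detach bool
	Tty    bool
}

func runDate(daemon, containerID string) error {
	body, _ := json.Marshal(execCreate{AttachStdout: true, AttachStderr: true, Cmd: []string{"date"}})
	resp, err := http.Post(daemon+"/containers/"+containerID+"/exec", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var created struct{ Id string }
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		return err
	}

	body, _ = json.Marshal(execStart{Detach: true})
	resp2, err := http.Post(daemon+"/exec/"+created.Id+"/start", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	return resp2.Body.Close()
}
```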

Просмотреть файл

@ -337,11 +337,36 @@ expose ports to the host, at runtime,
## ENV ## ENV
ENV <key> <value> ENV <key> <value>
ENV <key>=<value> ...
The `ENV` instruction sets the environment variable `<key>` to the value The `ENV` instruction sets the environment variable `<key>` to the value
`<value>`. This value will be passed to all future `RUN` instructions. This is `<value>`. This value will be passed to all future `RUN` instructions. This is
functionally equivalent to prefixing the command with `<key>=<value>` functionally equivalent to prefixing the command with `<key>=<value>`
The `ENV` instruction has two forms. The first form, `ENV <key> <value>`,
will set a single variable to a value. The entire string after the first
space will be treated as the `<value>` - including characters such as
spaces and quotes.
The second form, `ENV <key>=<value> ...`, allows for multiple variables to
be set at one time. Notice that the second form uses the equals sign (=)
in the syntax, while the first form does not. Like command line parsing,
quotes and backslashes can be used to include spaces within values.
For example:
ENV myName="John Doe" myDog=Rex\ The\ Dog \
myCat=fluffy
and
ENV myName John Doe
ENV myDog Rex The Dog
ENV myCat fluffy
will yield the same net results in the final container, but the first form
does it all in one layer.
The environment variables set using `ENV` will persist when a container is run The environment variables set using `ENV` will persist when a container is run
from the resulting image. You can view the values using `docker inspect`, and from the resulting image. You can view the values using `docker inspect`, and
change them using `docker run --env <key>=<value>`. change them using `docker run --env <key>=<value>`.
@ -566,6 +591,17 @@ To examine the result further, you can use `docker exec`:
And you can gracefully request `top` to shut down using `docker stop test`. And you can gracefully request `top` to shut down using `docker stop test`.
The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the
foreground (i.e., as `PID 1`):
```
FROM debian:stable
RUN apt-get update && apt-get install -y --force-yes apache2
EXPOSE 80 443
VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
```
If you need to write a starter script for a single executable, you can ensure that If you need to write a starter script for a single executable, you can ensure that
the final executable receives the Unix signals by using `exec` and `gosu` the final executable receives the Unix signals by using `exec` and `gosu`
(see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint) (see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint)

Просмотреть файл

@ -11,7 +11,7 @@ or execute `docker help`:
Usage: docker [OPTIONS] COMMAND [arg...] Usage: docker [OPTIONS] COMMAND [arg...]
-H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
A self-sufficient runtime for linux containers. A self-sufficient runtime for Linux containers.
... ...
@ -76,7 +76,7 @@ expect an integer, and they can only be specified once.
--ip-masq=true Enable IP masquerading for bridge's IP range --ip-masq=true Enable IP masquerading for bridge's IP range
--iptables=true Enable Docker's addition of iptables rules --iptables=true Enable Docker's addition of iptables rules
-l, --log-level="info" Set the logging level -l, --log-level="info" Set the logging level
--label=[] Set key=value labels to the daemon (displayed in `docker info`)
--mtu=0 Set the containers network MTU --mtu=0 Set the containers network MTU
if no value is provided: default to the default route MTU or 1500 if no default route is available if no value is provided: default to the default route MTU or 1500 if no default route is available
-p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file
@ -84,7 +84,7 @@ expect an integer, and they can only be specified once.
-s, --storage-driver="" Force the Docker runtime to use a specific storage driver -s, --storage-driver="" Force the Docker runtime to use a specific storage driver
--selinux-enabled=false Enable selinux support. SELinux does not presently support the BTRFS storage driver --selinux-enabled=false Enable selinux support. SELinux does not presently support the BTRFS storage driver
--storage-opt=[] Set storage driver options --storage-opt=[] Set storage driver options
--tls=false Use TLS; implied by tls-verify flags --tls=false Use TLS; implied by --tlsverify flag
--tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here
--tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file
--tlskey="/home/sven/.docker/key.pem" Path to TLS key file --tlskey="/home/sven/.docker/key.pem" Path to TLS key file
@ -111,7 +111,7 @@ requiring either `root` permission, or `docker` group membership.
If you need to access the Docker daemon remotely, you need to enable the `tcp` If you need to access the Docker daemon remotely, you need to enable the `tcp`
Socket. Beware that the default setup provides un-encrypted and un-authenticated Socket. Beware that the default setup provides un-encrypted and un-authenticated
direct access to the Docker daemon - and should be secured either using the direct access to the Docker daemon - and should be secured either using the
[built in https encrypted socket](/articles/https/), or by putting a secure web [built in HTTPS encrypted socket](/articles/https/), or by putting a secure web
proxy in front of it. You can listen on port `2375` on all network interfaces proxy in front of it. You can listen on port `2375` on all network interfaces
with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP
address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375`
@ -155,8 +155,8 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva
### Daemon storage-driver option ### Daemon storage-driver option
The Docker daemon has support for three different image layer storage drivers: `aufs`, The Docker daemon has support for several different image layer storage drivers: `aufs`,
`devicemapper`, and `btrfs`. `devicemapper`, `btrfs` and `overlayfs`.
The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
is unlikely to be merged into the main kernel. These are also known to cause some is unlikely to be merged into the main kernel. These are also known to cause some
@ -175,6 +175,9 @@ To tell the Docker daemon to use `devicemapper`, use
The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not
share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`.
`overlayfs` is a very fast union filesystem. It is merged into the mainline
Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
Call `docker -d -s overlayfs` to use it.
### Docker exec-driver option ### Docker exec-driver option
@ -312,6 +315,7 @@ To kill the container, use `docker kill`.
--force-rm=false Always remove intermediate containers, even after unsuccessful builds --force-rm=false Always remove intermediate containers, even after unsuccessful builds
--no-cache=false Do not use cache when building the image --no-cache=false Do not use cache when building the image
--pull=false Always attempt to pull a newer version of the image
-q, --quiet=false Suppress the verbose output generated by the containers -q, --quiet=false Suppress the verbose output generated by the containers
--rm=true Remove intermediate containers after a successful build --rm=true Remove intermediate containers after a successful build
-t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success -t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success
@ -458,7 +462,7 @@ Supported formats are: bzip2, gzip and xz.
This will clone the GitHub repository and use the cloned repository as This will clone the GitHub repository and use the cloned repository as
context. The Dockerfile at the root of the context. The Dockerfile at the root of the
repository is used as Dockerfile. Note that you repository is used as Dockerfile. Note that you
can specify an arbitrary Git repository by using the `git://` can specify an arbitrary Git repository by using the `git://` or `git@`
schema. schema.
> **Note:** `docker build` will return a `no such file or directory` error > **Note:** `docker build` will return a `no such file or directory` error
@ -738,19 +742,24 @@ decrease disk usage, and speed up `docker build` by
allowing each step to be cached. These intermediate layers are not shown allowing each step to be cached. These intermediate layers are not shown
by default. by default.
An image will be listed more than once if it has multiple repository names
or tags. This single image (identifiable by its matching `IMAGE ID`)
uses up the `VIRTUAL SIZE` listed only once.
#### Listing the most recently created images #### Listing the most recently created images
$ sudo docker images | head $ sudo docker images | head
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
<none> <none> 77af4d6b9913 19 hours ago 1.089 GB <none> <none> 77af4d6b9913 19 hours ago 1.089 GB
committest latest b6fa739cedf5 19 hours ago 1.089 GB committ latest b6fa739cedf5 19 hours ago 1.089 GB
<none> <none> 78a85c484f71 19 hours ago 1.089 GB <none> <none> 78a85c484f71 19 hours ago 1.089 GB
docker latest 30557a29d5ab 20 hours ago 1.089 GB docker latest 30557a29d5ab 20 hours ago 1.089 GB
<none> <none> 0124422dd9f9 20 hours ago 1.089 GB <none> <none> 5ed6274db6ce 24 hours ago 1.089 GB
<none> <none> 18ad6fad3402 22 hours ago 1.082 GB postgres 9 746b819f315e 4 days ago 213.4 MB
<none> <none> f9f1e26352f0 23 hours ago 1.089 GB postgres 9.3 746b819f315e 4 days ago 213.4 MB
tryout latest 2629d1fa0b81 23 hours ago 131.5 MB postgres 9.3.5 746b819f315e 4 days ago 213.4 MB
<none> <none> 5ed6274db6ce 24 hours ago 1.089 GB postgres latest 746b819f315e 4 days ago 213.4 MB
#### Listing the full length image IDs #### Listing the full length image IDs
@ -851,11 +860,15 @@ For example:
$ sudo docker -D info $ sudo docker -D info
Containers: 14 Containers: 14
Images: 52 Images: 52
Storage Driver: btrfs Storage Driver: aufs
Root Dir: /var/lib/docker/aufs
Dirs: 545
Execution Driver: native-0.2 Execution Driver: native-0.2
Kernel Version: 3.13.0-24-generic Kernel Version: 3.13.0-24-generic
Operating System: Ubuntu 14.04 LTS Operating System: Ubuntu 14.04 LTS
CPUs: 1 CPUs: 1
Name: prod-server-42
ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS
Total Memory: 2 GiB Total Memory: 2 GiB
Debug mode (server): false Debug mode (server): false
Debug mode (client): true Debug mode (client): true
@ -865,6 +878,8 @@ For example:
Init Path: /usr/bin/docker Init Path: /usr/bin/docker
Username: svendowideit Username: svendowideit
Registry: [https://index.docker.io/v1/] Registry: [https://index.docker.io/v1/]
Labels:
storage=ssd
The global `-D` option tells all `docker` commands to output debug information. The global `-D` option tells all `docker` commands to output debug information.

Просмотреть файл

@ -4,6 +4,35 @@ page_keywords: docker, documentation, about, technology, understanding, release
#Release Notes #Release Notes
##Version 1.3.2
(2014-11-24)
This release fixes some bugs and addresses some security issues. We have also
made improvements to aspects of `docker run`.
*Security fixes*
Patches and changes were made to address CVE-2014-6407 and CVE-2014-6408.
Specifically, changes were made in order to:
* Prevent host privilege escalation from an image extraction vulnerability (CVE-2014-6407).
* Prevent container escalation from malicious security options applied to images (CVE-2014-6408).
*Daemon fixes*
The `--insecure-registry` flag of the Docker daemon has undergone
several refinements and additions. For details, please see the
[command-line reference](http://docs.docker.com/reference/commandline/cli/#run).
* You can now specify a subnet to mark a whole range of registries as insecure to the Docker daemon (see the matching sketch below).
* By default, Docker now defines `localhost` as an insecure registry.
* Registries can now be referenced using the Classless Inter-Domain Routing (CIDR) format.
* When mirroring is enabled, the experimental registry v2 API is skipped.
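With CIDR support, the daemon no longer needs an exact host match to treat a registry as insecure; it can check whether the registry's address falls inside a configured range. The heart of such a check is `net.ParseCIDR` plus `Contains` (a sketch of the matching logic, not the daemon's actual code):

```
package cidrexample

import "net"

// insecureByCIDR reports whether registryIP falls inside any of the
// ranges given via --insecure-registry, e.g. "10.1.0.0/16".
func insecureByCIDR(registryIP string, ranges []string) bool {
	ip := net.ParseIP(registryIP)
	if ip == nil {
		return false
	}
	for _, r := range ranges {
		if _, ipnet, err := net.ParseCIDR(r); err == nil && ipnet.Contains(ip) {
			return true
		}
	}
	return false
}
```

Under this scheme `--insecure-registry 10.1.0.0/16` covers every registry in that subnet, and the built-in 127.0.0.0/8 range covers `localhost` without any flag.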
##Version 1.3.1 ##Version 1.3.1
(2014-10-28) (2014-10-28)

Просмотреть файл

@ -30,24 +30,21 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
defer os.RemoveAll(tempdir) defer os.RemoveAll(tempdir)
rootRepoMap := map[string]Repository{} rootRepoMap := map[string]Repository{}
addKey := func(name string, tag string, id string) {
log.Debugf("add key [%s:%s]", name, tag)
if repo, ok := rootRepoMap[name]; !ok {
rootRepoMap[name] = Repository{tag: id}
} else {
repo[tag] = id
}
}
for _, name := range job.Args { for _, name := range job.Args {
log.Debugf("Serializing %s", name) log.Debugf("Serializing %s", name)
rootRepo := s.Repositories[name] rootRepo := s.Repositories[name]
if rootRepo != nil { if rootRepo != nil {
// this is a base repo name, like 'busybox' // this is a base repo name, like 'busybox'
for _, id := range rootRepo { for tag, id := range rootRepo {
if _, ok := rootRepoMap[name]; !ok { addKey(name, tag, id)
rootRepoMap[name] = rootRepo
} else {
log.Debugf("Duplicate key [%s]", name)
if rootRepoMap[name].Contains(rootRepo) {
log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo)
continue
}
log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo)
rootRepoMap[name].Update(rootRepo)
}
if err := s.exportImage(job.Eng, id, tempdir); err != nil { if err := s.exportImage(job.Eng, id, tempdir); err != nil {
return job.Error(err) return job.Error(err)
} }
@ -65,18 +62,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
// check this length, because a lookup of a truncated hash will not have a tag // check this length, because a lookup of a truncated hash will not have a tag
// and will not need to be added to this map // and will not need to be added to this map
if len(repoTag) > 0 { if len(repoTag) > 0 {
if _, ok := rootRepoMap[repoName]; !ok { addKey(repoName, repoTag, img.ID)
rootRepoMap[repoName] = Repository{repoTag: img.ID}
} else {
log.Debugf("Duplicate key [%s]", repoName)
newRepo := Repository{repoTag: img.ID}
if rootRepoMap[repoName].Contains(newRepo) {
log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo)
continue
}
log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo)
rootRepoMap[repoName].Update(newRepo)
}
} }
if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
return job.Error(err) return job.Error(err)
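The rewrite above collapses two near-identical blocks (one for named repositories, one for image IDs) into the single `addKey` closure that fills `rootRepoMap` one tag at a time. It is the usual get-or-create-the-inner-map idiom; stripped of the `Repository` type it looks like this (sketch only):

```
package tagexample

// addTag records name:tag -> id, creating the inner map on first use.
// This mirrors the shape of the addKey closure in the export rewrite.
func addTag(root map[string]map[string]string, name, tag, id string) {
	if repo, ok := root[name]; ok {
		repo[tag] = id
		return
	}
	root[name] = map[string]string{tag: id}
}
```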

Просмотреть файл

@ -1,3 +1,5 @@
// +build linux
package graph package graph
import ( import (
@ -11,6 +13,7 @@ import (
"github.com/docker/docker/engine" "github.com/docker/docker/engine"
"github.com/docker/docker/image" "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
) )
// Loads a set of images into the repository. This is the complementary of ImageExport. // Loads a set of images into the repository. This is the complementary of ImageExport.
@ -53,7 +56,7 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
excludes[i] = k excludes[i] = k
i++ i++
} }
if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
return job.Error(err) return job.Error(err)
} }

11
graph/load_unsupported.go Normal file
Просмотреть файл

@ -0,0 +1,11 @@
// +build !linux
package graph
import (
"github.com/docker/docker/engine"
)
func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
return job.Errorf("CmdLoad is not supported on this platform")
}

Просмотреть файл

@ -1,6 +1,14 @@
package graph package graph
import "testing" import (
"testing"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Init()
}
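The only functional change in this test file is the `reexec.Init()` call: because `chrootarchive` re-executes the current binary to perform its chroot'ed extraction, any binary that links it, including test binaries, has to give those re-exec entry points a chance to run first. A toy illustration of the register/init/command cycle, assuming a `pkg/reexec` API of `Register`, `Init`, and `Command`:

```
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs when the binary is re-executed under this name; chrootarchive
	// uses the same mechanism to get a child it can safely chroot.
	reexec.Register("example-child", func() {
		fmt.Println("running as the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	// In the child, Init() matches os.Args[0] against registered names,
	// runs the handler and returns true; in the parent it returns false.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("example-child")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```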
func TestPools(t *testing.T) { func TestPools(t *testing.T) {
s := &TagStore{ s := &TagStore{

Просмотреть файл

@ -179,6 +179,7 @@ func TestBuildEnvironmentReplacementAddCopy(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
@ -632,6 +633,8 @@ func TestBuildSixtySteps(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -656,6 +659,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -674,6 +679,8 @@ ADD test_file .`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
@ -708,6 +715,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio'
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -947,6 +956,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -971,6 +982,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -996,6 +1009,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1022,6 +1037,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1040,6 +1057,8 @@ ADD . /`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1064,6 +1083,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1082,6 +1103,8 @@ COPY test_file .`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
@ -1116,6 +1139,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio'
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1140,6 +1165,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1163,6 +1190,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1188,6 +1217,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1214,6 +1245,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1231,6 +1264,8 @@ COPY . /`,
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1858,6 +1893,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".")
if err != nil { if err != nil {
@ -1874,6 +1910,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".")
if err != nil { if err != nil {
@ -1890,6 +1927,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".")
if err != nil { if err != nil {
@ -2391,7 +2429,7 @@ func TestBuildNoContext(t *testing.T) {
t.Fatalf("build failed to complete: %v %v", out, err) t.Fatalf("build failed to complete: %v %v", out, err)
} }
if out, _, err := cmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil {
t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") t.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
} }
@ -2984,6 +3022,8 @@ RUN [ "$(cat $TO)" = "hello" ]
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true) _, err = buildImageFromContext(name, ctx, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -2991,6 +3031,46 @@ RUN [ "$(cat $TO)" = "hello" ]
logDone("build - environment variables usage") logDone("build - environment variables usage")
} }
func TestBuildEnvUsage2(t *testing.T) {
name := "testbuildenvusage2"
defer deleteImages(name)
dockerfile := `FROM busybox
ENV abc=def
RUN [ "$abc" = "def" ]
ENV def="hello world"
RUN [ "$def" = "hello world" ]
ENV def=hello\ world
RUN [ "$def" = "hello world" ]
ENV v1=abc v2="hi there"
RUN [ "$v1" = "abc" ]
RUN [ "$v2" = "hi there" ]
ENV v3='boogie nights' v4="with'quotes too"
RUN [ "$v3" = "boogie nights" ]
RUN [ "$v4" = "with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc "zzz"
RUN [ $abc = \"zzz\" ]
ENV abc 'yyy'
RUN [ $abc = \'yyy\' ]
ENV abc=
RUN [ "$abc" = "" ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
})
if err != nil {
t.Fatal(err)
}
_, err = buildImageFromContext(name, ctx, true)
if err != nil {
t.Fatal(err)
}
logDone("build - environment variables usage2")
}
func TestBuildAddScript(t *testing.T) { func TestBuildAddScript(t *testing.T) {
name := "testbuildaddscript" name := "testbuildaddscript"
defer deleteImages(name) defer deleteImages(name)
@ -3006,6 +3086,8 @@ RUN [ "$(cat /testfile)" = 'test!' ]`
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer ctx.Close()
_, err = buildImageFromContext(name, ctx, true) _, err = buildImageFromContext(name, ctx, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -3060,6 +3142,7 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
} }
return &FakeContext{Dir: tmpDir} return &FakeContext{Dir: tmpDir}
}() }()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil { if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) t.Fatalf("build failed to complete for TestBuildAddTar: %v", err)

Просмотреть файл

@ -23,7 +23,7 @@ const (
// Test for #5656 // Test for #5656
// Check that garbage paths don't escape the container's rootfs // Check that garbage paths don't escape the container's rootfs
func TestCpGarbagePath(t *testing.T) { func TestCpGarbagePath(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -31,7 +31,7 @@ func TestCpGarbagePath(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -59,7 +59,7 @@ func TestCpGarbagePath(t *testing.T) {
path := filepath.Join("../../../../../../../../../../../../", cpFullPath) path := filepath.Join("../../../../../../../../../../../../", cpFullPath)
_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err)
} }
@ -85,7 +85,7 @@ func TestCpGarbagePath(t *testing.T) {
// Check that relative paths are relative to the container's rootfs // Check that relative paths are relative to the container's rootfs
func TestCpRelativePath(t *testing.T) { func TestCpRelativePath(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -93,7 +93,7 @@ func TestCpRelativePath(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -122,7 +122,7 @@ func TestCpRelativePath(t *testing.T) {
path, _ := filepath.Rel("/", cpFullPath) path, _ := filepath.Rel("/", cpFullPath)
_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err)
} }
@ -148,7 +148,7 @@ func TestCpRelativePath(t *testing.T) {
// Check that absolute paths are relative to the container's rootfs // Check that absolute paths are relative to the container's rootfs
func TestCpAbsolutePath(t *testing.T) { func TestCpAbsolutePath(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -156,7 +156,7 @@ func TestCpAbsolutePath(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -185,7 +185,7 @@ func TestCpAbsolutePath(t *testing.T) {
path := cpFullPath path := cpFullPath
_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
} }
@ -212,7 +212,7 @@ func TestCpAbsolutePath(t *testing.T) {
// Test for #5619 // Test for #5619
// Check that absolute symlinks are still relative to the container's rootfs // Check that absolute symlinks are still relative to the container's rootfs
func TestCpAbsoluteSymlink(t *testing.T) { func TestCpAbsoluteSymlink(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path")
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -220,7 +220,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -249,7 +249,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
path := filepath.Join("/", "container_path") path := filepath.Join("/", "container_path")
_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
} }
@ -276,7 +276,7 @@ func TestCpAbsoluteSymlink(t *testing.T) {
// Test for #5619 // Test for #5619
// Check that symlinks which are part of the resource path are still relative to the container's rootfs // Check that symlinks which are part of the resource path are still relative to the container's rootfs
func TestCpSymlinkComponent(t *testing.T) { func TestCpSymlinkComponent(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path")
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -284,7 +284,7 @@ func TestCpSymlinkComponent(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -313,7 +313,7 @@ func TestCpSymlinkComponent(t *testing.T) {
path := filepath.Join("/", "container_path", cpTestName) path := filepath.Join("/", "container_path", cpTestName)
_, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err)
} }
@ -339,7 +339,7 @@ func TestCpSymlinkComponent(t *testing.T) {
// Check that cp with unprivileged user doesn't return any error // Check that cp with unprivileged user doesn't return any error
func TestCpUnprivilegedUser(t *testing.T) { func TestCpUnprivilegedUser(t *testing.T) {
out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -347,7 +347,7 @@ func TestCpUnprivilegedUser(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
@ -389,7 +389,7 @@ func TestCpVolumePath(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
out, exitCode, err := cmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
if err != nil || exitCode != 0 { if err != nil || exitCode != 0 {
t.Fatal("failed to create a container", out, err) t.Fatal("failed to create a container", out, err)
} }
@ -397,13 +397,13 @@ func TestCpVolumePath(t *testing.T) {
cleanedContainerID := stripTrailingCharacters(out) cleanedContainerID := stripTrailingCharacters(out)
defer deleteContainer(cleanedContainerID) defer deleteContainer(cleanedContainerID)
out, _, err = cmd(t, "wait", cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID)
if err != nil || stripTrailingCharacters(out) != "0" { if err != nil || stripTrailingCharacters(out) != "0" {
t.Fatal("failed to set up container", out, err) t.Fatal("failed to set up container", out, err)
} }
// Copy actual volume path // Copy actual volume path
_, _, err = cmd(t, "cp", cleanedContainerID+":/foo", outDir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
} }
@ -423,7 +423,7 @@ func TestCpVolumePath(t *testing.T) {
} }
// Copy file nested in volume // Copy file nested in volume
_, _, err = cmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
} }
@ -436,7 +436,7 @@ func TestCpVolumePath(t *testing.T) {
} }
// Copy Bind-mounted dir // Copy Bind-mounted dir
_, _, err = cmd(t, "cp", cleanedContainerID+":/baz", outDir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir)
if err != nil { if err != nil {
t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err)
} }
@ -449,7 +449,7 @@ func TestCpVolumePath(t *testing.T) {
} }
// Copy file nested in bind-mounted dir // Copy file nested in bind-mounted dir
_, _, err = cmd(t, "cp", cleanedContainerID+":/baz/test", outDir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir)
fb, err := ioutil.ReadFile(outDir + "/baz/test") fb, err := ioutil.ReadFile(outDir + "/baz/test")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -463,7 +463,7 @@ func TestCpVolumePath(t *testing.T) {
} }
// Copy bind-mounted file // Copy bind-mounted file
_, _, err = cmd(t, "cp", cleanedContainerID+":/test", outDir) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir)
fb, err = ioutil.ReadFile(outDir + "/test") fb, err = ioutil.ReadFile(outDir + "/test")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

Просмотреть файл

@ -16,12 +16,12 @@ import (
) )
func TestEventsUntag(t *testing.T) { func TestEventsUntag(t *testing.T) {
out, _, _ := cmd(t, "images", "-q") out, _, _ := dockerCmd(t, "images", "-q")
image := strings.Split(out, "\n")[0] image := strings.Split(out, "\n")[0]
cmd(t, "tag", image, "utest:tag1") dockerCmd(t, "tag", image, "utest:tag1")
cmd(t, "tag", image, "utest:tag2") dockerCmd(t, "tag", image, "utest:tag2")
cmd(t, "rmi", "utest:tag1") dockerCmd(t, "rmi", "utest:tag1")
cmd(t, "rmi", "utest:tag2") dockerCmd(t, "rmi", "utest:tag2")
eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1") eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1")
out, _, _ = runCommandWithOutput(eventsCmd) out, _, _ = runCommandWithOutput(eventsCmd)
events := strings.Split(out, "\n") events := strings.Split(out, "\n")
@ -39,11 +39,11 @@ func TestEventsUntag(t *testing.T) {
func TestEventsPause(t *testing.T) { func TestEventsPause(t *testing.T) {
name := "testeventpause" name := "testeventpause"
out, _, _ := cmd(t, "images", "-q") out, _, _ := dockerCmd(t, "images", "-q")
image := strings.Split(out, "\n")[0] image := strings.Split(out, "\n")[0]
cmd(t, "run", "-d", "--name", name, image, "sleep", "2") dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2")
cmd(t, "pause", name) dockerCmd(t, "pause", name)
cmd(t, "unpause", name) dockerCmd(t, "unpause", name)
defer deleteAllContainers() defer deleteAllContainers()
@ -75,7 +75,7 @@ func TestEventsPause(t *testing.T) {
func TestEventsContainerFailStartDie(t *testing.T) { func TestEventsContainerFailStartDie(t *testing.T) {
defer deleteAllContainers() defer deleteAllContainers()
out, _, _ := cmd(t, "images", "-q") out, _, _ := dockerCmd(t, "images", "-q")
image := strings.Split(out, "\n")[0] image := strings.Split(out, "\n")[0]
eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg")
_, _, err := runCommandWithOutput(eventsCmd) _, _, err := runCommandWithOutput(eventsCmd)
@ -106,7 +106,7 @@ func TestEventsContainerFailStartDie(t *testing.T) {
func TestEventsLimit(t *testing.T) { func TestEventsLimit(t *testing.T) {
defer deleteAllContainers() defer deleteAllContainers()
for i := 0; i < 30; i++ { for i := 0; i < 30; i++ {
cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i))
} }
eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
out, _, _ := runCommandWithOutput(eventsCmd) out, _, _ := runCommandWithOutput(eventsCmd)
@ -119,7 +119,7 @@ func TestEventsLimit(t *testing.T) {
} }
func TestEventsContainerEvents(t *testing.T) { func TestEventsContainerEvents(t *testing.T) {
cmd(t, "run", "--rm", "busybox", "true") dockerCmd(t, "run", "--rm", "busybox", "true")
eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
out, exitCode, err := runCommandWithOutput(eventsCmd) out, exitCode, err := runCommandWithOutput(eventsCmd)
if exitCode != 0 || err != nil { if exitCode != 0 || err != nil {
@ -190,7 +190,7 @@ func TestEventsRedirectStdout(t *testing.T) {
since := time.Now().Unix() since := time.Now().Unix()
cmd(t, "run", "busybox", "true") dockerCmd(t, "run", "busybox", "true")
defer deleteAllContainers() defer deleteAllContainers()

Просмотреть файл

@ -186,3 +186,30 @@ func TestExecAfterDaemonRestart(t *testing.T) {
logDone("exec - exec running container after daemon restart") logDone("exec - exec running container after daemon restart")
} }
// Regression test for #9155, #9044
func TestExecEnv(t *testing.T) {
defer deleteAllContainers()
runCmd := exec.Command(dockerBinary, "run",
"-e", "LALA=value1",
"-e", "LALA=value2",
"-d", "--name", "testing", "busybox", "top")
if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
t.Fatal(out, err)
}
execCmd := exec.Command(dockerBinary, "exec", "testing", "env")
out, _, err := runCommandWithOutput(execCmd)
if err != nil {
t.Fatal(out, err)
}
if strings.Contains(out, "LALA=value1") ||
!strings.Contains(out, "LALA=value2") ||
!strings.Contains(out, "HOME=/root") {
t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root")
}
logDone("exec - exec inherits correct env")
}

Просмотреть файл

@ -1,7 +1,10 @@
package main package main
import ( import (
"fmt"
"os/exec" "os/exec"
"reflect"
"sort"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -63,3 +66,59 @@ func TestImagesOrderedByCreationDate(t *testing.T) {
logDone("images - ordering by creation date") logDone("images - ordering by creation date")
} }
func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) {
imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123")
out, _, err := runCommandWithOutput(imagesCmd)
if !strings.Contains(out, "Invalid filter") {
t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err)
}
logDone("images - invalid filter name check working")
}
func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) {
imageName := "images_filter_test"
defer deleteAllContainers()
defer deleteImages(imageName)
buildImage(imageName,
`FROM scratch
RUN touch /test/foo
RUN touch /test/bar
RUN touch /test/baz`, true)
filters := []string{
"dangling=true",
"Dangling=true",
" dangling=true",
"dangling=true ",
"dangling = true",
}
imageListings := make([][]string, 5, 5)
for idx, filter := range filters {
cmd := exec.Command(dockerBinary, "images", "-f", filter)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
t.Fatal(err)
}
listing := strings.Split(out, "\n")
sort.Strings(listing)
imageListings[idx] = listing
}
for idx, listing := range imageListings {
if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) {
for idx, errListing := range imageListings {
fmt.Printf("out %d", idx)
for _, image := range errListing {
fmt.Print(image)
}
fmt.Print("")
}
t.Fatalf("All output must be the same")
}
}
logDone("images - white space trimming and lower casing")
}


@ -62,21 +62,21 @@ func TestLinksPingUnlinkedContainers(t *testing.T) {
func TestLinksPingLinkedContainers(t *testing.T) { func TestLinksPingLinkedContainers(t *testing.T) {
var out string var out string
out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
idA := stripTrailingCharacters(out) idA := stripTrailingCharacters(out)
out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
idB := stripTrailingCharacters(out) idB := stripTrailingCharacters(out)
cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") dockerCmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
cmd(t, "kill", idA) dockerCmd(t, "kill", idA)
cmd(t, "kill", idB) dockerCmd(t, "kill", idB)
deleteAllContainers() deleteAllContainers()
logDone("links - ping linked container") logDone("links - ping linked container")
} }
func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10")
cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10")
childIP := findContainerIP(t, "child") childIP := findContainerIP(t, "child")
parentIP := findContainerIP(t, "parent") parentIP := findContainerIP(t, "parent")
@ -87,13 +87,13 @@ func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
t.Fatal("Iptables rules not found") t.Fatal("Iptables rules not found")
} }
cmd(t, "rm", "--link", "parent/http") dockerCmd(t, "rm", "--link", "parent/http")
if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) { if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) {
t.Fatal("Iptables rules should be removed when unlink") t.Fatal("Iptables rules should be removed when unlink")
} }
cmd(t, "kill", "child") dockerCmd(t, "kill", "child")
cmd(t, "kill", "parent") dockerCmd(t, "kill", "parent")
deleteAllContainers() deleteAllContainers()
logDone("link - verify iptables when link and unlink") logDone("link - verify iptables when link and unlink")
@ -105,9 +105,9 @@ func TestLinksInspectLinksStarted(t *testing.T) {
result []string result []string
) )
defer deleteAllContainers() defer deleteAllContainers()
cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -134,9 +134,9 @@ func TestLinksInspectLinksStopped(t *testing.T) {
result []string result []string
) )
defer deleteAllContainers() defer deleteAllContainers()
cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)


@ -29,7 +29,7 @@ func TestRmiWithContainerFails(t *testing.T) {
} }
// make sure it didn't delete the busybox name // make sure it didn't delete the busybox name
images, _, _ := cmd(t, "images") images, _, _ := dockerCmd(t, "images")
if !strings.Contains(images, "busybox") { if !strings.Contains(images, "busybox") {
t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) t.Fatalf("The name 'busybox' should not have been removed from images: %q", images)
} }
@ -40,35 +40,35 @@ func TestRmiWithContainerFails(t *testing.T) {
} }
func TestRmiTag(t *testing.T) { func TestRmiTag(t *testing.T) {
imagesBefore, _, _ := cmd(t, "images", "-a") imagesBefore, _, _ := dockerCmd(t, "images", "-a")
cmd(t, "tag", "busybox", "utest:tag1") dockerCmd(t, "tag", "busybox", "utest:tag1")
cmd(t, "tag", "busybox", "utest/docker:tag2") dockerCmd(t, "tag", "busybox", "utest/docker:tag2")
cmd(t, "tag", "busybox", "utest:5000/docker:tag3") dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3")
{ {
imagesAfter, _, _ := cmd(t, "images", "-a") imagesAfter, _, _ := dockerCmd(t, "images", "-a")
if nLines(imagesAfter) != nLines(imagesBefore)+3 { if nLines(imagesAfter) != nLines(imagesBefore)+3 {
t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
} }
} }
cmd(t, "rmi", "utest/docker:tag2") dockerCmd(t, "rmi", "utest/docker:tag2")
{ {
imagesAfter, _, _ := cmd(t, "images", "-a") imagesAfter, _, _ := dockerCmd(t, "images", "-a")
if nLines(imagesAfter) != nLines(imagesBefore)+2 { if nLines(imagesAfter) != nLines(imagesBefore)+2 {
t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
} }
} }
cmd(t, "rmi", "utest:5000/docker:tag3") dockerCmd(t, "rmi", "utest:5000/docker:tag3")
{ {
imagesAfter, _, _ := cmd(t, "images", "-a") imagesAfter, _, _ := dockerCmd(t, "images", "-a")
if nLines(imagesAfter) != nLines(imagesBefore)+1 { if nLines(imagesAfter) != nLines(imagesBefore)+1 {
t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
} }
} }
cmd(t, "rmi", "utest:tag1") dockerCmd(t, "rmi", "utest:tag1")
{ {
imagesAfter, _, _ := cmd(t, "images", "-a") imagesAfter, _, _ := dockerCmd(t, "images", "-a")
if nLines(imagesAfter) != nLines(imagesBefore)+0 { if nLines(imagesAfter) != nLines(imagesBefore)+0 {
t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
} }


@ -798,7 +798,7 @@ func TestRunLoopbackWhenNetworkDisabled(t *testing.T) {
} }
func TestRunNetHostNotAllowedWithLinks(t *testing.T) { func TestRunNetHostNotAllowedWithLinks(t *testing.T) {
_, _, err := cmd(t, "run", "--name", "linked", "busybox", "true") _, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true")
cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true")
_, _, err = runCommandWithOutput(cmd) _, _, err = runCommandWithOutput(cmd)
@ -1204,7 +1204,7 @@ func TestRunModeHostname(t *testing.T) {
} }
func TestRunRootWorkdir(t *testing.T) { func TestRunRootWorkdir(t *testing.T) {
s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd") s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd")
if err != nil { if err != nil {
t.Fatal(s, err) t.Fatal(s, err)
} }
@ -1218,7 +1218,7 @@ func TestRunRootWorkdir(t *testing.T) {
} }
func TestRunAllowBindMountingRoot(t *testing.T) { func TestRunAllowBindMountingRoot(t *testing.T) {
s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host")
if err != nil { if err != nil {
t.Fatal(s, err) t.Fatal(s, err)
} }
@ -1257,6 +1257,7 @@ func TestRunWithVolumesIsRecursive(t *testing.T) {
if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil {
t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
} }
defer mount.Unmount(tmpfsDir)
f, err := ioutil.TempFile(tmpfsDir, "touch-me") f, err := ioutil.TempFile(tmpfsDir, "touch-me")
if err != nil { if err != nil {
@ -2687,3 +2688,28 @@ func TestContainerNetworkMode(t *testing.T) {
logDone("run - container shared network namespace") logDone("run - container shared network namespace")
} }
func TestRunTLSverify(t *testing.T) {
cmd := exec.Command(dockerBinary, "ps")
out, ec, err := runCommandWithOutput(cmd)
if err != nil || ec != 0 {
t.Fatalf("Should have worked: %v:\n%v", err, out)
}
// Regardless of whether we specify true or false we need to
// test to make sure tls is turned on if --tlsverify is specified at all
cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps")
out, ec, err = runCommandWithOutput(cmd)
if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") {
t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
}
cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps")
out, ec, err = runCommandWithOutput(cmd)
if err == nil || ec == 0 || !strings.Contains(out, "cert") {
t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
}
logDone("run - verify tls is set for --tlsverify")
}


@ -8,6 +8,8 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"reflect" "reflect"
"sort"
"strings"
"testing" "testing"
"github.com/docker/docker/vendor/src/github.com/kr/pty" "github.com/docker/docker/vendor/src/github.com/kr/pty"
@ -260,6 +262,66 @@ func TestSaveMultipleNames(t *testing.T) {
logDone("save - save by multiple names") logDone("save - save by multiple names")
} }
func TestSaveRepoWithMultipleImages(t *testing.T) {
makeImage := func(from string, tag string) string {
runCmd := exec.Command(dockerBinary, "run", "-d", from, "true")
var (
out string
err error
)
if out, _, err = runCommandWithOutput(runCmd); err != nil {
t.Fatalf("failed to create a container: %v %v", out, err)
}
cleanedContainerID := stripTrailingCharacters(out)
commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
t.Fatalf("failed to commit container: %v %v", out, err)
}
imageID := stripTrailingCharacters(out)
deleteContainer(cleanedContainerID)
return imageID
}
repoName := "foobar-save-multi-images-test"
tagFoo := repoName + ":foo"
tagBar := repoName + ":bar"
idFoo := makeImage("busybox:latest", tagFoo)
idBar := makeImage("busybox:latest", tagBar)
deleteImages(repoName)
// create the archive
saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep 'VERSION' |cut -d / -f1", dockerBinary, repoName)
saveCmd := exec.Command("bash", "-c", saveCmdFinal)
out, _, err := runCommandWithOutput(saveCmd)
if err != nil {
t.Fatalf("failed to save multiple images: %s, %v", out, err)
}
actual := strings.Split(stripTrailingCharacters(out), "\n")
// make the list of expected layers
historyCmdFinal := fmt.Sprintf("%v history -q --no-trunc %v", dockerBinary, "busybox:latest")
historyCmd := exec.Command("bash", "-c", historyCmdFinal)
out, _, err = runCommandWithOutput(historyCmd)
if err != nil {
t.Fatalf("failed to get history: %s, %v", out, err)
}
expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar)
sort.Strings(actual)
sort.Strings(expected)
if !reflect.DeepEqual(expected, actual) {
t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected)
}
logDone("save - save repository with multiple images")
}
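The pipeline in saveCmdFinal works because of the on-disk layout of `docker save` output: the archive contains one top-level directory per layer, each holding a VERSION file, so listing those directories yields the layer IDs. A rough Go sketch of the same extraction (the function name and the `go run` invocation are mine, not part of this change):

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "log"
    "os"
    "strings"
)

// layerIDs mirrors `tar t | grep 'VERSION' | cut -d / -f1`: it walks a
// `docker save` stream and returns every top-level directory that holds a
// VERSION file, which is how the test above identifies individual layers.
func layerIDs(saved io.Reader) ([]string, error) {
    tr := tar.NewReader(saved)
    var ids []string
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            return ids, nil
        }
        if err != nil {
            return nil, err
        }
        if parts := strings.SplitN(hdr.Name, "/", 2); len(parts) == 2 && parts[1] == "VERSION" {
            ids = append(ids, parts[0])
        }
    }
}

func main() {
    // e.g. docker save foobar-save-multi-images-test | go run main.go
    ids, err := layerIDs(os.Stdin)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ids)
}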
// Issue #6722 #5892 ensure directories are included in changes // Issue #6722 #5892 ensure directories are included in changes
func TestSaveDirectoryPermissions(t *testing.T) { func TestSaveDirectoryPermissions(t *testing.T) {
layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}


@ -12,8 +12,8 @@ import (
func TestStartAttachReturnsOnError(t *testing.T) { func TestStartAttachReturnsOnError(t *testing.T) {
defer deleteAllContainers() defer deleteAllContainers()
cmd(t, "run", "-d", "--name", "test", "busybox") dockerCmd(t, "run", "-d", "--name", "test", "busybox")
cmd(t, "stop", "test") dockerCmd(t, "stop", "test")
// Expect this to fail because the above container is stopped, this is what we want // Expect this to fail because the above container is stopped, this is what we want
if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil {
@ -73,7 +73,7 @@ func TestStartRecordError(t *testing.T) {
defer deleteAllContainers() defer deleteAllContainers()
// when container runs successfully, we should not have state.Error // when container runs successfully, we should not have state.Error
cmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
stateErr, err := inspectField("test", "State.Error") stateErr, err := inspectField("test", "State.Error")
if err != nil { if err != nil {
t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
@ -97,8 +97,8 @@ func TestStartRecordError(t *testing.T) {
} }
// Expect the conflict to be resolved when we stop the initial container // Expect the conflict to be resolved when we stop the initial container
cmd(t, "stop", "test") dockerCmd(t, "stop", "test")
cmd(t, "start", "test2") dockerCmd(t, "start", "test2")
stateErr, err = inspectField("test2", "State.Error") stateErr, err = inspectField("test2", "State.Error")
if err != nil { if err != nil {
t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
@ -115,7 +115,7 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
defer deleteAllContainers() defer deleteAllContainers()
// Create the first data volume // Create the first data volume
cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
// Expect this to fail because the data_after container doesn't exist yet // Expect this to fail because the data_after container doesn't exist yet
if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil { if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
@ -123,13 +123,13 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
} }
// Create the second data volume // Create the second data volume
cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
// Now, all the volumes should be there // Now, all the volumes should be there
cmd(t, "start", "consumer") dockerCmd(t, "start", "consumer")
// Check that we have the volumes we want // Check that we have the volumes we want
out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
n_volumes := strings.Trim(out, " \r\n'") n_volumes := strings.Trim(out, " \r\n'")
if n_volumes != "2" { if n_volumes != "2" {
t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) t.Fatalf("Missing volumes: expected 2, got %s", n_volumes)


@ -356,11 +356,6 @@ func pullImageIfNotExist(image string) (err error) {
return return
} }
// deprecated, use dockerCmd instead
func cmd(t *testing.T, args ...string) (string, int, error) {
return dockerCmd(t, args...)
}
func dockerCmd(t *testing.T, args ...string) (string, int, error) { func dockerCmd(t *testing.T, args ...string) (string, int, error) {
out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
if err != nil { if err != nil {


@ -9,6 +9,7 @@ import (
"net/http/httptest" "net/http/httptest"
"os" "os"
"path" "path"
"path/filepath"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -187,6 +188,7 @@ func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine {
// Either InterContainerCommunication or EnableIptables must be set, // Either InterContainerCommunication or EnableIptables must be set,
// otherwise NewDaemon will fail because of conflicting settings. // otherwise NewDaemon will fail because of conflicting settings.
InterContainerCommunication: true, InterContainerCommunication: true,
TrustKeyPath: filepath.Join(root, "key.json"),
} }
d, err := daemon.NewDaemon(cfg, eng) d, err := daemon.NewDaemon(cfg, eng)
if err != nil { if err != nil {


@ -43,6 +43,10 @@ func MirrorListVar(values *[]string, names []string, usage string) {
flag.Var(newListOptsRef(values, ValidateMirror), names, usage) flag.Var(newListOptsRef(values, ValidateMirror), names, usage)
} }
func LabelListVar(values *[]string, names []string, usage string) {
flag.Var(newListOptsRef(values, ValidateLabel), names, usage)
}
// ListOpts type // ListOpts type
type ListOpts struct { type ListOpts struct {
values *[]string values *[]string
@ -227,3 +231,10 @@ func ValidateMirror(val string) (string, error) {
return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil
} }
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") != 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
}
return val, nil
}
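A small usage sketch for the new validator. The `github.com/docker/docker/opts` import path is assumed here; ValidateLabel accepts exactly one `=` per value, and LabelListVar simply registers a string-list flag whose elements pass through it.

package main

import (
    "fmt"

    "github.com/docker/docker/opts" // assumed import path for the file above
)

func main() {
    // A well-formed key=value pair passes through unchanged.
    if v, err := opts.ValidateLabel("storage=ssd"); err == nil {
        fmt.Println("accepted:", v)
    }
    // Anything without exactly one '=' is rejected.
    if _, err := opts.ValidateLabel("not-a-label"); err != nil {
        fmt.Println(err) // bad attribute format: not-a-label
    }
}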


@ -42,6 +42,11 @@ type (
Archiver struct { Archiver struct {
Untar func(io.Reader, string, *TarOptions) error Untar func(io.Reader, string, *TarOptions) error
} }
// breakoutError is used to differentiate errors related to breaking out
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
breakoutError error
) )
var ( var (
@ -287,11 +292,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
} }
case tar.TypeLink: case tar.TypeLink:
if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { targetPath := filepath.Join(extractDir, hdr.Linkname)
// check for hardlink breakout
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
}
if err := os.Link(targetPath, path); err != nil {
return err return err
} }
case tar.TypeSymlink: case tar.TypeSymlink:
// path -> hdr.Linkname = targetPath
// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
// that symlink would first have to be created, which would be caught earlier, at this very check:
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
}
if err := os.Symlink(hdr.Linkname, path); err != nil { if err := os.Symlink(hdr.Linkname, path); err != nil {
return err return err
} }
@ -451,6 +470,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// identity (uncompressed), gzip, bzip2, xz. // identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist. // FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error { func Untar(archive io.Reader, dest string, options *TarOptions) error {
dest = filepath.Clean(dest)
if options == nil { if options == nil {
options = &TarOptions{} options = &TarOptions{}
} }
@ -488,6 +509,7 @@ loop:
} }
// Normalize name, for safety and for a simple is-root check // Normalize name, for safety and for a simple is-root check
// This keeps "../" as-is, but normalizes "/../" to "/"
hdr.Name = filepath.Clean(hdr.Name) hdr.Name = filepath.Clean(hdr.Name)
for _, exclude := range options.Excludes { for _, exclude := range options.Excludes {
@ -508,7 +530,11 @@ loop:
} }
} }
// Prevent symlink breakout
path := filepath.Join(dest, hdr.Name) path := filepath.Join(dest, hdr.Name)
if !strings.HasPrefix(path, dest) {
return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
}
// If path exists we almost always just want to remove and replace it // If path exists we almost always just want to remove and replace it
// The only exception is when it is a directory *and* the file from // The only exception is when it is a directory *and* the file from
@ -742,17 +768,20 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
return nil, err return nil, err
} }
size := st.Size() size := st.Size()
return &TempArchive{f, size}, nil return &TempArchive{f, size, 0}, nil
} }
type TempArchive struct { type TempArchive struct {
*os.File *os.File
Size int64 // Pre-computed from Stat().Size() as a convenience Size int64 // Pre-computed from Stat().Size() as a convenience
read int64
} }
func (archive *TempArchive) Read(data []byte) (int, error) { func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data) n, err := archive.File.Read(data)
if err != nil { archive.read += int64(n)
if err != nil || archive.read == archive.Size {
archive.File.Close()
os.Remove(archive.File.Name()) os.Remove(archive.File.Name())
} }
return n, err return n, err

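With the `read` counter above, a TempArchive now cleans up after itself as soon as the caller has consumed all `Size` bytes (or hits a read error), rather than only on error. A hedged sketch of the intended consumption pattern, assuming the usual pkg/archive import path and that archive.Archive is, as elsewhere in this package, just an io.Reader; the input file name is made up.

package main

import (
    "io"
    "io/ioutil"
    "log"
    "os"

    "github.com/docker/docker/pkg/archive" // assumed import path
)

func main() {
    src, err := os.Open("layer.tar") // any tar stream will do; the name is hypothetical
    if err != nil {
        log.Fatal(err)
    }
    defer src.Close()

    // NewTempArchive spools the stream into a temporary file ("" means the default temp dir).
    tmp, err := archive.NewTempArchive(src, "")
    if err != nil {
        log.Fatal(err)
    }

    // Draining the archive also closes and removes the backing file once
    // tmp.Size bytes have been returned, so no explicit cleanup is needed here.
    if _, err := io.Copy(ioutil.Discard, tmp); err != nil {
        log.Fatal(err)
    }
}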

@ -8,6 +8,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"path/filepath"
"syscall" "syscall"
"testing" "testing"
"time" "time"
@ -214,7 +215,12 @@ func TestTarWithOptions(t *testing.T) {
// Failing prevents the archives from being uncompressed during ADD // Failing prevents the archives from being uncompressed during ADD
func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -403,3 +409,201 @@ func BenchmarkTarUntarWithLinks(b *testing.B) {
os.RemoveAll(target) os.RemoveAll(target)
} }
} }
func TestUntarInvalidFilenames(t *testing.T) {
for i, headers := range [][]*tar.Header{
{
{
Name: "../victim/dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{
{
// Note the leading slash
Name: "/../victim/slash-dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestUntarInvalidHardlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeLink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeLink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (hardlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try reading victim/hello (hardlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try removing victim directory (hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestUntarInvalidSymlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeSymlink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeSymlink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try removing victim directory (symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try writing to victim/newdir/newfile with a symlink in the path
{
// this header needs to be before the next one, or else there is an error
Name: "dir/loophole",
Typeflag: tar.TypeSymlink,
Linkname: "../../victim",
Mode: 0755,
},
{
Name: "dir/loophole/newdir/newfile",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
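The same guard can be exercised without the testBreakout helper these tables rely on: feed Untar a stream whose header name escapes the destination and it is expected to fail with a breakoutError before anything is written outside that directory. A minimal in-package sketch; the test name is mine.

package archive

import (
    "bytes"
    "io/ioutil"
    "os"
    "testing"

    "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

// TestUntarRejectsDotDotName is a minimal illustration of the new check:
// a single "../escape" entry should make Untar return a breakoutError.
func TestUntarRejectsDotDotName(t *testing.T) {
    dest, err := ioutil.TempDir("", "docker-untar-guard")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dest)

    buf := new(bytes.Buffer)
    tw := tar.NewWriter(buf)
    if err := tw.WriteHeader(&tar.Header{Name: "../escape", Typeflag: tar.TypeReg, Mode: 0644}); err != nil {
        t.Fatal(err)
    }
    tw.Close()

    err = Untar(buf, dest, nil)
    if _, ok := err.(breakoutError); !ok {
        t.Fatalf("expected a breakoutError, got %v", err)
    }
}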


@ -18,6 +18,8 @@ import (
// ApplyLayer parses a diff in the standard layer format from `layer`, and // ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. // applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error { func ApplyLayer(dest string, layer ArchiveReader) error {
dest = filepath.Clean(dest)
// We need to be able to set any perms // We need to be able to set any perms
oldmask, err := system.Umask(0) oldmask, err := system.Umask(0)
if err != nil { if err != nil {
@ -91,6 +93,12 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
path := filepath.Join(dest, hdr.Name) path := filepath.Join(dest, hdr.Name)
base := filepath.Base(path) base := filepath.Base(path)
// Prevent symlink breakout
if !strings.HasPrefix(path, dest) {
return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
}
if strings.HasPrefix(base, ".wh.") { if strings.HasPrefix(base, ".wh.") {
originalBase := base[len(".wh."):] originalBase := base[len(".wh."):]
originalPath := filepath.Join(filepath.Dir(path), originalBase) originalPath := filepath.Join(filepath.Dir(path), originalBase)

pkg/archive/diff_test.go (new file, 191 lines)

@ -0,0 +1,191 @@
package archive
import (
"testing"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
func TestApplyLayerInvalidFilenames(t *testing.T) {
for i, headers := range [][]*tar.Header{
{
{
Name: "../victim/dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{
{
// Note the leading slash
Name: "/../victim/slash-dotdot",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestApplyLayerInvalidHardlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeLink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeLink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (hardlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try reading victim/hello (hardlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // Try removing victim directory (hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeLink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}
func TestApplyLayerInvalidSymlink(t *testing.T) {
for i, headers := range [][]*tar.Header{
{ // try reading victim/hello (../)
{
Name: "dotdot",
Typeflag: tar.TypeSymlink,
Linkname: "../victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (/../)
{
Name: "slash-dotdot",
Typeflag: tar.TypeSymlink,
// Note the leading slash
Linkname: "/../victim/hello",
Mode: 0644,
},
},
{ // try writing victim/file
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim/file",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try reading victim/hello (symlink, hardlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "hardlink",
Typeflag: tar.TypeLink,
Linkname: "loophole-victim/hello",
Mode: 0644,
},
},
{ // try removing victim directory (symlink)
{
Name: "loophole-victim",
Typeflag: tar.TypeSymlink,
Linkname: "../victim",
Mode: 0755,
},
{
Name: "loophole-victim",
Typeflag: tar.TypeReg,
Mode: 0644,
},
},
} {
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
t.Fatalf("i=%d. %v", i, err)
}
}
}

pkg/archive/utils_test.go (new file, 166 lines)

@ -0,0 +1,166 @@
package archive
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
var testUntarFns = map[string]func(string, io.Reader) error{
"untar": func(dest string, r io.Reader) error {
return Untar(r, dest, nil)
},
"applylayer": func(dest string, r io.Reader) error {
return ApplyLayer(dest, ArchiveReader(r))
},
}
// testBreakout is a helper that creates a temporary directory (named with the
// `tmpdir` prefix) containing a `victim` folder with a generated `hello` file in it.
// It then builds a tar stream from `headers` and extracts it into a sibling `dest`
// directory using the untar function selected by `untarFn`.
//
// Here are the tested scenarios:
// - removed `victim` folder (write)
// - removed files from `victim` folder (write)
// - new files in `victim` folder (write)
// - modified files in `victim` folder (write)
// - file in `dest` with same content as `victim/hello` (read)
//
// When using testBreakout make sure you cover one of the scenarios listed above.
func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
tmpdir, err := ioutil.TempDir("", tmpdir)
if err != nil {
return err
}
defer os.RemoveAll(tmpdir)
dest := filepath.Join(tmpdir, "dest")
if err := os.Mkdir(dest, 0755); err != nil {
return err
}
victim := filepath.Join(tmpdir, "victim")
if err := os.Mkdir(victim, 0755); err != nil {
return err
}
hello := filepath.Join(victim, "hello")
helloData, err := time.Now().MarshalText()
if err != nil {
return err
}
if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
return err
}
helloStat, err := os.Stat(hello)
if err != nil {
return err
}
reader, writer := io.Pipe()
go func() {
t := tar.NewWriter(writer)
for _, hdr := range headers {
t.WriteHeader(hdr)
}
t.Close()
}()
untar := testUntarFns[untarFn]
if untar == nil {
return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
}
if err := untar(dest, reader); err != nil {
if _, ok := err.(breakoutError); !ok {
// If untar returns an error unrelated to an archive breakout,
// then consider this an unexpected error and abort.
return err
}
// Here, untar detected the breakout.
// Let's move on verifying that indeed there was no breakout.
fmt.Printf("breakoutError: %v\n", err)
}
// Check victim folder
f, err := os.Open(victim)
if err != nil {
// codepath taken if victim folder was removed
return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
}
defer f.Close()
// Check contents of victim folder
//
// We only ask Readdirnames for 2 entries: if all is well, the `hello` file is the
// only result. Any second entry cannot also be named `hello`, so its presence means
// a new file was created in the victim folder, which by itself detects a breakout.
names, err := f.Readdirnames(2)
if err != nil {
// codepath taken if victim is not a folder
return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
}
for _, name := range names {
if name != "hello" {
// codepath taken if new file was created in victim folder
return fmt.Errorf("archive breakout: new file %q", name)
}
}
// Check victim/hello
f, err = os.Open(hello)
if err != nil {
// codepath taken if read permissions were removed
return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fi, err := f.Stat()
if err != nil {
return err
}
if helloStat.IsDir() != fi.IsDir() ||
// TODO: cannot check for fi.ModTime() change
helloStat.Mode() != fi.Mode() ||
helloStat.Size() != fi.Size() ||
!bytes.Equal(helloData, b) {
// codepath taken if hello has been modified
return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
}
// Check that nothing in dest/ has the same content as victim/hello.
// Since victim/hello was generated with time.Now(), it is safe to assume
// that any file whose content matches exactly victim/hello, managed somehow
// to access victim/hello.
return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
if err != nil {
// skip directory if error
return filepath.SkipDir
}
// enter directory
return nil
}
if err != nil {
// skip file if error
return nil
}
b, err := ioutil.ReadFile(path)
if err != nil {
// Houston, we have a problem. Aborting (space)walk.
return err
}
if bytes.Equal(helloData, b) {
return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
}
return nil
})
}


@ -0,0 +1,90 @@
package chrootarchive
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"runtime"
"strings"
"syscall"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
func untar() {
runtime.LockOSThread()
flag.Parse()
if err := syscall.Chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Chdir("/"); err != nil {
fatal(err)
}
options := new(archive.TarOptions)
dec := json.NewDecoder(strings.NewReader(flag.Arg(1)))
if err := dec.Decode(options); err != nil {
fatal(err)
}
if err := archive.Untar(os.Stdin, "/", options); err != nil {
fatal(err)
}
os.Exit(0)
}
var (
chrootArchiver = &archive.Archiver{Untar}
)
func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
if err := enc.Encode(options); err != nil {
return fmt.Errorf("Untar json encode: %v", err)
}
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := os.MkdirAll(dest, 0777); err != nil {
return err
}
}
cmd := reexec.Command("docker-untar", dest, buf.String())
cmd.Stdin = archive
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Untar %s %s", err, out)
}
return nil
}
func TarUntar(src, dst string) error {
return chrootArchiver.TarUntar(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
return chrootArchiver.CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
return chrootArchiver.CopyFileWithTar(src, dst)
}
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
return chrootArchiver.UntarPath(src, dst)
}


@ -0,0 +1,44 @@
package chrootarchive
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Init()
}
func TestChrootTarUntar(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
src := filepath.Join(tmpdir, "src")
if err := os.MkdirAll(src, 0700); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil {
t.Fatal(err)
}
stream, err := archive.Tar(src, archive.Uncompressed)
if err != nil {
t.Fatal(err)
}
dest := filepath.Join(tmpdir, "src")
if err := os.MkdirAll(dest, 0700); err != nil {
t.Fatal(err)
}
if err := Untar(stream, dest, &archive.TarOptions{Excludes: []string{"lolo"}}); err != nil {
t.Fatal(err)
}
}

pkg/chrootarchive/diff.go (new file, 46 lines)

@ -0,0 +1,46 @@
package chrootarchive
import (
"flag"
"fmt"
"io/ioutil"
"os"
"runtime"
"syscall"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
func applyLayer() {
runtime.LockOSThread()
flag.Parse()
if err := syscall.Chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Chdir("/"); err != nil {
fatal(err)
}
tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
if err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
if err := archive.ApplyLayer("/", os.Stdin); err != nil {
os.RemoveAll(tmpDir)
fatal(err)
}
os.RemoveAll(tmpDir)
os.Exit(0)
}
func ApplyLayer(dest string, layer archive.ArchiveReader) error {
cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("ApplyLayer %s %s", err, out)
}
return nil
}
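chrootarchive.ApplyLayer keeps the signature of archive.ApplyLayer but re-executes the current binary as "docker-applyLayer" and chroots into dest before extracting, so a hostile layer cannot reach outside it. A rough usage sketch; the import paths, the layer file name and the destination path are assumptions, not part of this change.

package main

import (
    "log"
    "os"

    "github.com/docker/docker/pkg/archive" // assumed import paths
    "github.com/docker/docker/pkg/chrootarchive"
    "github.com/docker/docker/pkg/reexec"
)

func main() {
    // The "docker-applyLayer" entry point registered in init.go only runs if the
    // program lets reexec dispatch first, and the process must be able to chroot.
    if reexec.Init() {
        return
    }

    layer, err := os.Open("layer.tar") // hypothetical layer diff
    if err != nil {
        log.Fatal(err)
    }
    defer layer.Close()

    // dest must already exist; the re-executed child chroots into it before extracting.
    if err := chrootarchive.ApplyLayer("/tmp/extract-root", archive.ArchiveReader(layer)); err != nil {
        log.Fatal(err)
    }
}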

pkg/chrootarchive/init.go (new file, 18 lines)

@ -0,0 +1,18 @@
package chrootarchive
import (
"fmt"
"os"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Register("docker-untar", untar)
reexec.Register("docker-applyLayer", applyLayer)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}


@ -63,7 +63,7 @@ var (
ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove")
ErrRunRemoveDevice = errors.New("running RemoveDevice failed") ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrInvalidAddNode = errors.New("Invalid AddNode type")
ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity")
ErrBusy = errors.New("Device is Busy") ErrBusy = errors.New("Device is Busy")
@ -104,6 +104,20 @@ func (t *Task) destroy() {
} }
} }
// TaskCreateNamed is a convenience function for TaskCreate when a name
// will be set on the task as well
func TaskCreateNamed(t TaskType, name string) (*Task, error) {
task := TaskCreate(t)
if task == nil {
return nil, fmt.Errorf("Can't create task of type %d", int(t))
}
if err := task.SetName(name); err != nil {
return nil, fmt.Errorf("Can't set task name %s", name)
}
return task, nil
}
// TaskCreate initializes a devicemapper task of tasktype
func TaskCreate(tasktype TaskType) *Task { func TaskCreate(tasktype TaskType) *Task {
Ctask := DmTaskCreate(int(tasktype)) Ctask := DmTaskCreate(int(tasktype))
if Ctask == nil { if Ctask == nil {
@ -298,7 +312,7 @@ func GetLibraryVersion() (string, error) {
func RemoveDevice(name string) error { func RemoveDevice(name string) error {
log.Debugf("[devmapper] RemoveDevice START") log.Debugf("[devmapper] RemoveDevice START")
defer log.Debugf("[devmapper] RemoveDevice END") defer log.Debugf("[devmapper] RemoveDevice END")
task, err := createTask(DeviceRemove, name) task, err := TaskCreateNamed(DeviceRemove, name)
if task == nil { if task == nil {
return err return err
} }
@ -354,7 +368,7 @@ func BlockDeviceDiscard(path string) error {
// This is the programmatic example of "dmsetup create" // This is the programmatic example of "dmsetup create"
func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
task, err := createTask(DeviceCreate, poolName) task, err := TaskCreateNamed(DeviceCreate, poolName)
if task == nil { if task == nil {
return err return err
} }
@ -373,18 +387,17 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
if err := task.SetCookie(&cookie, 0); err != nil { if err := task.SetCookie(&cookie, 0); err != nil {
return fmt.Errorf("Can't set cookie %s", err) return fmt.Errorf("Can't set cookie %s", err)
} }
defer UdevWait(cookie)
if err := task.Run(); err != nil { if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err)
} }
UdevWait(cookie)
return nil return nil
} }
func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
task, err := createTask(DeviceReload, poolName) task, err := TaskCreateNamed(DeviceReload, poolName)
if task == nil { if task == nil {
return err return err
} }
@ -406,19 +419,8 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
return nil return nil
} }
func createTask(t TaskType, name string) (*Task, error) {
task := TaskCreate(t)
if task == nil {
return nil, fmt.Errorf("Can't create task of type %d", int(t))
}
if err := task.SetName(name); err != nil {
return nil, fmt.Errorf("Can't set task name %s", name)
}
return task, nil
}
func GetDeps(name string) (*Deps, error) { func GetDeps(name string) (*Deps, error) {
task, err := createTask(DeviceDeps, name) task, err := TaskCreateNamed(DeviceDeps, name)
if task == nil { if task == nil {
return nil, err return nil, err
} }
@ -429,7 +431,7 @@ func GetDeps(name string) (*Deps, error) {
} }
func GetInfo(name string) (*Info, error) { func GetInfo(name string) (*Info, error) {
task, err := createTask(DeviceInfo, name) task, err := TaskCreateNamed(DeviceInfo, name)
if task == nil { if task == nil {
return nil, err return nil, err
} }
@ -451,9 +453,9 @@ func GetDriverVersion() (string, error) {
} }
func GetStatus(name string) (uint64, uint64, string, string, error) { func GetStatus(name string) (uint64, uint64, string, string, error) {
task, err := createTask(DeviceStatus, name) task, err := TaskCreateNamed(DeviceStatus, name)
if task == nil { if task == nil {
log.Debugf("GetStatus: Error createTask: %s", err) log.Debugf("GetStatus: Error TaskCreateNamed: %s", err)
return 0, 0, "", "", err return 0, 0, "", "", err
} }
if err := task.Run(); err != nil { if err := task.Run(); err != nil {
@ -476,7 +478,7 @@ func GetStatus(name string) (uint64, uint64, string, string, error) {
} }
func SetTransactionId(poolName string, oldId uint64, newId uint64) error { func SetTransactionId(poolName string, oldId uint64, newId uint64) error {
task, err := createTask(DeviceTargetMsg, poolName) task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
if task == nil { if task == nil {
return err return err
} }
@ -496,7 +498,7 @@ func SetTransactionId(poolName string, oldId uint64, newId uint64) error {
} }
func SuspendDevice(name string) error { func SuspendDevice(name string) error {
task, err := createTask(DeviceSuspend, name) task, err := TaskCreateNamed(DeviceSuspend, name)
if task == nil { if task == nil {
return err return err
} }
@ -507,7 +509,7 @@ func SuspendDevice(name string) error {
} }
func ResumeDevice(name string) error { func ResumeDevice(name string) error {
task, err := createTask(DeviceResume, name) task, err := TaskCreateNamed(DeviceResume, name)
if task == nil { if task == nil {
return err return err
} }
@ -516,13 +518,12 @@ func ResumeDevice(name string) error {
if err := task.SetCookie(&cookie, 0); err != nil { if err := task.SetCookie(&cookie, 0); err != nil {
return fmt.Errorf("Can't set cookie %s", err) return fmt.Errorf("Can't set cookie %s", err)
} }
defer UdevWait(cookie)
if err := task.Run(); err != nil { if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceResume %s", err) return fmt.Errorf("Error running DeviceResume %s", err)
} }
UdevWait(cookie)
return nil return nil
} }
@ -530,7 +531,7 @@ func CreateDevice(poolName string, deviceId *int) error {
log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
for { for {
task, err := createTask(DeviceTargetMsg, poolName) task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
if task == nil { if task == nil {
return err return err
} }
@ -558,7 +559,7 @@ func CreateDevice(poolName string, deviceId *int) error {
} }
func DeleteDevice(poolName string, deviceId int) error { func DeleteDevice(poolName string, deviceId int) error {
task, err := createTask(DeviceTargetMsg, poolName) task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
if task == nil { if task == nil {
return err return err
} }
@ -578,7 +579,7 @@ func DeleteDevice(poolName string, deviceId int) error {
} }
func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { func ActivateDevice(poolName string, name string, deviceId int, size uint64) error {
task, err := createTask(DeviceCreate, name) task, err := TaskCreateNamed(DeviceCreate, name)
if task == nil { if task == nil {
return err return err
} }
@ -596,12 +597,12 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err
return fmt.Errorf("Can't set cookie %s", err) return fmt.Errorf("Can't set cookie %s", err)
} }
defer UdevWait(cookie)
if err := task.Run(); err != nil { if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err)
} }
UdevWait(cookie)
return nil return nil
} }
@ -616,7 +617,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic
} }
for { for {
task, err := createTask(DeviceTargetMsg, poolName) task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
if task == nil { if task == nil {
if doSuspend { if doSuspend {
ResumeDevice(baseName) ResumeDevice(baseName)


@ -20,9 +20,9 @@ const (
) )
var ( var (
ErrIptablesNotFound = errors.New("Iptables not found")
nat = []string{"-t", "nat"} nat = []string{"-t", "nat"}
supportsXlock = false supportsXlock = false
ErrIptablesNotFound = errors.New("Iptables not found")
) )
type Chain struct { type Chain struct {
@ -30,6 +30,15 @@ type Chain struct {
Bridge string Bridge string
} }
type ChainError struct {
Chain string
Output []byte
}
func (e *ChainError) Error() string {
return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output))
}
func init() { func init() {
supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil
} }
@ -73,11 +82,12 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
"-p", proto, "-p", proto,
"-d", daddr, "-d", daddr,
"--dport", strconv.Itoa(port), "--dport", strconv.Itoa(port),
"!", "-i", c.Bridge,
"-j", "DNAT", "-j", "DNAT",
"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil {
return err return err
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables forward: %s", output) return &ChainError{Chain: "FORWARD", Output: output}
} }
fAction := action fAction := action
@ -93,18 +103,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
"-j", "ACCEPT"); err != nil { "-j", "ACCEPT"); err != nil {
return err return err
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables forward: %s", output) return &ChainError{Chain: "FORWARD", Output: output}
}
if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING",
"-p", proto,
"-s", dest_addr,
"-d", dest_addr,
"--dport", strconv.Itoa(dest_port),
"-j", "MASQUERADE"); err != nil {
return err
} else if len(output) != 0 {
return fmt.Errorf("Error iptables forward: %s", output)
} }
return nil return nil
@ -118,7 +117,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error {
if output, err := Raw(append(a, "-j", c.Name)...); err != nil { if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
return err return err
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables prerouting: %s", output) return &ChainError{Chain: "PREROUTING", Output: output}
} }
return nil return nil
} }
@ -131,7 +130,7 @@ func (c *Chain) Output(action Action, args ...string) error {
if output, err := Raw(append(a, "-j", c.Name)...); err != nil { if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
return err return err
} else if len(output) != 0 { } else if len(output) != 0 {
return fmt.Errorf("Error iptables output: %s", output) return &ChainError{Chain: "OUTPUT", Output: output}
} }
return nil return nil
} }
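Callers of this package previously got only a formatted string; with ChainError they can branch on which chain rejected a rule and still read the raw iptables output. A sketch of that error handling; the Action value, addresses and ports are placeholders, the Forward parameter list is inferred from the hunk above, and the import path is assumed.

package main

import (
    "log"
    "net"

    "github.com/docker/docker/pkg/iptables" // assumed import path
)

func main() {
    chain := &iptables.Chain{Name: "DOCKER", Bridge: "docker0"}

    // "-A" appends a rule; every value below is a placeholder.
    err := chain.Forward(iptables.Action("-A"), net.ParseIP("192.168.1.10"), 8080, "tcp", "172.17.0.2", 80)
    if cerr, ok := err.(*iptables.ChainError); ok {
        // The typed error carries both the chain name and the raw iptables output.
        log.Fatalf("iptables rejected the rule in the %s chain: %s", cerr.Chain, cerr.Output)
    }
    if err != nil {
        log.Fatal(err)
    }
}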


@ -394,12 +394,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.formal[name] return f.formal[name]
} }
// IsSet indicates whether the named flag was explicitly set on the command line
func (f *FlagSet) IsSet(name string) bool {
return f.actual[name] != nil
}
// Lookup returns the Flag structure of the named command-line flag, // Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists. // returning nil if none exists.
func Lookup(name string) *Flag { func Lookup(name string) *Flag {
return CommandLine.formal[name] return CommandLine.formal[name]
} }
// IsSet reports whether the named flag was explicitly set on the command line
func IsSet(name string) bool {
return CommandLine.IsSet(name)
}
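IsSet is what lets a command distinguish a flag left at its default from one explicitly set to the default value, which is the distinction the --tlsverify test earlier in this diff relies on. A small sketch, assuming mflag keeps the standard library's FlagSet constructors; the flag name mirrors the TLS use case.

package main

import (
    "fmt"

    flag "github.com/docker/docker/pkg/mflag" // assumed import path
)

func main() {
    fs := flag.NewFlagSet("demo", flag.ContinueOnError)
    tlsVerify := fs.Bool([]string{"tlsverify"}, false, "use TLS and verify the remote")

    // "-tlsverify=false" leaves the value at its default but still marks it as set.
    if err := fs.Parse([]string{"-tlsverify=false"}); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(*tlsVerify)            // false
    fmt.Println(fs.IsSet("tlsverify")) // true: the user did say something about TLS
}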
// Set sets the value of the named flag. // Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error { func (f *FlagSet) Set(name, value string) error {
flag, ok := f.formal[name] flag, ok := f.formal[name]


@ -168,11 +168,14 @@ func testParse(f *FlagSet, t *testing.T) {
} }
boolFlag := f.Bool([]string{"bool"}, false, "bool value") boolFlag := f.Bool([]string{"bool"}, false, "bool value")
bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
f.Bool([]string{"bool3"}, false, "bool3 value")
bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
intFlag := f.Int([]string{"-int"}, 0, "int value") intFlag := f.Int([]string{"-int"}, 0, "int value")
int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
stringFlag := f.String([]string{"string"}, "0", "string value") stringFlag := f.String([]string{"string"}, "0", "string value")
f.String([]string{"string2"}, "0", "string2 value")
singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value")
doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value")
mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value")
@ -185,6 +188,7 @@ func testParse(f *FlagSet, t *testing.T) {
args := []string{ args := []string{
"-bool", "-bool",
"-bool2=true", "-bool2=true",
"-bool4=false",
"--int", "22", "--int", "22",
"--int64", "0x23", "--int64", "0x23",
"-uint", "24", "-uint", "24",
@ -212,6 +216,18 @@ func testParse(f *FlagSet, t *testing.T) {
if *bool2Flag != true { if *bool2Flag != true {
t.Error("bool2 flag should be true, is ", *bool2Flag) t.Error("bool2 flag should be true, is ", *bool2Flag)
} }
if !f.IsSet("bool2") {
t.Error("bool2 should be marked as set")
}
if f.IsSet("bool3") {
t.Error("bool3 should not be marked as set")
}
if !f.IsSet("bool4") {
t.Error("bool4 should be marked as set")
}
if *bool4Flag != false {
t.Error("bool4 flag should be false, is ", *bool4Flag)
}
if *intFlag != 22 { if *intFlag != 22 {
t.Error("int flag should be 22, is ", *intFlag) t.Error("int flag should be 22, is ", *intFlag)
} }
@ -227,6 +243,12 @@ func testParse(f *FlagSet, t *testing.T) {
if *stringFlag != "hello" { if *stringFlag != "hello" {
t.Error("string flag should be `hello`, is ", *stringFlag) t.Error("string flag should be `hello`, is ", *stringFlag)
} }
if !f.IsSet("string") {
t.Error("string flag should be marked as set")
}
if f.IsSet("string2") {
t.Error("string2 flag should not be marked as set")
}
if *singleQuoteFlag != "single" { if *singleQuoteFlag != "single" {
t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
} }


@@ -29,7 +29,9 @@ func ParseFlag(arg string, prev Args) (Args, error) {
    }
    f := strings.SplitN(arg, "=", 2)
-   filters[f[0]] = append(filters[f[0]], f[1])
+   name := strings.ToLower(strings.TrimSpace(f[0]))
+   value := strings.TrimSpace(f[1])
+   filters[name] = append(filters[name], value)
    return filters, nil
}
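
A small, hypothetical illustration of the normalization added above: the filter name is lower-cased and trimmed before it is stored, so differently-cased spellings of the same filter end up under one key. The import path is the vendored package's, the exported Args map type is assumed, and the filter term is made up.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/filters"
)

func main() {
	args, err := filters.ParseFlag("Dangling=true", filters.Args{})
	if err != nil {
		panic(err)
	}
	// Stored under the normalized key "dangling", not "Dangling".
	fmt.Println(args["dangling"]) // [true]
}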


@@ -0,0 +1,18 @@
// +build linux

package reexec

import (
    "os/exec"
    "syscall"
)

func Command(args ...string) *exec.Cmd {
    return &exec.Cmd{
        Path: Self(),
        Args: args,
        SysProcAttr: &syscall.SysProcAttr{
            Pdeathsig: syscall.SIGTERM,
        },
    }
}


@@ -0,0 +1,11 @@
// +build !linux

package reexec

import (
    "os/exec"
)

func Command(args ...string) *exec.Cmd {
    return nil
}


@@ -27,19 +27,16 @@ func Init() bool {
        return true
    }
    return false
}

// Self returns the path to the current processes binary
func Self() string {
    name := os.Args[0]
    if filepath.Base(name) == name {
        if lp, err := exec.LookPath(name); err == nil {
            name = lp
        }
    }
    return name
}
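
For orientation, a hedged sketch of the usual pattern around these helpers on Linux: an initializer is registered under a name, Init is called at the top of main so the re-executed child runs that initializer, and Command (added above) re-invokes the current binary with Pdeathsig set so the child is killed if its parent dies. Register is assumed to exist in this package alongside Init and Self; the child name is made up.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs in the re-executed child, selected by args[0].
	reexec.Register("example-child", func() {
		fmt.Println("hello from the re-executed child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		// An initializer matched args[0] and already ran.
		return
	}

	cmd := reexec.Command("example-child") // args[0] selects the initializer
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}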


@@ -12,6 +12,12 @@ const maxLoopCounter = 100
// FollowSymlink will follow an existing link and scope it to the root
// path provided.
+// The role of this function is to return an absolute path in the root
+// or normalize to the root if the symlink leads to a path which is
+// outside of the root.
+// Errors encountered while attempting to follow the symlink in path
+// will be reported.
+// Normalizations to the root don't constitute errors.
func FollowSymlinkInScope(link, root string) (string, error) {
    root, err := filepath.Abs(root)
    if err != nil {
@@ -60,25 +66,36 @@ func FollowSymlinkInScope(link, root string) (string, error) {
                }
                return "", err
            }
-           if stat.Mode()&os.ModeSymlink == os.ModeSymlink {
-               dest, err := os.Readlink(prev)
-               if err != nil {
-                   return "", err
-               }
-               if path.IsAbs(dest) {
-                   prev = filepath.Join(root, dest)
-               } else {
-                   prev, _ = filepath.Abs(prev)
-                   if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) {
-                       prev = filepath.Join(root, filepath.Base(dest))
-                   }
-               }
-           } else {
-               break
-           }
+
+           // let's break if we're not dealing with a symlink
+           if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
+               break
+           }
+
+           // process the symlink
+           dest, err := os.Readlink(prev)
+           if err != nil {
+               return "", err
+           }
+
+           if path.IsAbs(dest) {
+               prev = filepath.Join(root, dest)
+           } else {
+               prev, _ = filepath.Abs(prev)
+
+               dir := filepath.Dir(prev)
+               prev = filepath.Join(dir, dest)
+               if dir == root && !strings.HasPrefix(prev, root) {
+                   prev = root
+               }
+               if len(prev) < len(root) || (len(prev) == len(root) && prev != root) {
+                   prev = filepath.Join(root, filepath.Base(dest))
+               }
+           }
        }
    }
+
+   if prev == "/" {
+       prev = root
+   }
    return prev, nil
}
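
A short sketch of the behaviour the expanded comment describes, modelled on the new tests further below: a link that tries to climb out of the scope is normalized back to the root rather than producing an error. The temp-dir setup is illustrative only.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root, err := ioutil.TempDir("", "scope-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	// "escape" points at ../../, i.e. outside of root.
	escape := filepath.Join(root, "escape")
	if err := os.Symlink("../../", escape); err != nil {
		log.Fatal(err)
	}

	resolved, err := symlink.FollowSymlinkInScope(escape, root)
	if err != nil {
		log.Fatal(err)
	}
	// Normalized back to the scope instead of escaping it.
	fmt.Println(resolved == root) // true
}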


@@ -46,6 +46,7 @@ func TestFollowSymLinkUnderLinkedDir(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
+   defer os.RemoveAll(dir)

    os.Mkdir(filepath.Join(dir, "realdir"), 0700)
    os.Symlink("realdir", filepath.Join(dir, "linkdir"))
@@ -97,25 +98,151 @@ func TestFollowSymLinkRelativeLink(t *testing.T) {
}

func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
-   link := "testdata/fs/a/f"
-
-   rewrite, err := FollowSymlinkInScope(link, "testdata")
-   if err != nil {
-       t.Fatal(err)
-   }
-
-   if expected := abs(t, "testdata/test"); expected != rewrite {
-       t.Fatalf("Expected %s got %s", expected, rewrite)
-   }
-
-   link = "testdata/fs/b/h"
-
-   rewrite, err = FollowSymlinkInScope(link, "testdata")
-   if err != nil {
-       t.Fatal(err)
-   }
-
-   if expected := abs(t, "testdata/root"); expected != rewrite {
-       t.Fatalf("Expected %s got %s", expected, rewrite)
-   }
+   // avoid letting symlink f lead us out of the "testdata" scope
+   // we don't normalize because symlink f is in scope and there is no
+   // information leak
+   {
+       link := "testdata/fs/a/f"
+       rewrite, err := FollowSymlinkInScope(link, "testdata")
+       if err != nil {
+           t.Fatal(err)
+       }
+       if expected := abs(t, "testdata/test"); expected != rewrite {
+           t.Fatalf("Expected %s got %s", expected, rewrite)
+       }
+   }
+
+   // avoid letting symlink f lead us out of the "testdata/fs" scope
+   // we don't normalize because symlink f is in scope and there is no
+   // information leak
+   {
+       link := "testdata/fs/a/f"
+       rewrite, err := FollowSymlinkInScope(link, "testdata/fs")
+       if err != nil {
+           t.Fatal(err)
+       }
+       if expected := abs(t, "testdata/fs/test"); expected != rewrite {
+           t.Fatalf("Expected %s got %s", expected, rewrite)
+       }
+   }
+
+   // avoid letting symlink g (pointed at by symlink h) take out of scope
+   // TODO: we should probably normalize to scope here because ../[....]/root
+   // is out of scope and we leak information
+   {
+       link := "testdata/fs/b/h"
+       rewrite, err := FollowSymlinkInScope(link, "testdata")
+       if err != nil {
+           t.Fatal(err)
+       }
+       if expected := abs(t, "testdata/root"); expected != rewrite {
+           t.Fatalf("Expected %s got %s", expected, rewrite)
+       }
+   }
+
+   // avoid letting allowing symlink e lead us to ../b
+   // normalize to the "testdata/fs/a"
+   {
+       link := "testdata/fs/a/e"
+       rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a")
+       if err != nil {
+           t.Fatal(err)
+       }
+       if expected := abs(t, "testdata/fs/a"); expected != rewrite {
+           t.Fatalf("Expected %s got %s", expected, rewrite)
+       }
+   }
+
+   // avoid letting symlink -> ../directory/file escape from scope
+   // normalize to "testdata/fs/j"
+   {
+       link := "testdata/fs/j/k"
+       rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j")
+       if err != nil {
+           t.Fatal(err)
+       }
+       if expected := abs(t, "testdata/fs/j"); expected != rewrite {
+           t.Fatalf("Expected %s got %s", expected, rewrite)
+       }
+   }
+
+   // make sure we don't allow escaping to /
+   // normalize to dir
+   {
+       dir, err := ioutil.TempDir("", "docker-fs-test")
+       if err != nil {
+           t.Fatal(err)
+       }
+       defer os.RemoveAll(dir)
+
+       linkFile := filepath.Join(dir, "foo")
+       os.Mkdir(filepath.Join(dir, ""), 0700)
+       os.Symlink("/", linkFile)
+
+       rewrite, err := FollowSymlinkInScope(linkFile, dir)
+       if err != nil {
+           t.Fatal(err)
+       }
+       if rewrite != dir {
+           t.Fatalf("Expected %s got %s", dir, rewrite)
+       }
+   }
+
+   // make sure we don't allow escaping to /
+   // normalize to dir
+   {
+       dir, err := ioutil.TempDir("", "docker-fs-test")
+       if err != nil {
+           t.Fatal(err)
+       }
+       defer os.RemoveAll(dir)
+
+       linkFile := filepath.Join(dir, "foo")
+       os.Mkdir(filepath.Join(dir, ""), 0700)
+       os.Symlink("/../../", linkFile)
+
+       rewrite, err := FollowSymlinkInScope(linkFile, dir)
+       if err != nil {
+           t.Fatal(err)
+       }
+       if rewrite != dir {
+           t.Fatalf("Expected %s got %s", dir, rewrite)
+       }
+   }
+
+   // make sure we stay in scope without leaking information
+   // this also checks for escaping to /
+   // normalize to dir
+   {
+       dir, err := ioutil.TempDir("", "docker-fs-test")
+       if err != nil {
+           t.Fatal(err)
+       }
+       defer os.RemoveAll(dir)
+
+       linkFile := filepath.Join(dir, "foo")
+       os.Mkdir(filepath.Join(dir, ""), 0700)
+       os.Symlink("../../", linkFile)
+
+       rewrite, err := FollowSymlinkInScope(linkFile, dir)
+       if err != nil {
+           t.Fatal(err)
+       }
+       if rewrite != dir {
+           t.Fatalf("Expected %s got %s", dir, rewrite)
+       }
+   }
}

pkg/symlink/testdata/fs/j/k (new symbolic link)

@@ -0,0 +1 @@
../i/a


@@ -1,11 +1,13 @@
package system

import (
+   "os"
    "testing"
)

func TestLstat(t *testing.T) {
-   file, invalid, _ := prepareFiles(t)
+   file, invalid, _, dir := prepareFiles(t)
+   defer os.RemoveAll(dir)

    statFile, err := Lstat(file)
    if err != nil {


@@ -1,12 +1,14 @@
package system

import (
+   "os"
    "syscall"
    "testing"
)

func TestFromStatT(t *testing.T) {
-   file, _, _ := prepareFiles(t)
+   file, _, _, dir := prepareFiles(t)
+   defer os.RemoveAll(dir)

    stat := &syscall.Stat_t{}
    err := syscall.Lstat(file, stat)


@@ -8,7 +8,7 @@ import (
    "testing"
)

-func prepareFiles(t *testing.T) (string, string, string) {
+func prepareFiles(t *testing.T) (string, string, string, string) {
    dir, err := ioutil.TempDir("", "docker-system-test")
    if err != nil {
        t.Fatal(err)
@@ -26,11 +26,12 @@ func prepareFiles(t *testing.T) (string, string, string) {
        t.Fatal(err)
    }

-   return file, invalid, symlink
+   return file, invalid, symlink, dir
}

func TestLUtimesNano(t *testing.T) {
-   file, invalid, symlink := prepareFiles(t)
+   file, invalid, symlink, dir := prepareFiles(t)
+   defer os.RemoveAll(dir)

    before, err := os.Stat(file)
    if err != nil {


@@ -27,11 +27,7 @@ const (
// including the byte payload of the image's json metadata as well, and for
// calculating the checksums for buildcache.
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
-   headerSelector, err := getTarHeaderSelector(v)
-   if err != nil {
-       return nil, err
-   }
-   return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil
+   return NewTarSumHash(r, dc, v, DefaultTHash)
}

// Create a new TarSum, providing a THash to use rather than the DefaultTHash
@@ -40,7 +36,9 @@ func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error)
    if err != nil {
        return nil, err
    }
-   return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil
+   ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+   err = ts.initTarSum()
+   return ts, err
}

// TarSum is the generic interface for calculating fixed time
@@ -134,12 +132,6 @@ func (ts *tarSum) initTarSum() error {
}

func (ts *tarSum) Read(buf []byte) (int, error) {
-   if ts.writer == nil {
-       if err := ts.initTarSum(); err != nil {
-           return 0, err
-       }
-   }
    if ts.finished {
        return ts.bufWriter.Read(buf)
    }
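
A hedged sketch of the calling convention after this change: NewTarSum simply delegates to NewTarSumHash with DefaultTHash, and since the sum is now initialized in the constructor, Sum can be asked for even if nothing was ever read. The in-memory tar below is purely illustrative; reading the TarSum through to EOF is what accumulates the checksum.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// Build a tiny tar archive in memory.
	buf := bytes.NewBuffer(nil)
	tw := tar.NewWriter(buf)
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Size: 5, Typeflag: tar.TypeReg}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	tw.Close()

	ts, err := tarsum.NewTarSum(buf, true, tarsum.Version0)
	if err != nil {
		log.Fatal(err)
	}
	// Drain the stream; the checksum is computed as the data flows through.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil)) // "tarsum+sha256:..."
}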


@@ -230,6 +230,17 @@ func TestEmptyTar(t *testing.T) {
    if resultSum != expectedSum {
        t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
    }
+
+   // Test without ever actually writing anything.
+   if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
+       t.Fatal(err)
+   }
+   resultSum = ts.Sum(nil)
+
+   if resultSum != expectedSum {
+       t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+   }
}

var (
@@ -318,6 +329,153 @@ func TestTarSums(t *testing.T) {
    }
}

+func TestIteration(t *testing.T) {
+   headerTests := []struct {
+       expectedSum string // TODO(vbatts) it would be nice to get individual sums of each
+       version     Version
+       hdr         *tar.Header
+       data        []byte
+   }{
+       {
+           "tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+           Version0,
+           &tar.Header{
+               Name:     "file.txt",
+               Size:     0,
+               Typeflag: tar.TypeReg,
+               Devminor: 0,
+               Devmajor: 0,
+           },
+           []byte(""),
+       },
+       {
+           "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+           VersionDev,
+           &tar.Header{
+               Name:     "file.txt",
+               Size:     0,
+               Typeflag: tar.TypeReg,
+               Devminor: 0,
+               Devmajor: 0,
+           },
+           []byte(""),
+       },
+       {
+           "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+           VersionDev,
+           &tar.Header{
+               Name:     "another.txt",
+               Uid:      1000,
+               Gid:      1000,
+               Uname:    "slartibartfast",
+               Gname:    "users",
+               Size:     4,
+               Typeflag: tar.TypeReg,
+               Devminor: 0,
+               Devmajor: 0,
+           },
+           []byte("test"),
+       },
+       {
+           "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd",
+           VersionDev,
+           &tar.Header{
+               Name:     "xattrs.txt",
+               Uid:      1000,
+               Gid:      1000,
+               Uname:    "slartibartfast",
+               Gname:    "users",
+               Size:     4,
+               Typeflag: tar.TypeReg,
+               Xattrs: map[string]string{
+                   "user.key1": "value1",
+                   "user.key2": "value2",
+               },
+           },
+           []byte("test"),
+       },
+       {
+           "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760",
+           VersionDev,
+           &tar.Header{
+               Name:     "xattrs.txt",
+               Uid:      1000,
+               Gid:      1000,
+               Uname:    "slartibartfast",
+               Gname:    "users",
+               Size:     4,
+               Typeflag: tar.TypeReg,
+               Xattrs: map[string]string{
+                   "user.KEY1": "value1", // adding different case to ensure different sum
+                   "user.key2": "value2",
+               },
+           },
+           []byte("test"),
+       },
+       {
+           "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa",
+           Version0,
+           &tar.Header{
+               Name:     "xattrs.txt",
+               Uid:      1000,
+               Gid:      1000,
+               Uname:    "slartibartfast",
+               Gname:    "users",
+               Size:     4,
+               Typeflag: tar.TypeReg,
+               Xattrs: map[string]string{
+                   "user.NOT": "CALCULATED",
+               },
+           },
+           []byte("test"),
+       },
+   }
+   for _, htest := range headerTests {
+       s, err := renderSumForHeader(htest.version, htest.hdr, htest.data)
+       if err != nil {
+           t.Fatal(err)
+       }
+       if s != htest.expectedSum {
+           t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s)
+       }
+   }
+}
+
+func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) {
+   buf := bytes.NewBuffer(nil)
+   // first build our test tar
+   tw := tar.NewWriter(buf)
+   if err := tw.WriteHeader(h); err != nil {
+       return "", err
+   }
+   if _, err := tw.Write(data); err != nil {
+       return "", err
+   }
+   tw.Close()
+
+   ts, err := NewTarSum(buf, true, v)
+   if err != nil {
+       return "", err
+   }
+   tr := tar.NewReader(ts)
+   for {
+       hdr, err := tr.Next()
+       if hdr == nil || err == io.EOF {
+           break
+       }
+       if err != nil {
+           return "", err
+       }
+       if _, err = io.Copy(ioutil.Discard, tr); err != nil {
+           return "", err
+       }
+       break // we're just reading one header ...
+   }
+   return ts.Sum(nil), nil
+}

func Benchmark9kTar(b *testing.B) {
    buf := bytes.NewBuffer([]byte{})
    fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")

30
pkg/urlutil/git.go Normal file
Просмотреть файл

@@ -0,0 +1,30 @@
package urlutil

import "strings"

var (
    validPrefixes = []string{
        "git://",
        "github.com/",
        "git@",
    }
)

// IsGitURL returns true if the provided str is a git repository URL.
func IsGitURL(str string) bool {
    if IsURL(str) && strings.HasSuffix(str, ".git") {
        return true
    }
    for _, prefix := range validPrefixes {
        if strings.HasPrefix(str, prefix) {
            return true
        }
    }
    return false
}

// IsGitTransport returns true if the provided str is a git transport by inspecting
// the prefix of the string for known protocols used in git.
func IsGitTransport(str string) bool {
    return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
}

pkg/urlutil/git_test.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package urlutil

import "testing"

var (
    gitUrls = []string{
        "git://github.com/docker/docker",
        "git@github.com:docker/docker.git",
        "git@bitbucket.org:atlassianlabs/atlassian-docker.git",
        "https://github.com/docker/docker.git",
        "http://github.com/docker/docker.git",
    }
    incompleteGitUrls = []string{
        "github.com/docker/docker",
    }
)

func TestValidGitTransport(t *testing.T) {
    for _, url := range gitUrls {
        if IsGitTransport(url) == false {
            t.Fatalf("%q should be detected as valid Git prefix", url)
        }
    }

    for _, url := range incompleteGitUrls {
        if IsGitTransport(url) == true {
            t.Fatalf("%q should not be detected as valid Git prefix", url)
        }
    }
}

func TestIsGIT(t *testing.T) {
    for _, url := range gitUrls {
        if IsGitURL(url) == false {
            t.Fatalf("%q should be detected as valid Git url", url)
        }
    }

    for _, url := range incompleteGitUrls {
        if IsGitURL(url) == false {
            t.Fatalf("%q should be detected as valid Git url", url)
        }
    }
}

pkg/urlutil/url.go (new file, 19 lines)

@@ -0,0 +1,19 @@
package urlutil

import "strings"

var validUrlPrefixes = []string{
    "http://",
    "https://",
}

// IsURL returns true if the provided str is a valid URL by doing
// a simple change for the transport of the url.
func IsURL(str string) bool {
    for _, prefix := range validUrlPrefixes {
        if strings.HasPrefix(str, prefix) {
            return true
        }
    }
    return false
}
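
A small usage sketch (not from the commit) showing how a caller might combine these predicates when classifying a build-context argument; the inputs are invented.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	for _, arg := range []string{
		"git://github.com/docker/docker",
		"https://example.com/context.tar.gz",
		"./some/local/dir",
	} {
		switch {
		case urlutil.IsGitURL(arg):
			fmt.Println(arg, "-> treat as a git repository")
		case urlutil.IsURL(arg):
			fmt.Println(arg, "-> treat as a remote tarball URL")
		default:
			fmt.Println(arg, "-> treat as a local path")
		}
	}
}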


@@ -105,6 +105,10 @@ if [ -z "$DEBUG" ]; then
fi

LDFLAGS_STATIC='-linkmode external'
+# Cgo -H windows is incompatible with -linkmode external.
+if [ "$(go env GOOS)" == 'windows' ]; then
+    LDFLAGS_STATIC=''
+fi
EXTLDFLAGS_STATIC='-static'
# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
# with options like -race.
@@ -219,7 +223,7 @@ bundle() {
    bundle=$(basename $bundlescript)
    echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)"
    mkdir -p bundles/$VERSION/$bundle
-   source $bundlescript $(pwd)/bundles/$VERSION/$bundle
+   source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle"
}

main() {


@@ -3,19 +3,26 @@ set -e

DEST=$1
BINARY_NAME="docker-$VERSION"
+BINARY_EXTENSION=
if [ "$(go env GOOS)" = 'windows' ]; then
-   BINARY_NAME+='.exe'
+   BINARY_EXTENSION='.exe'
+fi
+BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
+
+# Cygdrive paths don't play well with go build -o.
+if [[ "$(uname -s)" == CYGWIN* ]]; then
+   DEST=$(cygpath -mw $DEST)
fi

go build \
-   -o "$DEST/$BINARY_NAME" \
+   -o "$DEST/$BINARY_FULLNAME" \
    "${BUILDFLAGS[@]}" \
    -ldflags "
        $LDFLAGS
        $LDFLAGS_STATIC_DOCKER
    " \
    ./docker

-echo "Created binary: $DEST/$BINARY_NAME"
-ln -sf "$BINARY_NAME" "$DEST/docker"
-hash_files "$DEST/$BINARY_NAME"
+echo "Created binary: $DEST/$BINARY_FULLNAME"
+ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION"
+hash_files "$DEST/$BINARY_FULLNAME"


@@ -51,7 +51,7 @@ clone hg code.google.com/p/go.net 84a4013f96e0
clone hg code.google.com/p/gosqlite 74691fb6f837

-clone git github.com/docker/libtrust d273ef2565ca
+clone git github.com/docker/libtrust 230dfd18c232

clone git github.com/Sirupsen/logrus v0.6.0

@@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then
    mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
fi

-clone git github.com/docker/libcontainer 28cb5f9dfd6f3352c610a4f1502b5df4f69389ea
+clone git github.com/docker/libcontainer 84c1636580a356db88b079d118b94abe6a1a0acd
# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
rm -rf src/github.com/docker/libcontainer/vendor
eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')"


@@ -126,8 +126,8 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
                return &configFile, err
            }
            authConfig.Auth = ""
-           configFile.Configs[k] = authConfig
            authConfig.ServerAddress = k
+           configFile.Configs[k] = authConfig
        }
    }
    return &configFile, nil
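
The reordering above matters because the auth config is a struct value: assigning it into the map stores a copy, so a field set afterwards never reaches the stored entry. A stripped-down, hypothetical illustration of the same Go semantics:

package main

import "fmt"

type authConfig struct {
	ServerAddress string
}

func main() {
	configs := map[string]authConfig{}

	// Old order: the map receives a copy before ServerAddress is set.
	a := authConfig{}
	configs["registry.example.com"] = a
	a.ServerAddress = "registry.example.com"
	fmt.Printf("old order: %q\n", configs["registry.example.com"].ServerAddress) // ""

	// New order: set the field first, then store the value.
	b := authConfig{}
	b.ServerAddress = "registry.example.com"
	configs["registry.example.com"] = b
	fmt.Printf("new order: %q\n", configs["registry.example.com"].ServerAddress) // "registry.example.com"
}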


@@ -33,7 +33,6 @@ type Config struct {
    NetworkDisabled bool
    MacAddress      string
    OnBuild         []string
-   SecurityOpt     []string
}

func ContainerConfigFromJob(job *engine.Job) *Config {
@@ -58,7 +57,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config {
    }
    job.GetenvJson("ExposedPorts", &config.ExposedPorts)
    job.GetenvJson("Volumes", &config.Volumes)
-   config.SecurityOpt = job.GetenvList("SecurityOpt")
    if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
        config.PortSpecs = PortSpecs
    }


@@ -19,10 +19,11 @@ type ExecConfig struct {
func ExecConfigFromJob(job *engine.Job) *ExecConfig {
    execConfig := &ExecConfig{
-       User:         job.Getenv("User"),
-       Privileged:   job.GetenvBool("Privileged"),
+       // TODO(vishh): Expose 'User' once it is supported.
+       //User: job.Getenv("User"),
+       // TODO(vishh): Expose 'Privileged' once it is supported.
+       //Privileged: job.GetenvBool("Privileged"),
        Tty:          job.GetenvBool("Tty"),
-       Container:    job.Getenv("Container"),
        AttachStdin:  job.GetenvBool("AttachStdin"),
        AttachStderr: job.GetenvBool("AttachStderr"),
        AttachStdout: job.GetenvBool("AttachStdout"),


@@ -95,6 +95,7 @@ type HostConfig struct {
    CapAdd          []string
    CapDrop         []string
    RestartPolicy   RestartPolicy
+   SecurityOpt     []string
}

// This is used by the create command when you want to set both the
@@ -130,6 +131,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
    job.GetenvJson("PortBindings", &hostConfig.PortBindings)
    job.GetenvJson("Devices", &hostConfig.Devices)
    job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
+   hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
    if Binds := job.GetenvList("Binds"); Binds != nil {
        hostConfig.Binds = Binds
    }
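
Together with the Config change above, security options now live on HostConfig, i.e. they are part of the container's runtime configuration instead of being committed into the image. A hedged sketch of what that looks like to a Go caller; the import path and option strings are assumptions for illustration.

package main

import (
	"fmt"

	"github.com/docker/docker/runconfig"
)

func main() {
	hostConfig := &runconfig.HostConfig{
		SecurityOpt: []string{
			"label:user:USER",         // example SELinux option
			"apparmor:docker-default", // example AppArmor profile
		},
	}
	fmt.Println(hostConfig.SecurityOpt)
}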

Some files were not shown because too many files changed in this diff.