Merge branch 'master' of github.com:docker/docker into error

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)
Dan Walsh 2015-07-22 08:02:32 -04:00
Parent 96a4469835 757c4f0d5c
Commit 4815fdc334
117 changed files: 161873 additions and 1497 deletions

View file

@ -234,6 +234,8 @@ close an issue. Including references automatically closes the issue on a merge.
Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
from the Git history.
Please see the [Coding Style](#coding-style) for further guidelines.
### Merge approval
Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
@ -385,3 +387,49 @@ do need a fair way to deal with people who are making our community suck.
appeals, we know that mistakes happen, and we'll work with you to come up with a
fair solution if there has been a misunderstanding.
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they found it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
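As a small, hypothetical illustration of rules 4 through 6 above (comment the why, document even unexported declarations, keep names proportional to their scope), a compliant snippet might look like this; the package and names are invented for this example only:

package retry

import (
	"net"
	"time"
)

// dialDelay is the pause before the single retry in dial. It is a constant
// rather than a flag because making it tunable caused more confusion than it
// was worth; one retry keeps startup failures visible quickly.
const dialDelay = 2 * time.Second

// dial connects to addr over TCP, retrying once after dialDelay. It is kept
// unexported and documented here rather than placed in a "utils" package.
func dial(addr string) (net.Conn, error) {
	c, err := net.Dial("tcp", addr)
	if err == nil {
		return c, nil
	}
	time.Sleep(dialDelay)
	return net.Dial("tcp", addr)
}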

View file

@ -117,6 +117,11 @@ RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \
&& go install -v golang.org/x/tools/cmd/cover \
&& go install -v golang.org/x/tools/cmd/vet
# Grab Go's lint tool
ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
@ -172,7 +177,7 @@ RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
# Download man page generator
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
&& git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
&& git clone -b v1.2 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
&& go get -v -d github.com/cpuguy83/go-md2man \
&& go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \

View file

@ -65,7 +65,7 @@ test-docker-py: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py
validate: build
$(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-pkg validate-test validate-toml validate-vet
$(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet
shell: build
$(DOCKER_RUN_DOCKER) bash

View file

@ -1,8 +1,14 @@
package client
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
@ -10,48 +16,289 @@ import (
flag "github.com/docker/docker/pkg/mflag"
)
// CmdCp copies files/folders from a path on the container to a directory on the host running the command.
//
// If HOSTDIR is '-', the data is written as a tar file to STDOUT.
//
// Usage: docker cp CONTAINER:PATH HOSTDIR
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", []string{"CONTAINER:PATH HOSTDIR|-"}, "Copy files/folders from a container's PATH to a HOSTDIR on the host\nrunning the command. Use '-' to write the data as a tar file to STDOUT.", true)
cmd.Require(flag.Exact, 2)
type copyDirection int
const (
fromContainer copyDirection = (1 << iota)
toContainer
acrossContainers = fromContainer | toContainer
)
// CmdCp copies files/folders to or from a path in a container.
//
// When copying from a container, if LOCALPATH is '-' the data is written as a
// tar archive file to STDOUT.
//
// When copying to a container, if LOCALPATH is '-' the data is read as a tar
// archive file from STDIN, and the destination CONTAINER:PATH must specify
// a directory.
//
// Usage:
// docker cp CONTAINER:PATH LOCALPATH|-
// docker cp LOCALPATH|- CONTAINER:PATH
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd(
"cp",
[]string{"CONTAINER:PATH LOCALPATH|-", "LOCALPATH|- CONTAINER:PATH"},
strings.Join([]string{
"Copy files/folders between a container and your host.\n",
"Use '-' as the source to read a tar archive from stdin\n",
"and extract it to a directory destination in a container.\n",
"Use '-' as the destination to stream a tar archive of a\n",
"container source to stdout.",
}, ""),
true,
)
cmd.Require(flag.Exact, 2)
cmd.ParseFlags(args, true)
// deal with path name with `:`
info := strings.SplitN(cmd.Arg(0), ":", 2)
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
if cmd.Arg(0) == "" {
return fmt.Errorf("source can not be empty")
}
if cmd.Arg(1) == "" {
return fmt.Errorf("destination can not be empty")
}
cfg := &types.CopyConfig{
Resource: info[1],
srcContainer, srcPath := splitCpArg(cmd.Arg(0))
dstContainer, dstPath := splitCpArg(cmd.Arg(1))
var direction copyDirection
if srcContainer != "" {
direction |= fromContainer
}
serverResp, err := cli.call("POST", "/containers/"+info[0]+"/copy", cfg, nil)
if serverResp.body != nil {
defer serverResp.body.Close()
if dstContainer != "" {
direction |= toContainer
}
if serverResp.statusCode == 404 {
return fmt.Errorf("No such container: %v", info[0])
switch direction {
case fromContainer:
return cli.copyFromContainer(srcContainer, srcPath, dstPath)
case toContainer:
return cli.copyToContainer(srcPath, dstContainer, dstPath)
case acrossContainers:
// Copying between containers isn't supported.
return fmt.Errorf("copying between containers is not supported")
default:
// User didn't specify any container.
return fmt.Errorf("must specify at least one container source")
}
}
// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by
// requiring a LOCALPATH with a `:` to be made explicit with a relative or
// absolute path:
// `/path/to/file:name.txt` or `./file:name.txt`
//
// This is apparently how `scp` handles this as well:
// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/
//
// We can't simply check for a filepath separator because container names may
// have a separator, e.g., "host0/cname1" if the container is in a Docker cluster,
// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows
// client, a `:` could be part of an absolute Windows path, in which case it
// is immediately followed by a backslash.
func splitCpArg(arg string) (container, path string) {
if filepath.IsAbs(arg) {
// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
return "", arg
}
parts := strings.SplitN(arg, ":", 2)
if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
// Either there's no `:` in the arg
// OR it's an explicit local relative path like `./file:name.txt`.
return "", arg
}
return parts[0], parts[1]
}
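As an illustration only (not part of this change), here is a quick sketch of how splitCpArg above resolves the ambiguous cases described in the comment, assuming a Unix client where filepath.IsAbs treats only a leading `/` as absolute; fmt is already imported in this file:

func exampleSplitCpArg() {
	fmt.Println(splitCpArg("mycontainer:/etc/hosts")) // container "mycontainer", path "/etc/hosts"
	fmt.Println(splitCpArg("./file:name.txt"))        // local path "./file:name.txt"
	fmt.Println(splitCpArg("/path/to/file:name.txt")) // local path "/path/to/file:name.txt"
	fmt.Println(splitCpArg("file.txt"))               // local path "file.txt"
}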
func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
urlStr := fmt.Sprintf("/containers/%s/archive?%s", containerName, query.Encode())
response, err := cli.call("HEAD", urlStr, nil, nil)
if err != nil {
return err
return stat, err
}
defer response.body.Close()
if response.statusCode != http.StatusOK {
return stat, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
hostPath := cmd.Arg(1)
if serverResp.statusCode == 200 {
if hostPath == "-" {
_, err = io.Copy(cli.out, serverResp.body)
} else {
err = archive.Untar(serverResp.body, hostPath, &archive.TarOptions{NoLchown: true})
}
return getContainerPathStatFromHeader(response.header)
}
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat
encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
err := json.NewDecoder(statDecoder).Decode(&stat)
if err != nil {
err = fmt.Errorf("unable to decode container path stat header: %s", err)
}
return stat, err
}
func resolveLocalPath(localPath string) (absPath string, err error) {
if absPath, err = filepath.Abs(localPath); err != nil {
return
}
return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
}
func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string) (err error) {
if dstPath != "-" {
// Get an absolute destination path.
dstPath, err = resolveLocalPath(dstPath)
if err != nil {
return err
}
}
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
urlStr := fmt.Sprintf("/containers/%s/archive?%s", srcContainer, query.Encode())
response, err := cli.call("GET", urlStr, nil, nil)
if err != nil {
return err
}
defer response.body.Close()
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
if dstPath == "-" {
// Send the response to STDOUT.
_, err = io.Copy(os.Stdout, response.body)
return err
}
// In order to get the copy behavior right, we need to know information
// about both the source and the destination. The response headers include
// stat info about the source that we can use in deciding exactly how to
// copy it locally. Along with the stat info about the local destination,
// we have everything we need to handle the multiple possibilities there
// can be when copying a file/dir from one location to another file/dir.
stat, err := getContainerPathStatFromHeader(response.header)
if err != nil {
return fmt.Errorf("unable to get resource stat from response: %s", err)
}
// Prepare source copy info.
srcInfo := archive.CopyInfo{
Path: srcPath,
Exists: true,
IsDir: stat.Mode.IsDir(),
}
// See comments in the implementation of `archive.CopyTo` for exactly what
// goes into deciding how and whether the source archive needs to be
// altered for the correct copy behavior.
return archive.CopyTo(response.body, srcInfo, dstPath)
}
func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (err error) {
if srcPath != "-" {
// Get an absolute source path.
srcPath, err = resolveLocalPath(srcPath)
if err != nil {
return err
}
}
// In order to get the copy behavior right, we need to know information
// about both the source and destination. The API is a simple tar
// archive/extract API but we can use the stat info header about the
// destination to be more informed about exactly what the destination is.
// Prepare destination copy info by stat-ing the container path.
dstInfo := archive.CopyInfo{Path: dstPath}
dstStat, err := cli.statContainerPath(dstContainer, dstPath)
// Ignore any error and assume that the parent directory of the destination
// path exists, in which case the copy may still succeed. If there is any
// type of conflict (e.g., non-directory overwriting an existing directory
or vice versa) the extraction will fail. If the destination simply did
// not exist, but the parent directory does, the extraction will still
// succeed.
if err == nil {
dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
}
var content io.Reader
if srcPath == "-" {
// Use STDIN.
content = os.Stdin
if !dstInfo.IsDir {
return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
}
} else {
srcArchive, err := archive.TarResource(srcPath)
if err != nil {
return err
}
defer srcArchive.Close()
// With the stat info about the local source as well as the
// destination, we have enough information to know whether we need to
// alter the archive that we upload so that when the server extracts
it to the specified directory in the container we get the desired
// copy behavior.
// Prepare source copy info.
srcInfo, err := archive.CopyInfoStatPath(srcPath, true)
if err != nil {
return err
}
// See comments in the implementation of `archive.PrepareArchiveCopy`
// for exactly what goes into deciding how and whether the source
// archive needs to be altered for the correct copy behavior when it is
// extracted. This function also infers from the source and destination
// info which directory to extract to, which may be the parent of the
// destination that the user specified.
dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
if err != nil {
return err
}
defer preparedArchive.Close()
dstPath = dstDir
content = preparedArchive
}
query := make(url.Values, 2)
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
query.Set("noOverwriteDirNonDir", "true")
urlStr := fmt.Sprintf("/containers/%s/archive?%s", dstContainer, query.Encode())
response, err := cli.stream("PUT", urlStr, &streamOpts{in: content})
if err != nil {
return err
}
defer response.body.Close()
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
return nil
}

View file

@ -48,9 +48,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
proto = parts[1]
}
natPort := port + "/" + proto
if frontends, exists := c.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
newP, err := nat.NewPort(proto, port)
if err != nil {
return err
}
if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort)
}
return nil
}
@ -59,7 +63,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
for from, frontends := range c.NetworkSettings.Ports {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort)
fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort)
}
}

View file

@ -1193,8 +1193,8 @@ func (s *Server) getContainersByName(version version.Version, w http.ResponseWri
return fmt.Errorf("Missing parameter")
}
if version.LessThan("1.19") {
containerJSONRaw, err := s.daemon.ContainerInspectRaw(vars["name"])
if version.LessThan("1.20") {
containerJSONRaw, err := s.daemon.ContainerInspectPre120(vars["name"])
if err != nil {
return err
}
@ -1309,6 +1309,7 @@ func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *ht
return nil
}
// postContainersCopy is deprecated in favor of getContainersArchivePath.
func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
@ -1348,6 +1349,104 @@ func (s *Server) postContainersCopy(version version.Version, w http.ResponseWrit
return nil
}
// Encode the stat to JSON, base64 encode, and place in a header.
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
statJSON, err := json.Marshal(stat)
if err != nil {
return err
}
header.Set(
"X-Docker-Container-Path-Stat",
base64.StdEncoding.EncodeToString(statJSON),
)
return nil
}
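As an illustration only (not part of this change), here is a minimal, self-contained round trip of this header encoding, mirroring setContainerPathStatHeader above on the server side and getContainerPathStatFromHeader in api/client/cp.go on the client side; the pathStat struct and its values are made-up stand-ins for types.ContainerPathStat:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"
)

// pathStat stands in for types.ContainerPathStat (Mode omitted for brevity).
type pathStat struct {
	Name  string    `json:"name"`
	Path  string    `json:"path"`
	Size  int64     `json:"size"`
	Mtime time.Time `json:"mtime"`
}

func main() {
	header := make(http.Header)

	// Server side: JSON-encode the stat, base64-encode it, set the header.
	stat := pathStat{Name: "hosts", Path: "/etc/hosts", Size: 174, Mtime: time.Now()}
	statJSON, err := json.Marshal(stat)
	if err != nil {
		panic(err)
	}
	header.Set("X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(statJSON))

	// Client side: base64-decode the header value and JSON-decode the result.
	var decoded pathStat
	dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(header.Get("X-Docker-Container-Path-Stat")))
	if err := json.NewDecoder(dec).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded)
}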
func (s *Server) headContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
path := r.Form.Get("path")
switch {
case name == "":
return fmt.Errorf("bad parameter: 'name' cannot be empty")
case path == "":
return fmt.Errorf("bad parameter: 'path' cannot be empty")
}
stat, err := s.daemon.ContainerStatPath(name, path)
if err != nil {
return err
}
return setContainerPathStatHeader(stat, w.Header())
}
func (s *Server) getContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
path := r.Form.Get("path")
switch {
case name == "":
return fmt.Errorf("bad parameter: 'name' cannot be empty")
case path == "":
return fmt.Errorf("bad parameter: 'path' cannot be empty")
}
tarArchive, stat, err := s.daemon.ContainerArchivePath(name, path)
if err != nil {
return err
}
defer tarArchive.Close()
if err := setContainerPathStatHeader(stat, w.Header()); err != nil {
return err
}
w.Header().Set("Content-Type", "application/x-tar")
_, err = io.Copy(w, tarArchive)
return err
}
func (s *Server) putContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
path := r.Form.Get("path")
noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir")
switch {
case name == "":
return fmt.Errorf("bad parameter: 'name' cannot be empty")
case path == "":
return fmt.Errorf("bad parameter: 'path' cannot be empty")
}
return s.daemon.ContainerExtractToDir(name, path, noOverwriteDirNonDir, r.Body)
}
func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
@ -1536,6 +1635,9 @@ func createRouter(s *Server) *mux.Router {
ProfilerSetup(r, "/debug/")
}
m := map[string]map[string]HttpApiFunc{
"HEAD": {
"/containers/{name:.*}/archive": s.headContainersArchive,
},
"GET": {
"/_ping": s.ping,
"/events": s.getEvents,
@ -1557,6 +1659,7 @@ func createRouter(s *Server) *mux.Router {
"/containers/{name:.*}/stats": s.getContainersStats,
"/containers/{name:.*}/attach/ws": s.wsContainersAttach,
"/exec/{id:.*}/json": s.getExecByID,
"/containers/{name:.*}/archive": s.getContainersArchive,
},
"POST": {
"/auth": s.postAuth,
@ -1582,6 +1685,9 @@ func createRouter(s *Server) *mux.Router {
"/exec/{name:.*}/resize": s.postContainerExecResize,
"/containers/{name:.*}/rename": s.postContainerRename,
},
"PUT": {
"/containers/{name:.*}/archive": s.putContainersArchive,
},
"DELETE": {
"/containers/{name:.*}": s.deleteContainers,
"/images/{name:.*}": s.deleteImages,

View file

@ -1,6 +1,7 @@
package types
import (
"os"
"time"
"github.com/docker/docker/daemon/network"
@ -127,6 +128,18 @@ type CopyConfig struct {
Resource string
}
// ContainerPathStat is used to encode the header from
// GET /containers/{name:.*}/archive
// "name" is the file or directory name.
// "path" is the absolute path to the resource in the container.
type ContainerPathStat struct {
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
Mtime time.Time `json:"mtime"`
}
// GET "/containers/{name:.*}/top"
type ContainerProcessList struct {
Processes [][]string
@ -225,8 +238,6 @@ type ContainerJSONBase struct {
ExecDriver string
MountLabel string
ProcessLabel string
Volumes map[string]string
VolumesRW map[string]bool
AppArmorProfile string
ExecIDs []string
HostConfig *runconfig.HostConfig
@ -235,13 +246,16 @@ type ContainerJSONBase struct {
type ContainerJSON struct {
*ContainerJSONBase
Mounts []MountPoint
Config *runconfig.Config
}
// backcompatibility struct along with ContainerConfig
type ContainerJSONRaw struct {
type ContainerJSONPre120 struct {
*ContainerJSONBase
Config *ContainerConfig
Volumes map[string]string
VolumesRW map[string]bool
Config *ContainerConfig
}
type ContainerConfig struct {
@ -253,3 +267,13 @@ type ContainerConfig struct {
CpuShares int64
Cpuset string
}
// MountPoint represents a mount point configuration inside the container.
type MountPoint struct {
Name string `json:",omitempty"`
Source string
Destination string
Driver string `json:",omitempty"`
Mode string // this is internally named `Relabel`
RW bool
}

contrib/apparmor/docker (new file, 25 lines)
View file

@ -0,0 +1,25 @@
#include <tunables/global>
profile docker-default flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network,
capability,
file,
umount,
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/efi/efivars/** rwklx,
deny /sys/kernel/security/** rwklx,
}

View file

@ -50,6 +50,7 @@ for version in "${versions[@]}"; do
build-essential # "essential for building Debian packages"
curl ca-certificates # for downloading Go
debhelper # for easy ".deb" building
dh-apparmor # for apparmor debhelper
dh-systemd # for systemd debhelper integration
git # for "git commit" info in "docker -v"
libapparmor-dev # for "sys/apparmor.h"

View file

@ -80,9 +80,5 @@ for version in "${versions[@]}"; do
echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
if [ "$from" == "centos:6" ]; then
echo 'ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs' >> "$version/Dockerfile"
else
echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
fi
echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
done

View file

@ -94,13 +94,19 @@ __docker_containers_and_images() {
COMPREPLY+=( "${containers[@]}" )
}
# Finds the position of the first word that is neither option nor an option's argument.
# If there are options that require arguments, you should pass a glob describing those
# options, e.g. "--option1|-o|--option2"
# Use this function to restrict completions to exact positions after the argument list.
__docker_pos_first_nonflag() {
local argument_flags=$1
local counter=$cpos
local counter=$((command_pos + 1))
while [ $counter -le $cword ]; do
if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
(( counter++ ))
# eat "=" in case of --option=arg syntax
[ "${words[$counter]}" = "=" ] && (( counter++ ))
else
case "${words[$counter]}" in
-*)
@ -110,12 +116,38 @@ __docker_pos_first_nonflag() {
;;
esac
fi
# Bash splits words at "=", retaining "=" as a word, examples:
# "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words
while [ "${words[$counter + 1]}" = "=" ] ; do
counter=$(( counter + 2))
done
(( counter++ ))
done
echo $counter
}
# Returns the value of the first option matching option_glob.
# Valid values for option_glob are option names like '--log-level' and
# globs like '--log-level|-l'
# Only positions between the command and the current word are considered.
__docker_value_of_option() {
local option_glob=$1
local counter=$((command_pos + 1))
while [ $counter -lt $cword ]; do
case ${words[$counter]} in
$option_glob )
echo ${words[$counter + 1]}
break
;;
esac
(( counter++ ))
done
}
# Transforms a multiline list of strings into a single line string
# with the words separated by "|".
# This is used to prepare arguments to __docker_pos_first_nonflag().
@ -182,6 +214,80 @@ __docker_capabilities() {
" -- "$cur" ) )
}
__docker_log_drivers() {
COMPREPLY=( $( compgen -W "
fluentd
gelf
journald
json-file
none
syslog
" -- "$cur" ) )
}
__docker_log_driver_options() {
# see docs/reference/logging/index.md
case $(__docker_value_of_option --log-driver) in
fluentd)
COMPREPLY=( $( compgen -W "fluentd-address fluentd-tag" -S = -- "$cur" ) )
;;
gelf)
COMPREPLY=( $( compgen -W "gelf-address gelf-tag" -S = -- "$cur" ) )
;;
syslog)
COMPREPLY=( $( compgen -W "syslog-address syslog-facility syslog-tag" -S = -- "$cur" ) )
;;
*)
return
;;
esac
compopt -o nospace
}
__docker_complete_log_driver_options() {
# "=" gets parsed to a word and assigned to either $cur or $prev depending on whether
# it is the last character or not. So we search for "xxx=" in the last two words.
case "${words[$cword-2]}$prev=" in
*gelf-address=*)
COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur#=}" ) )
compopt -o nospace
return
;;
*syslog-address=*)
COMPREPLY=( $( compgen -W "tcp udp unix" -S "://" -- "${cur#=}" ) )
compopt -o nospace
return
;;
*syslog-facility=*)
COMPREPLY=( $( compgen -W "
auth
authpriv
cron
daemon
ftp
kern
local0
local1
local2
local3
local4
local5
local6
local7
lpr
mail
news
syslog
user
uucp
" -- "${cur#=}" ) )
return
;;
esac
return 1
}
# a selection of the available signals that is most likely of interest in the
# context of docker containers.
__docker_signals() {
@ -222,13 +328,17 @@ _docker_docker() {
return
;;
--log-driver)
COMPREPLY=( $( compgen -W "json-file syslog none" -- "$cur" ) )
__docker_log_drivers
return
;;
--log-level|-l)
COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
return
;;
--log-opt)
__docker_log_driver_options
return
;;
--pidfile|-p|--tlscacert|--tlscert|--tlskey)
_filedir
return
@ -242,6 +352,8 @@ _docker_docker() {
;;
esac
__docker_complete_log_driver_options && return
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) )
@ -382,8 +494,6 @@ _docker_events() {
;;
esac
# "=" gets parsed to a word and assigned to either $cur or $prev depending on whether
# it is the last character or not. So we search for "xxx=" in the last two words.
case "${words[$cword-2]}$prev=" in
*container=*)
cur="${cur#=}"
@ -836,6 +946,7 @@ _docker_run() {
--label-file
--link
--log-driver
--log-opt
--lxc-conf
--mac-address
--memory -m
@ -941,7 +1052,11 @@ _docker_run() {
return
;;
--log-driver)
COMPREPLY=( $( compgen -W "json-file syslog none" -- "$cur") )
__docker_log_drivers
return
;;
--log-opt)
__docker_log_driver_options
return
;;
--net)
@ -996,6 +1111,8 @@ _docker_run() {
;;
esac
__docker_complete_log_driver_options && return
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) )
@ -1218,6 +1335,7 @@ _docker() {
--label
--log-driver
--log-level -l
--log-opt
--mtu
--pidfile -p
--registry-mirror
@ -1235,7 +1353,7 @@ _docker() {
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
local command='docker' cpos=0
local command='docker' command_pos=0
local counter=1
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
@ -1254,8 +1372,7 @@ _docker() {
;;
*)
command="${words[$counter]}"
cpos=$counter
(( cpos++ ))
command_pos=$counter
break
;;
esac

View file

@ -7,6 +7,7 @@
#
# contributors:
# - Felix Riedel
# - Steve Durrheimer
# - Vincent Bernat
#
# license:
@ -38,7 +39,9 @@
#
__docker_get_containers() {
local kind expl
[[ $PREFIX = -* ]] && return 1
integer ret=1
local kind
declare -a running stopped lines args
kind=$1
@ -82,54 +85,77 @@ __docker_get_containers() {
s="${name}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
stopped=($stopped $s)
stopped=($stopped ${s#*/})
else
running=($running $s)
running=($running ${s#*/})
fi
done
done
[[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running
[[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped
[[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0
[[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0
return ret
}
__docker_stoppedcontainers() {
[[ $PREFIX = -* ]] && return 1
__docker_get_containers stopped "$@"
}
__docker_runningcontainers() {
[[ $PREFIX = -* ]] && return 1
__docker_get_containers running "$@"
}
__docker_containers() {
[[ $PREFIX = -* ]] && return 1
__docker_get_containers all "$@"
}
__docker_images() {
local expl
[[ $PREFIX = -* ]] && return 1
integer ret=1
declare -a images
images=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/\\:}%% *})
images=(${${images%\\:<none>}#<none>} ${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}})
_describe -t docker-images "images" images
images=(${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}})
_describe -t docker-images "images" images && ret=0
__docker_repositories_with_tags && ret=0
return ret
}
__docker_tags() {
local expl
declare -a tags
tags=(${${${${${(f)"$(_call_program commands docker $docker_options images)"}#* }## #}%% *}[2,-1]})
_describe -t docker-tags "tags" tags
__docker_repositories() {
[[ $PREFIX = -* ]] && return 1
declare -a repos
repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]})
repos=(${repos#<none>})
_describe -t docker-repos "repositories" repos
}
__docker_repositories_with_tags() {
if compset -P '*:'; then
__docker_tags
else
__docker_repositories -qS ":"
fi
[[ $PREFIX = -* ]] && return 1
integer ret=1
declare -a repos onlyrepos matched
declare m
repos=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/:::}%% *})
repos=(${${repos%:::<none>}#<none>})
# Check if we have a prefix-match for the current prefix.
onlyrepos=(${repos%::*})
for m in $onlyrepos; do
[[ ${PREFIX##${~~m}} != ${PREFIX} ]] && {
# Yes, complete with tags
repos=(${${repos/:::/:}/:/\\:})
_describe -t docker-repos-with-tags "repositories with tags" repos && ret=0
return ret
}
done
# No, only complete repositories
onlyrepos=(${${repos%:::*}/:/\\:})
_describe -t docker-repos "repositories" onlyrepos -qS : && ret=0
return ret
}
__docker_search() {
# declare -a dockersearch
[[ $PREFIX = -* ]] && return 1
local cache_policy
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
@ -152,20 +178,11 @@ __docker_search() {
}
__docker_caching_policy() {
oldp=( "$1(Nmh+1)" ) # 1 hour
(( ${#oldp} ))
}
__docker_repositories() {
local expl
declare -a repos
repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]})
_describe -t docker-repos "repositories" repos "$@"
oldp=( "$1"(Nmh+1) ) # 1 hour
(( $#oldp ))
}
__docker_commands() {
# local -a _docker_subcommands
local cache_policy
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
@ -177,7 +194,7 @@ __docker_commands() {
&& ! _retrieve_cache docker_subcommands;
then
local -a lines
lines=(${(f)"$(_call_program commands docker $docker_options 2>&1)"})
lines=(${(f)"$(_call_program commands docker 2>&1)"})
_docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
_docker_subcommands=($_docker_subcommands 'help:Show help for a command')
_store_cache docker_subcommands _docker_subcommands
@ -186,122 +203,124 @@ __docker_commands() {
}
__docker_subcommand() {
local -a _command_args
local -a _command_args opts_help opts_cpumem opts_create
local expl help="-h --help"
integer ret=1
opts_help=("(: -)"{-h,--help}"[Print usage]")
opts_cpumem=(
"($help -c --cpu-shares)"{-c,--cpu-shares=-}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)"
"($help)--cgroup-parent=-[Parent cgroup for the container]:cgroup: "
"($help)--cpu-period=-[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: "
"($help)--cpu-quota=-[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: "
"($help)--cpuset-cpus=-[CPUs in which to allow execution]:CPUs: "
"($help)--cpuset-mems=-[MEMs in which to allow execution]:MEMs: "
"($help -m --memory)"{-m,--memory=-}"[Memory limit]:Memory limit: "
"($help)--memory-swap=-[Total memory limit with swap]:Memory limit: "
)
opts_create=(
"($help -a --attach)"{-a,--attach=-}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)"
"($help)*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: "
"($help)--blkio-weight=-[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)"
"($help)*--cap-add=-[Add Linux capabilities]:capability: "
"($help)*--cap-drop=-[Drop Linux capabilities]:capability: "
"($help)--cidfile=-[Write the container ID to the file]:CID file:_files"
"($help)*--device=-[Add a host device to the container]:device:_files"
"($help)*--dns=-[Set custom dns servers]:dns server: "
"($help)*--dns-search=-[Set custom DNS search domains]:dns domains: "
"($help)*"{-e,--env=-}"[Set environment variables]:environment variable: "
"($help)--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: "
"($help)*--env-file=-[Read environment variables from a file]:environment file:_files"
"($help)*--expose=-[Expose a port from the container without publishing it]: "
"($help)*--group-add=-[Add additional groups to run as]:group:_groups"
"($help -h --hostname)"{-h,--hostname=-}"[Container host name]:hostname:_hosts"
"($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]"
"($help)--ipc=-[IPC namespace to use]:IPC namespace: "
"($help)*--link=-[Add link to another container]:link:->link"
"($help)*"{-l,--label=-}"[Set meta data on a container]:label: "
"($help)--log-driver=-[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd none)"
"($help)*--log-opt=-[Log driver specific options]:log driver options: "
"($help)*--lxc-conf=-[Add custom lxc options]:lxc options: "
"($help)--mac-address=-[Container MAC address]:MAC address: "
"($help)--name=-[Container name]:name: "
"($help)--net=-[Network mode]:network mode:(bridge none container host)"
"($help)--oom-kill-disable[Disable OOM Killer]"
"($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]"
"($help)*"{-p,--publish=-}"[Expose a container's port to the host]:port:_ports"
"($help)--pid=-[PID namespace to use]:PID: "
"($help)--privileged[Give extended privileges to this container]"
"($help)--read-only[Mount the container's root filesystem as read only]"
"($help)--restart=-[Restart policy]:restart policy:(no on-failure always)"
"($help)*--security-opt=-[Security options]:security option: "
"($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]"
"($help -u --user)"{-u,--user=-}"[Username or UID]:user:_users"
"($help)*--ulimit=-[ulimit options]:ulimit: "
"($help)*-v[Bind mount a volume]:volume: "
"($help)*--volumes-from=-[Mount volumes from the specified container]:volume: "
"($help -w --workdir)"{-w,--workdir=-}"[Working directory inside the container]:directory:_directories"
)
case "$words[1]" in
(attach)
_arguments \
'(- :)--help[Print usage]' \
'--no-stdin[Do not attach stdin]' \
'--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \
':containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help)--no-stdin[Do not attach stdin]" \
"($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
"($help -):containers:__docker_runningcontainers" && ret=0
;;
(build)
_arguments \
'(-c --cpu-share)'{-c,--cpu-share=-}'[CPU shares (relative weight)]:CPU shares: ' \
'--cgroup-parent=-[Optional parent cgroup for the container]:cgroup parent: ' \
'--cpu-period=-[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: ' \
'--cpu-quota=-[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: ' \
'--cpuset-cpus=-[CPUs in which to allow execution (0-3, 0,1)]:CPUs: ' \
'--cpuset-mems=-[MEMs in which to allow execution (0-3, 0,1)]:MEMs: ' \
'(-f --file)'{-f,--file=-}"[Name of the Dockerfile (Default is 'PATH/Dockerfile')]:Dockerfile:_files" \
'--force-rm[Always remove intermediate containers]' \
'(- :)--help[Print usage]' \
'(-m --memory)'{-m,--memory=-}'[Memory limit]:Memory limit: ' \
'--memory-swap=-[Total memory (memory + swap), '-1' to disable swap]' \
'--no-cache[Do not use cache when building the image]' \
'--pull[Always attempt to pull a newer version of the image]' \
'(-q --quiet)'{-q,--quiet}'[Suppress the verbose output generated by the containers]' \
'--rm[Remove intermediate containers after a successful build]' \
'(-t --tag)'{-t,--tag=-}'[Repository name (and optionally a tag) for the image]:repository:__docker_repositories_with_tags' \
':path or URL:_directories' && ret=0
$opts_help \
$opts_cpumem \
"($help -f --file)"{-f,--file=-}"[Name of the Dockerfile]:Dockerfile:_files" \
"($help)--force-rm[Always remove intermediate containers]" \
"($help)--no-cache[Do not use cache when building the image]" \
"($help)--pull[Attempt to pull a newer version of the image]" \
"($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \
"($help)--rm[Remove intermediate containers after a successful build]" \
"($help -t --tag)"{-t,--tag=-}"[Repository, name and tag for the image]: :__docker_repositories_with_tags" \
"($help -):path or URL:_directories" && ret=0
;;
(commit)
_arguments \
'(-a --author)'{-a,--author=-}'[Author]:author: ' \
'*'{-c,--change=-}'[Apply Dockerfile instruction to the created image]' \
'(- :)--help[Print usage]' \
'(-m --message)'{-m,--message=-}'[Commit message]:message: ' \
'(-p --pause)'{-p,--pause}'[Pause container during commit]' \
':container:__docker_containers' \
':repository:__docker_repositories_with_tags' && ret=0
$opts_help \
"($help -a --author)"{-a,--author=-}"[Author]:author: " \
"($help -c --change)*"{-c,--change=-}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \
"($help -m --message)"{-m,--message=-}"[Commit message]:message: " \
"($help -p --pause)"{-p,--pause}"[Pause container during commit]" \
"($help -):container:__docker_containers" \
"($help -): :__docker_repositories_with_tags" && ret=0
;;
(cp)
_arguments \
'(- :)--help[Print usage]' \
':container:->container' \
':hostpath:_files' && ret=0
$opts_help \
"($help -)1:container:->container" \
"($help -)2:hostpath:_files" && ret=0
case $state in
(container)
if compset -P '*:'; then
_files
if compset -P "*:"; then
_files && ret=0
else
__docker_containers -qS ":"
__docker_containers -qS ":" && ret=0
fi
;;
esac
;;
(create)
_arguments \
'*'{-a,--attach=-}'[Attach to STDIN, STDOUT or STDERR]:STD:(STDIN STDOUT STDERR)' \
'*--add-host=-[Add a custom host-to-IP mapping (host:ip)]:host\:ip mapping: ' \
'--blkio-weight=-[Block IO (relative weight), between 10 and 1000]:Block IO weight: ' \
'(-c --cpu-shares)'{-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
'*--cap-add=-[Add Linux capabilities]:capability: ' \
'*--cap-drop=-[Drop Linux capabilities]:capability: ' \
'--cgroup-parent=-[Optional parent cgroup for the container]:cgroup parent: ' \
'--cidfile=-[Write the container ID to the file]:CID:_files' \
'--cpu-period=-[Limit CPU CFS (Completely Fair Scheduler) period]:CPU period: ' \
'--cpu-quota=-[Limit the CPU CFS quota]:CPU quota: ' \
'--cpuset-cpus=-[CPUs in which to allow execution (0-3, 0,1)]:CPUs: ' \
'--cpuset-mems=-[MEMs in which to allow execution (0-3, 0,1)]:MEMs: ' \
'*--device=-[Add a host device to the container]:device:_files' \
'*--dns=-[Set custom dns servers]:dns server: ' \
'*--dns-search=-[Set custom DNS search domains]:dns domains: ' \
'*'{-e,--env=-}'[Set environment variables]:environment variable: ' \
'--entrypoint=-[Overwrite the default ENTRYPOINT of the image]:entry point: ' \
'*--env-file=-[Read in a file of environment variables]:environment file:_files' \
'*--expose=-[Expose a port or a range of ports]:port or a range of ports: ' \
'(-h --hostname)'{-h,--hostname=-}'[Container host name]:hostname:_hosts' \
'(- :)--help[Print usage]' \
'(-i --interactive)'{-i,--interactive}'[Keep STDIN open even if not attached]' \
'--ipc=-[IPC namespace to use]:IPC namespace: ' \
'*'{-l,--label=-}'[Set meta data on a container]:Label: ' \
'*--label-file=-[Read in a line delimited file of labels]' \
'*--link=-[Add link to another container]:link:->link' \
'--log-driver=-[Logging driver for container]:Logging driver:(json-file syslog journald gelf fluentd none)' \
'*--log-opt=-[Log driver options]:Log driver options: ' \
'*--lxc-conf=-[Add custom lxc options]:lxc options: ' \
'(-m --memory)'{-m,--memory=-}'[Memory limit (in bytes)]:Memory limit: ' \
'--mac-address=-[Container MAC address (e.g. 92:d0:c6:0a:29:33)]:MAC address: ' \
"--memory-swap=-[Total memory (memory + swap), '-1' to disable swap]:Total memory: " \
'--name=-[Assign a name to the container]:name: ' \
'--net=-[Set the Network mode for the container]:network mode:(bridge none container host)' \
'--oom-kill-disable[Disable OOM Killer]' \
'(-P --publish-all)'{-P,--publish-all}'[Publish all exposed ports to random ports]' \
'*'{-p,--publish=-}"[Publish a container's port(s) to the host]:port:_ports" \
'--pid=-[PID namespace to use]:PID: ' \
'--privileged[Give extended privileges to this container]' \
"--read-only[Mount the container's root filesystem as read only]" \
'--restart=-[Restart policy]:restart policy:(no on-failure always)' \
'--rm[Remove intermediate containers when it exits]' \
'*--security-opt=-[Security options]:security option: ' \
'(-t --tty)'{-t,--tty}'[Allocate a pseudo-TTY]' \
'(-u --user)'{-u,--user=-}'[Username or UID]:user:_users' \
'*--ulimit=-[Ulimit options]:ulimit: ' \
'--uts=-[UTS namespace to use]:UTS: ' \
'*'{-v,--volume=-}'[Bind mount a volume]:volume: ' \
'*--volumes-from=-[Mount volumes from the specified container]:volume: ' \
'(-w --workdir)'{-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \
'(-):images:__docker_images' \
'(-):command: _command_names -e' \
'*::arguments: _normal' && ret=0
$opts_help \
$opts_cpumem \
$opts_create \
"($help -): :__docker_images" \
"($help -):command: _command_names -e" \
"($help -)*::arguments: _normal" && ret=0
case $state in
(link)
if compset -P '*:'; then
_wanted alias expl 'Alias' compadd -E ""
if compset -P "*:"; then
_wanted alias expl "Alias" compadd -E "" && ret=0
else
__docker_runningcontainers -qS ":"
__docker_runningcontainers -qS ":" && ret=0
fi
;;
esac
@ -309,239 +328,190 @@ __docker_subcommand() {
;;
(diff)
_arguments \
'(- :)--help[Print usage]' \
'*:containers:__docker_containers' && ret=0
$opts_help \
"($help -)*:containers:__docker_containers" && ret=0
;;
(events)
_arguments \
'*'{-f,--filter=-}'[Filter output based on conditions provided]:filter: ' \
'(- :)--help[Print usage]' \
'--since=-[Show all events created since timestamp]:timestamp: ' \
'--until=-[Stream events until this timestamp]:timestamp: ' && ret=0
$opts_help \
"($help)*"{-f,--filter=-}"[Filter values]:filter: " \
"($help)--since=-[Events created since this timestamp]:timestamp: " \
"($help)--until=-[Events created until this timestamp]:timestamp: " && ret=0
;;
(exec)
local state
_arguments \
'(-d --detach)'{-d,--detach}'[Detached mode: run command in the background]' \
'(- :)--help[Print usage]' \
'(-i --interactive)'{-i,--interactive}'[Keep STDIN open even if not attached]' \
'(-t --tty)'{-t,--tty}'[Allocate a pseudo-TTY]' \
'(-u --user)'{-u,--user=-}'[Username or UID]:User: ' \
':containers:__docker_runningcontainers' \
'*::command:->anycommand' && ret=0
$opts_help \
"($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \
"($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \
"($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \
"($help -u --user)"{-u,--user=-}"[Username or UID]:user:_users" \
"($help -):containers:__docker_runningcontainers" \
"($help -)*::command:->anycommand" && ret=0
case $state in
(anycommand)
shift 1 words
(( CURRENT-- ))
_normal
_normal && ret=0
;;
esac
return ret
;;
(export)
_arguments \
'(- :)--help[Print usage]' \
'(-o --output)'{-o,--output=-}'[Write to a file, instead of STDOUT]:file: ' \
'*:containers:__docker_containers' && ret=0
$opts_help \
"($help -o --output)"{-o,--output=-}"[Write to a file, instead of stdout]:output file:_files" \
"($help -)*:containers:__docker_containers" && ret=0
;;
(history)
_arguments \
'(-H --human)'{-H,--human}'[Print sizes and dates in human readable format]' \
'(- :)--help[Print usage]' \
'--no-trunc[Do not truncate output]' \
'(-q --quiet)'{-q,--quiet}'[Only show numeric IDs]' \
'*:images:__docker_images' && ret=0
$opts_help \
"($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \
"($help)--no-trunc[Do not truncate output]" \
"($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
"($help -)*: :__docker_images" && ret=0
;;
(images)
_arguments \
'(-a --all)'{-a,--all}'[Show all images (default hides intermediate images)]' \
'--digests[Show digests]' \
'*'{-f,--filter=-}'[Filter output based on conditions provided]:filter: ' \
'(- :)--help[Print usage]' \
'--no-trunc[Do not truncate output]' \
'(-q --quiet)'{-q,--quiet}'[Only show numeric IDs]' \
':repository:__docker_repositories' && ret=0
$opts_help \
"($help -a --all)"{-a,--all}"[Show all images]" \
"($help)--digest[Show digests]" \
"($help)*"{-f,--filter=-}"[Filter values]:filter: " \
"($help)--no-trunc[Do not truncate output]" \
"($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
"($help -): :__docker_repositories" && ret=0
;;
(import)
_arguments \
'*'{-c,--change=-}'[Apply Dockerfile instruction to the created image]' \
'(- :)--help[Print usage]' \
':URL:(http:// file://)' \
':repository:__docker_repositories_with_tags' && ret=0
$opts_help \
"($help -c --change)*"{-c,--change=-}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \
"($help -):URL:(- http:// file://)" \
"($help -): :__docker_repositories_with_tags" && ret=0
;;
(info|version)
_arguments \
'(- :)--help[Print usage]' && ret=0
$opts_help && ret=0
;;
(inspect)
_arguments \
'(-f --format)'{-f,--format=-}'[Format the output using the given go template]:template: ' \
'(- :)--help[Print usage]' \
'--type=-[Return JSON for specified type, permissible values are "image" or "container"]:type:(image container)' \
'*:containers:__docker_containers' && ret=0
$opts_help \
"($help -f --format=-)"{-f,--format=-}"[Format the output using the given go template]:template: " \
"($help)--type=-[Return JSON for specified type]:type:(image container)" \
"($help -)*:containers:__docker_containers" && ret=0
;;
(kill)
_arguments \
'(- :)--help[Print usage]' \
'(-s --signal)'{-s,--signal=-}'[Signal to send to the container]:signal:_signals' \
'*:containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help -s --signal)"{-s,--signal=-}"[Signal to send]:signal:_signals" \
"($help -)*:containers:__docker_runningcontainers" && ret=0
;;
(load)
_arguments \
'(- :)--help[Print usage]' \
'(-i --input)'{-i,--input=-}'[Read from a tar archive file, instead of STDIN]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"' && ret=0
$opts_help \
"($help -i --input)"{-i,--input=-}"[Read from tar archive file]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"" && ret=0
;;
(login)
_arguments \
'(-e --email)'{-e,--email=-}'[Email]:email: ' \
'(- :)--help[Print usage]' \
'(-p --password)'{-p,--password=-}'[Password]:password: ' \
'(-u --user)'{-u,--user=-}'[Username]:username: ' \
'1:server:->string' && ret=0
$opts_help \
"($help -e --email)"{-e,--email=-}"[Email]:email: " \
"($help -p --password)"{-p,--password=-}"[Password]:password: " \
"($help -u --user)"{-u,--user=-}"[Username]:username: " \
"($help -)1:server: " && ret=0
;;
(logout)
_arguments \
'(- :)--help[Print usage]' \
'1:server:->string' && ret=0
$opts_help \
"($help -)1:server: " && ret=0
;;
(logs)
_arguments \
'(-f --follow)'{-f,--follow}'[Follow log output]' \
'(- :)--help[Print usage]' \
'--since=-[Show logs since timestamp]:timestamp: ' \
'(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
'--tail=-[Number of lines to show from the end of the logs]:lines:(1 10 20 50 all)' \
'*:containers:__docker_containers' && ret=0
$opts_help \
"($help -f --follow)"{-f,--follow}"[Follow log output]" \
"($help -s --since)"{-s,--since=-}"[Show logs since this timestamp]:timestamp: " \
"($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \
"($help)--tail=-[Output the last K lines]:lines:(1 10 20 50 all)" \
"($help -)*:containers:__docker_containers" && ret=0
;;
(pause|unpause)
_arguments \
'(- :)--help[Print usage]' \
'*:containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help -)*:containers:__docker_runningcontainers" && ret=0
;;
(port)
_arguments \
'(- :)--help[Print usage]' \
'1:containers:__docker_runningcontainers' \
'2:port:_ports' && ret=0
$opts_help \
"($help -)1:containers:__docker_runningcontainers" \
"($help -)2:port:_ports" && ret=0
;;
(ps)
_arguments \
'(-a --all)'{-a,--all}'[how all containers (default shows just running)]' \
'--before=-[Show only container created before Id or Name]:containers:__docker_containers' \
'*'{-f,--filter=-}'[Filter output based on conditions provided]:filter: ' \
'(- :)--help[Print usage]' \
'(-l --latest)'{-l,--latest}'[Show the latest created container, include non-running]' \
'-n[Show n last created containers, include non-running]:n:(1 5 10 25 50)' \
'--no-trunc[Do not truncate output]' \
'(-q --quiet)'{-q,--quiet}'[Only show numeric IDs]' \
'(-s --size)'{-s,--size}'[Display total file sizes]' \
'--since=-[Show created since Id or Name, include non-running]:containers:__docker_containers' && ret=0
$opts_help \
"($help -a --all)"{-a,--all}"[Show all containers]" \
"($help)--before=-[Show only container created before...]:containers:__docker_containers" \
"($help)*"{-f,--filter=-}"[Filter values]:filter: " \
"($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \
"($help)-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)" \
"($help)--no-trunc[Do not truncate output]" \
"($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \
"($help -s --size)"{-s,--size}"[Display total file sizes]" \
"($help)--since=-[Show only containers created since...]:containers:__docker_containers" && ret=0
;;
(pull)
_arguments \
'(-a --all-tags)'{-a,--all-tags}'[Download all tagged images in the repository]' \
'(- :)--help[Print usage]' \
':name:__docker_search' && ret=0
$opts_help \
"($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \
"($help -):name:__docker_search" && ret=0
;;
(push)
_arguments \
'(- :)--help[Print usage]' \
':images:__docker_images' && ret=0
$opts_help \
"($help -): :__docker_images" && ret=0
;;
(rename)
_arguments \
'(- :)--help[Print usage]' \
':old name:__docker_containers' \
':new name: ' && ret=0
$opts_help \
"($help -):old name:__docker_containers" \
"($help -):new name: " && ret=0
;;
(restart|stop)
_arguments \
'(- :)--help[Print usage]' \
'(-t --time)'{-t,--time=-}'[Seconds to wait for stop before killing the container]:seconds to before killing:(1 5 10 30 60)' \
'*:containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help -t --time=-)"{-t,--time=-}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \
"($help -)*:containers:__docker_runningcontainers" && ret=0
;;
(rm)
_arguments \
'(-f --force)'{-f,--force}'[Force the removal of a running container (uses SIGKILL)]' \
'(- :)--help[Print usage]' \
'(-l --link)'{-l,--link}'[Remove the specified link and not the underlying container]' \
'(-v --volumes)'{-v,--volumes}'[Remove the volumes associated to the container]' \
'*:containers:__docker_stoppedcontainers' && ret=0
$opts_help \
"($help -f --force)"{-f,--force}"[Force removal]" \
"($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \
"($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \
"($help -)*:containers:__docker_stoppedcontainers" && ret=0
;;
(rmi)
_arguments \
'(-f --force)'{-f,--force}'[Force removal of the image]' \
'(- :)--help[Print usage]' \
'--no-prune[Do not delete untagged parents]' \
'*:images:__docker_images' && ret=0
$opts_help \
"($help -f --force)"{-f,--force}"[Force removal]" \
"($help)--no-prune[Do not delete untagged parents]" \
"($help -)*: :__docker_images" && ret=0
;;
(run)
_arguments \
'*'{-a,--attach=-}'[Attach to STDIN, STDOUT or STDERR]:STD:(STDIN STDOUT STDERR)' \
'*--add-host=-[Add a custom host-to-IP mapping (host\:ip)]:host\:ip mapping: ' \
'--blkio-weight=-[Block IO (relative weight), between 10 and 1000]:Block IO weight: ' \
'(-c --cpu-shares)'{-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
'*--cap-add=-[Add Linux capabilities]:capability: ' \
'*--cap-drop=-[Drop Linux capabilities]:capability: ' \
'--cgroup-parent=-[Optional parent cgroup for the container]:cgroup parent: ' \
'--cidfile=-[Write the container ID to the file]:CID file:_files' \
'--cpu-period=-[Limit CPU CFS (Completely Fair Scheduler) period]:CPU period: ' \
'--cpu-quota=-[Limit the CPU CFS quota]:CPU quota: ' \
'--cpuset-cpus=-[CPUs in which to allow execution (0-3, 0,1)]:CPUs: ' \
'--cpuset-mems=-[MEMs in which to allow execution (0-3, 0,1)]:MEMs: ' \
'(-d --detach)'{-d,--detach}'[Run container in background and print container ID]' \
'*--device=-[Add a host device to the container]:device:_files' \
'*--dns=-[Set custom dns servers]:dns server: ' \
'*--dns-search=-[Set custom DNS search domains]:dns domains: ' \
'*'{-e,--env=-}'[Set environment variables]:environment variable: ' \
'--entrypoint=-[Overwrite the default ENTRYPOINT of the image]:entry point: ' \
'*--env-file=-[Read in a file of environment variables]:environment file:_files' \
'*--expose=-[Expose a port or a range of ports]:port or a range of ports: ' \
'*--group-add=-[Add additional groups to run as]:group: ' \
'(-h --hostname)'{-h,--hostname=-}'[Container host name]:hostname:_hosts' \
'(- :)--help[Print usage]' \
'(-i --interactive)'{-i,--interactive}'[Keep STDIN open even if not attached]' \
'--ipc=-[IPC namespace to use]:IPC: ' \
'*'{-l,--label=-}'[Set meta data on a container]:Label: ' \
'*--label-file=-[Read in a line delimited file of labels]' \
'*--link=-[Add link to another container]:link:->link' \
'--log-driver=-[Logging driver for container]:Logging driver:(json-file syslog journald gelf fluentd none)' \
'*--log-opt=-[Log driver options]:Log driver options: ' \
'*--lxc-conf=-[Add custom lxc options]:lxc options: ' \
'(-m --memory)'{-m,--memory=-}'[Memory limit (in bytes)]:Memory limit: ' \
'--mac-address=-[Container MAC address (e.g. 92:d0:c6:0a:29:33)]:MAC address: ' \
"--memory-swap=-[Total memory (memory + swap), '-1' to disable swap]:Total memory: " \
'--name=-[Assign a name to the container]:name: ' \
'--net=-[Set the Network mode for the container]:network mode:(bridge none container host)' \
'--oom-kill-disable[Disable OOM Killer]' \
'(-P --publish-all)'{-P,--publish-all}'[Publish all exposed ports to random ports]' \
'*'{-p,--publish=-}"[Publish a container's port(s) to the host]:port:_ports" \
'--pid=-[PID namespace to use]:PID: ' \
'--privileged[Give extended privileges to this container]' \
"--read-only[Mount the container's root filesystem as read only]" \
'--restart=-[Restart policy]:restart policy:(no on-failure always)' \
'--rm[Remove intermediate containers when it exits]' \
'*--security-opt=-[Security options]:security option: ' \
'--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \
'(-t --tty)'{-t,--tty}'[Allocate a pseudo-TTY]' \
'(-u --user)'{-u,--user=-}'[Username or UID]:user:_users' \
'*--ulimit=-[Ulimit options]:ulimit: ' \
'--uts=-[UTS namespace to use]:UTS: ' \
'*'{-v,--volume=-}'[Bind mount a volume]:volume: ' \
'*--volumes-from=-[Mount volumes from the specified container]:volume: ' \
'(-w --workdir)'{-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \
'(-):images:__docker_images' \
'(-):command: _command_names -e' \
'*::arguments: _normal' && ret=0
$opts_help \
$opts_cpumem \
$opts_create \
"($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \
"($help)--rm[Remove intermediate containers when it exits]" \
"($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
"($help -): :__docker_images" \
"($help -):command: _command_names -e" \
"($help -)*::arguments: _normal" && ret=0
case $state in
(link)
if compset -P '*:'; then
_wanted alias expl 'Alias' compadd -E ""
if compset -P "*:"; then
_wanted alias expl "Alias" compadd -E "" && ret=0
else
__docker_runningcontainers -qS ":"
__docker_runningcontainers -qS ":" && ret=0
fi
;;
esac
@ -549,62 +519,61 @@ __docker_subcommand() {
;;
(save)
_arguments \
'(- :)--help[Print usage]' \
'(-o --output)'{-o,--output=-}'[Write to file]:file: ' \
'*:images:__docker_images' && ret=0
$opts_help \
"($help -o --output)"{-o,--output=-}"[Write to file]:file:_files" \
"($help -)*: :__docker_images" && ret=0
;;
(search)
_arguments \
'--automated[Only show automated builds]' \
'(- :)--help[Print usage]' \
'--no-trunc[Do not truncate output]' \
'(-s --stars)'{-s,--stars=-}'[Only display with at least X stars]:stars:(0 10 100 1000)' \
'1:term:->string' && ret=0
$opts_help \
"($help)--automated[Only show automated builds]" \
"($help)--no-trunc[Do not truncate output]" \
"($help -s --stars)"{-s,--stars=-}"[Only display with at least X stars]:stars:(0 10 100 1000)" \
"($help -):term: " && ret=0
;;
(start)
_arguments \
'(-a --attach)'{-a,--attach}'[Attach STDOUT/STDERR and forward signals]' \
'(- :)--help[Print usage]' \
'(-i --interactive)'{-i,--interactive}"[Attach container's STDIN]" \
'*:containers:__docker_stoppedcontainers' && ret=0
$opts_help \
"($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \
"($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \
"($help -)*:containers:__docker_stoppedcontainers" && ret=0
;;
(stats)
_arguments \
'(- :)--help[Print usage]' \
'--no-stream[Disable streaming stats and only pull the first result]' \
'*:containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help)--no-stream[Disable streaming stats and only pull the first result]" \
"($help -)*:containers:__docker_runningcontainers" && ret=0
;;
(tag)
_arguments \
'(-f --force)'{-f,--force}'[force]' \
'(- :)--help[Print usage]' \
':image:__docker_images' \
':repository:__docker_repositories_with_tags' && ret=0
$opts_help \
"($help -f --force)"{-f,--force}"[force]"\
"($help -):source:__docker_images"\
"($help -):destination:__docker_repositories_with_tags" && ret=0
;;
(top)
_arguments \
'(- :)--help[Print usage]' \
'1:containers:__docker_runningcontainers' \
'(-)*:: :->ps-arguments' && ret=0
$opts_help \
"($help -)1:containers:__docker_runningcontainers" \
"($help -)*:: :->ps-arguments" && ret=0
case $state in
(ps-arguments)
_ps
_ps && ret=0
;;
esac
;;
(wait)
_arguments \
'(- :)--help[Print usage]' \
'*:containers:__docker_runningcontainers' && ret=0
$opts_help \
"($help -)*:containers:__docker_runningcontainers" && ret=0
;;
(help)
_arguments ':subcommand:__docker_commands' && ret=0
_arguments ":subcommand:__docker_commands" && ret=0
;;
(*)
_message 'Unknown sub command'
esac
return ret
}
_docker() {
@ -615,70 +584,59 @@ _docker() {
return
fi
local curcontext="$curcontext" state line
typeset -A opt_args
local curcontext="$curcontext" state line help="-h --help"
integer ret=1
typeset -A opt_args
_arguments -C \
'--api-cors-header=-[Set CORS headers in the remote API]:CORS headers: ' \
'(-b --bridge)'{-b,--bridge=-}'[Attach containers to a network bridge]:bridge: ' \
'--bip=-[Specify network bridge IP]' \
'(-D --debug)'{-D,--debug}'[Enable debug mode]' \
'(-d --daemon)'{-d,--daemon}'[Enable daemon mode]' \
'--default-gateway[Container default gateway IPv4 address]:IPv4 address: ' \
'--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: ' \
'*--dns=-[DNS server to use]:DNS: ' \
'*--dns-search=-[DNS search domains to use]' \
'*--default-ulimit=-[Set default ulimit settings for containers]:ulimit: ' \
'(-e --exec-driver)'{-e,--exec-driver=-}'[Exec driver to use]:driver:(native lxc Windows)' \
'*--exec-opt=-[Set exec driver options]:exec driver options: ' \
'--exec-root=-[Root of the Docker execdriver (default: /var/run/docker)]:PATH:_directories' \
'--fixed-cidr=-[IPv4 subnet for fixed IPs]:IPv4 subnet: ' \
'--fixed-cidr-v6=-[IPv6 subnet for fixed IPs]:IPv6 subnet: ' \
'(-G --group)'{-G,--group=-}'[Group for the unix socket (default: docker)]:group:_groups' \
'(-g --graph)'{-g,--graph=-}'[Root of the Docker runtime (default: /var/lib/docker)]:PATH:_directories' \
'(-H --host)'{-H,--host=-}'[tcp://host:port to bind/connect to]:host: ' \
'(-h --help)'{-h,--help}'[Print usage]' \
'--icc[Enable inter-container communication]' \
'*--insecure-registry=-[Enable insecure registry communication]:registry: ' \
'--ip=-[Default IP when binding container ports (default: 0.0.0.0)]' \
'--ip-forward=-[Enable net.ipv4.ip_forward]:enable:(true false)' \
'--ip-masq=-[Enable IP masquerading]:enable:(true false)' \
'--iptables=-[Enable addition of iptables rules]:enable:(true false)' \
'--ipv6[Enable IPv6 networking]' \
'(-l --log-level)'{-l,--log-level=-}'[Set the logging level]:level:(debug info warn error fatal)' \
'*--label=-[Set key=value labels to the daemon]:label: ' \
'--log-driver=-[Default driver for container logs (default: json-file)]:Logging driver:(json-file syslog journald gelf fluentd none)' \
'*--log-opt=-[Log driver specific options]:log driver options: ' \
'--mtu=-[Set the containers network MTU (default: 0)]' \
'(-p --pidfile)'{-p,--pidfile=-}'[Path to use for daemon PID file (default: /var/run/docker.pid)]:PID file PATH: ' \
'*--registry-mirror=-[Preferred Docker registry mirror]:registry mirror: ' \
'(-s --storage-driver)'{-s,--storage-driver=-}'[Storage driver to use]:driver:(aufs devicemapper btrfs zfs overlay)' \
'--selinux-enabled[Enable selinux support]' \
'*--storage-opt=-[Set storage driver options]:storage driver options: ' \
'--tls[Use TLS; implied by --tlsverify]' \
'--tlscacert=-[Trust certs signed only by this CA (default: ~/.docker/ca.pem)]' \
'--tlscert=-[Path to TLS certificate file (default: ~/.docker/cert.pem)]' \
'--tlskey=-[Path to TLS key file (default: ~/.docker/key.pem)]' \
'--tlsverify[Use TLS and verify the remote]' \
'--userland-proxy=-[Use userland proxy for loopback traffic]:enable:(true false)' \
'(-v --version)'{-v,--version}'[Print version information and quit]' \
'(-): :->command' \
'(-)*:: :->option-or-argument' && ret=0
"(: -)"{-h,--help}"[Print usage]" \
"($help)--api-cors-header=-[Set CORS headers in the remote API]:CORS headers: " \
"($help -b --bridge)"{-b,--bridge=-}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
"($help)--bip=-[Specify network bridge IP]" \
"($help -D --debug)"{-D,--debug}"[Enable debug mode]" \
"($help -d --daeamon)"{-d,--daemon}"[Enable daemon mode]" \
"($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \
"($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \
"($help)*--dns=-[DNS server to use]:DNS: " \
"($help)*--dns-search=-[DNS search domains to use]" \
"($help)*--default-ulimit=-[Set default ulimit settings for containers]:ulimit: " \
"($help -e --exec-driver)"{-e,--exec-driver=-}"[Exec driver to use]:driver:(native lxc windows)" \
"($help)*--exec-opt=-[Set exec driver options]:exec driver options: " \
"($help)--exec-root=-[Root of the Docker execdriver]:path:_directories" \
"($help)--fixed-cidr=-[IPv4 subnet for fixed IPs]:IPv4 subnet: " \
"($help)--fixed-cidr-v6=-[IPv6 subnet for fixed IPs]:IPv6 subnet: " \
"($help -G --group)"{-G,--group=-}"[Group for the unix socket]:group:_groups" \
"($help -g --graph)"{-g,--graph=-}"[Root of the Docker runtime]:path:_directories" \
"($help -H --host)"{-H,--host=-}"[tcp://host:port to bind/connect to]:host: " \
"($help)--icc[Enable inter-container communication]" \
"($help)*--insecure-registry=-[Enable insecure registry communication]:registry: " \
"($help)--ip=-[Default IP when binding container ports]" \
"($help)--ip-forward[Enable net.ipv4.ip_forward]" \
"($help)--ip-masq[Enable IP masquerading]" \
"($help)--iptables[Enable addition of iptables rules]" \
"($help)--ipv6[Enable IPv6 networking]" \
"($help -l --log-level)"{-l,--log-level=-}"[Set the logging level]:level:(debug info warn error fatal)" \
"($help)*--label=-[Set key=value labels to the daemon]:label: " \
"($help)--log-driver=-[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd none)" \
"($help)*--log-opt=-[Log driver specific options]:log driver options: " \
"($help)--mtu=-[Set the containers network MTU]:mtu:(0 576 1420 1500 9000)" \
"($help -p --pidfile)"{-p,--pidfile=-}"[Path to use for daemon PID file]:PID file:_files" \
"($help)*--registry-mirror=-[Preferred Docker registry mirror]:registry mirror: " \
"($help -s --storage-driver)"{-s,--storage-driver=-}"[Storage driver to use]:driver:(aufs devicemapper btrfs zfs overlay)" \
"($help)--selinux-enabled[Enable selinux support]" \
"($help)*--storage-opt=-[Set storage driver options]:storage driver options: " \
"($help)--tls[Use TLS]" \
"($help)--tlscacert=-[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \
"($help)--tlscert=-[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \
"($help)--tlskey=-[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \
"($help)--tlsverify[Use TLS and verify the remote]" \
"($help)--userland-proxy[Use userland proxy for loopback traffic]" \
"($help -v --version)"{-v,--version}"[Print version information and quit]" \
"($help -): :->command" \
"($help -)*:: :->option-or-argument" && ret=0
local counter=1
while [ $counter -lt ${#words} ]; do
case "${words[$counter]}" in
--host|-H)
(( counter++ ))
host="${words[$counter]}"
;;
*)
;;
esac
(( counter++ ))
done
docker_options=${host:+-H "$host"}
local host=${opt_args[-H]}${opt_args[--host]}
local docker_options=${host:+--host $host}
case $state in
(command)

daemon/archive.go Normal file

@ -0,0 +1,297 @@
package daemon
import (
"errors"
"io"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
)
// ErrExtractPointNotDirectory is used to convey that the operation to extract
// a tar archive to a directory in a container has failed because the specified
// path does not refer to a directory.
var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
// ContainerCopy performs a deprecated operation of archiving the resource at
// the specified path in the container identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
container, err := daemon.Get(name)
if err != nil {
return nil, err
}
if res[0] == '/' {
res = res[1:]
}
return container.Copy(res)
}
// ContainerStatPath stats the filesystem resource at the specified path in the
// container identified by the given name.
func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(name)
if err != nil {
return nil, err
}
return container.StatPath(path)
}
// ContainerArchivePath creates an archive of the filesystem resource at the
// specified path in the container identified by the given name. Returns a
// tar archive of the resource and whether it was a directory or a single file.
func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container, err := daemon.Get(name)
if err != nil {
return nil, nil, err
}
return container.ArchivePath(path)
}
// ContainerExtractToDir extracts the given archive to the specified location
// in the filesystem of the container identified by the given name. The given
// path must be of a directory in the container. If it is not, the error will
// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
// be an error if unpacking the given content would cause an existing directory
// to be replaced with a non-directory and vice versa.
func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
container, err := daemon.Get(name)
if err != nil {
return err
}
return container.ExtractToDir(path, noOverwriteDirNonDir, content)
}
// StatPath stats the filesystem resource at the specified path in this
// container. Returns stat info about the resource.
func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
return nil, err
}
defer container.Unmount()
err = container.mountVolumes()
defer container.UnmountVolumes(true)
if err != nil {
return nil, err
}
// Consider the given path as an absolute path in the container.
absPath := path
if !filepath.IsAbs(absPath) {
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
}
resolvedPath, err := container.GetResourcePath(absPath)
if err != nil {
return nil, err
}
// A trailing "." or separator has important meaning. For example, if
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
// target. If the basename of the path is ".", it means to archive the
// contents of the directory with "." as the first path component rather
// than the name of the directory. This would cause extraction of the
// archive to *not* make another directory, but instead use the current
// directory.
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
lstat, err := os.Lstat(resolvedPath)
if err != nil {
return nil, err
}
return &types.ContainerPathStat{
Name: lstat.Name(),
Path: absPath,
Size: lstat.Size(),
Mode: lstat.Mode(),
Mtime: lstat.ModTime(),
}, nil
}
// ArchivePath creates an archive of the filesystem resource at the specified
// path in this container. Returns a tar archive of the resource and stat info
// about the resource.
func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container.Lock()
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err = container.Mount(); err != nil {
return nil, nil, err
}
defer func() {
if err != nil {
// unmount any volumes
container.UnmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
}
}()
if err = container.mountVolumes(); err != nil {
return nil, nil, err
}
// Consider the given path as an absolute path in the container.
absPath := path
if !filepath.IsAbs(absPath) {
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
}
resolvedPath, err := container.GetResourcePath(absPath)
if err != nil {
return nil, nil, err
}
// A trailing "." or separator has important meaning. For example, if
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
// target. If the basename of the path is ".", it means to archive the
// contents of the directory with "." as the first path component rather
// than the name of the directory. This would cause extraction of the
// archive to *not* make another directory, but instead use the current
// directory.
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
lstat, err := os.Lstat(resolvedPath)
if err != nil {
return nil, nil, err
}
stat = &types.ContainerPathStat{
Name: lstat.Name(),
Path: absPath,
Size: lstat.Size(),
Mode: lstat.Mode(),
Mtime: lstat.ModTime(),
}
data, err := archive.TarResource(resolvedPath)
if err != nil {
return nil, nil, err
}
content = ioutils.NewReadCloserWrapper(data, func() error {
err := data.Close()
container.UnmountVolumes(true)
container.Unmount()
container.Unlock()
return err
})
container.LogEvent("archive-path")
return content, stat, nil
}
// ExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
return err
}
defer container.Unmount()
err = container.mountVolumes()
defer container.UnmountVolumes(true)
if err != nil {
return err
}
// Consider the given path as an absolute path in the container.
absPath := path
if !filepath.IsAbs(absPath) {
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
}
resolvedPath, err := container.GetResourcePath(absPath)
if err != nil {
return err
}
// A trailing "." or separator has important meaning. For example, if
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
// target. If the basename of the path is ".", it means to archive the
// contents of the directory with "." as the first path component rather
// than the name of the directory. This would cause extraction of the
// archive to *not* make another directory, but instead use the current
// directory.
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
stat, err := os.Lstat(resolvedPath)
if err != nil {
return err
}
if !stat.IsDir() {
return ErrExtractPointNotDirectory
}
baseRel, err := filepath.Rel(container.basefs, resolvedPath)
if err != nil {
return err
}
absPath = filepath.Join("/", baseRel)
// Need to check if the path is in a volume. If it is, it cannot be in a
// read-only volume. If it is not in a volume, the container cannot be
// configured with a read-only rootfs.
var toVolume bool
for _, mnt := range container.MountPoints {
if toVolume = mnt.hasResource(absPath); toVolume {
if mnt.RW {
break
}
return ErrVolumeReadonly
}
}
if !toVolume && container.hostConfig.ReadonlyRootfs {
return ErrContainerRootfsReadonly
}
options := &archive.TarOptions{
ChownOpts: &archive.TarChownOptions{
UID: 0, GID: 0, // TODO: use config.User? Remap to userns root?
},
NoOverwriteDirNonDir: noOverwriteDirNonDir,
}
if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
return err
}
container.LogEvent("extract-to-dir")
return nil
}


@ -35,10 +35,11 @@ import (
)
var (
ErrNotATTY = errors.New("The PTY is not a file")
ErrNoTTY = errors.New("No PTY found")
ErrContainerStart = errors.New("The container failed to start. Unknown error")
ErrContainerStartTimeout = errors.New("The container failed to start due to a timeout.")
ErrNotATTY = errors.New("The PTY is not a file")
ErrNoTTY = errors.New("No PTY found")
ErrContainerStart = errors.New("The container failed to start. Unknown error")
ErrContainerStartTimeout = errors.New("The container failed to start due to a timeout.")
ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
)
type StreamConfig struct {
@ -616,13 +617,22 @@ func validateID(id string) error {
return nil
}
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) {
container.Lock()
defer container.Unlock()
var err error
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err := container.Mount(); err != nil {
return nil, err
}
defer func() {
if err != nil {
// unmount any volumes
@ -631,28 +641,11 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
container.Unmount()
}
}()
mounts, err := container.setupMounts()
if err != nil {
if err := container.mountVolumes(); err != nil {
return nil, err
}
for _, m := range mounts {
var dest string
dest, err = container.GetResourcePath(m.Destination)
if err != nil {
return nil, err
}
var stat os.FileInfo
stat, err = os.Stat(m.Source)
if err != nil {
return nil, err
}
if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
return nil, err
}
if err = mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
return nil, err
}
}
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
@ -688,6 +681,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
container.CleanupStorage()
container.UnmountVolumes(true)
container.Unmount()
container.Unlock()
return err
})
container.LogEvent("copy")
@ -1190,6 +1184,40 @@ func (container *Container) shouldRestart() bool {
(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
}
func (container *Container) mountVolumes() error {
mounts, err := container.setupMounts()
if err != nil {
return err
}
for _, m := range mounts {
dest, err := container.GetResourcePath(m.Destination)
if err != nil {
return err
}
var stat os.FileInfo
stat, err = os.Stat(m.Source)
if err != nil {
return err
}
if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
return err
}
opts := "rbind,ro"
if m.Writable {
opts = "rbind,rw"
}
if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
return err
}
}
return nil
}
func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
if err != nil {


@ -521,7 +521,10 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
if exposedPorts, ok := expData.([]types.TransportPort); ok {
for _, tp := range exposedPorts {
natPort := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
if err != nil {
return nil, fmt.Errorf("Error parsing Port value(%s):%v", tp.Port, err)
}
networkSettings.Ports[natPort] = nil
}
}
@ -534,8 +537,11 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork
if portMapping, ok := mapData.([]types.PortBinding); ok {
for _, pp := range portMapping {
natPort := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
natBndg := nat.PortBinding{HostIp: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
if err != nil {
return nil, err
}
natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
}
}
@ -684,7 +690,7 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO
bindings[p] = []nat.PortBinding{}
for _, bb := range b {
bindings[p] = append(bindings[p], nat.PortBinding{
HostIp: bb.HostIp,
HostIP: bb.HostIP,
HostPort: bb.HostPort,
})
}
@ -710,8 +716,12 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO
binding := bindings[port]
for i := 0; i < len(binding); i++ {
pbCopy := pb.GetCopy()
pbCopy.HostPort = uint16(nat.Port(binding[i].HostPort).Int())
pbCopy.HostIP = net.ParseIP(binding[i].HostIp)
newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
if err != nil {
return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err)
}
pbCopy.HostPort = uint16(newP.Int())
pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
pbList = append(pbList, pbCopy)
}


@ -77,9 +77,8 @@ func populateCommand(c *Container, env []string) error {
case "none":
case "default", "": // empty string to support existing containers
if !c.Config.NetworkDisabled {
network := c.NetworkSettings
en.Interface = &execdriver.NetworkInterface{
MacAddress: network.MacAddress,
MacAddress: c.Config.MacAddress,
Bridge: c.daemon.config.Bridge.VirtualSwitchName,
}
}


@ -1,16 +0,0 @@
package daemon
import "io"
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
container, err := daemon.Get(name)
if err != nil {
return nil, err
}
if res[0] == '/' {
res = res[1:]
}
return container.Copy(res)
}


@ -686,7 +686,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
}
repositories, err := graph.NewTagStore(filepath.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store %s: %s", "repositories-"+d.driver.String(), err)
return nil, fmt.Errorf("Couldn't create Tag store repositories-%s: %s", d.driver.String(), err)
}
d.netController, err = initNetworkController(config)


@ -132,7 +132,13 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig,
for port := range hostConfig.PortBindings {
_, portStr := nat.SplitProtoPort(string(port))
if _, err := nat.ParsePort(portStr); err != nil {
return warnings, fmt.Errorf("Invalid port specification: %s", portStr)
return warnings, fmt.Errorf("Invalid port specification: %q", portStr)
}
for _, pb := range hostConfig.PortBindings[port] {
_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
if err != nil {
return warnings, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
}
}
}
if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
@ -481,7 +487,6 @@ func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.R
}
func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
if hostConfig == nil || hostConfig.Links == nil {
return nil
}


@ -39,10 +39,6 @@ type Terminal interface {
Resize(height, width int) error
}
type TtyTerminal interface {
Master() libcontainer.Console
}
// ExitStatus provides exit reasons for a container.
type ExitStatus struct {
// The exit code with which the container exited.


@ -808,10 +808,6 @@ func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pi
return tty, nil
}
func (t *TtyConsole) Master() *os.File {
return t.MasterPty
}
func (t *TtyConsole) Resize(h, w int) error {
return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}


@ -1,124 +0,0 @@
// +build linux
package native
import (
"fmt"
"io"
"os"
"os/exec"
"path"
"text/template"
"github.com/opencontainers/runc/libcontainer/apparmor"
)
const (
apparmorProfilePath = "/etc/apparmor.d/docker"
)
type data struct {
Name string
Imports []string
InnerImports []string
}
const baseTemplate = `
{{range $value := .Imports}}
{{$value}}
{{end}}
profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
{{range $value := .InnerImports}}
{{$value}}
{{end}}
network,
capability,
file,
umount,
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/efi/efivars/** rwklx,
deny /sys/kernel/security/** rwklx,
}
`
func generateProfile(out io.Writer) error {
compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
if err != nil {
return err
}
data := &data{
Name: "docker-default",
}
if tunablesExists() {
data.Imports = append(data.Imports, "#include <tunables/global>")
} else {
data.Imports = append(data.Imports, "@{PROC}=/proc/")
}
if abstractionsExists() {
data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
}
if err := compiled.Execute(out, data); err != nil {
return err
}
return nil
}
// check if the tunables/global exist
func tunablesExists() bool {
_, err := os.Stat("/etc/apparmor.d/tunables/global")
return err == nil
}
// check if abstractions/base exist
func abstractionsExists() bool {
_, err := os.Stat("/etc/apparmor.d/abstractions/base")
return err == nil
}
func installApparmorProfile() error {
if !apparmor.IsEnabled() {
return nil
}
// Make sure /etc/apparmor.d exists
if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
return err
}
f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
if err := generateProfile(f); err != nil {
f.Close()
return err
}
f.Close()
cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
// to use the parser directly we have to make sure we are in the correct
// dir with the profile
cmd.Dir = "/etc/apparmor.d"
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
}
return nil
}


@ -50,10 +50,6 @@ func NewDriver(root, initPath string, options []string) (*driver, error) {
if err := sysinfo.MkdirAll(root, 0700); err != nil {
return nil, err
}
// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
if err := installApparmorProfile(); err != nil {
return nil, err
}
// choose cgroup manager
// this makes sure there are no breaking changes to people
@ -365,7 +361,7 @@ type TtyConsole struct {
console libcontainer.Console
}
func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootuid int) (*TtyConsole, error) {
func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes) (*TtyConsole, error) {
tty := &TtyConsole{
console: console,
}
@ -378,10 +374,6 @@ func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootui
return tty, nil
}
func (t *TtyConsole) Master() libcontainer.Console {
return t.console
}
func (t *TtyConsole) Resize(h, w int) error {
return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}
@ -425,7 +417,7 @@ func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConf
if err != nil {
return err
}
term, err = NewTtyConsole(cons, pipes, rootuid)
term, err = NewTtyConsole(cons, pipes)
} else {
p.Stdout = pipes.Stdout
p.Stderr = pipes.Stderr


@ -21,8 +21,6 @@ func NewTtyConsole(id string, processid uint32) *TtyConsole {
}
func (t *TtyConsole) Resize(h, w int) error {
// TODO Windows: This is not implemented in HCS. Needs plumbing through
// along with mechanism for buffering
return hcsshim.ResizeConsoleInComputeSystem(t.id, t.processid, h, w)
}


@ -20,10 +20,22 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
return nil, err
}
return &types.ContainerJSON{base, container.Config}, nil
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
for _, m := range container.MountPoints {
mountPoints = append(mountPoints, types.MountPoint{
Name: m.Name,
Source: m.Path(),
Destination: m.Destination,
Driver: m.Driver,
Mode: m.Relabel,
RW: m.RW,
})
}
return &types.ContainerJSON{base, mountPoints, container.Config}, nil
}
func (daemon *Daemon) ContainerInspectRaw(name string) (*types.ContainerJSONRaw, error) {
func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
container, err := daemon.Get(name)
if err != nil {
return nil, err
@ -37,6 +49,13 @@ func (daemon *Daemon) ContainerInspectRaw(name string) (*types.ContainerJSONRaw,
return nil, err
}
volumes := make(map[string]string)
volumesRW := make(map[string]bool)
for _, m := range container.MountPoints {
volumes[m.Destination] = m.Path()
volumesRW[m.Destination] = m.RW
}
config := &types.ContainerConfig{
container.Config,
container.hostConfig.Memory,
@ -45,7 +64,7 @@ func (daemon *Daemon) ContainerInspectRaw(name string) (*types.ContainerJSONRaw,
container.hostConfig.CpusetCpus,
}
return &types.ContainerJSONRaw{base, config}, nil
return &types.ContainerJSONPre120{base, volumes, volumesRW, config}, nil
}
func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
@ -76,14 +95,6 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
FinishedAt: container.State.FinishedAt,
}
volumes := make(map[string]string)
volumesRW := make(map[string]bool)
for _, m := range container.MountPoints {
volumes[m.Destination] = m.Path()
volumesRW[m.Destination] = m.RW
}
contJSONBase := &types.ContainerJSONBase{
Id: container.ID,
Created: container.Created,
@ -102,8 +113,6 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
ExecDriver: container.ExecDriver,
MountLabel: container.MountLabel,
ProcessLabel: container.ProcessLabel,
Volumes: volumes,
VolumesRW: volumesRW,
AppArmorProfile: container.AppArmorProfile,
ExecIDs: container.GetExecIDs(),
HostConfig: &hostConfig,


@ -158,7 +158,10 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
newC.Ports = []types.Port{}
for port, bindings := range container.NetworkSettings.Ports {
p, _ := nat.ParsePort(port.Port())
p, err := nat.ParsePort(port.Port())
if err != nil {
return err
}
if len(bindings) == 0 {
newC.Ports = append(newC.Ports, types.Port{
PrivatePort: p,
@ -167,12 +170,15 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
continue
}
for _, binding := range bindings {
h, _ := nat.ParsePort(binding.HostPort)
h, err := nat.ParsePort(binding.HostPort)
if err != nil {
return err
}
newC.Ports = append(newC.Ports, types.Port{
PrivatePort: p,
PublicPort: h,
Type: port.Proto(),
IP: binding.HostIp,
IP: binding.HostIP,
})
}
}


@ -2,11 +2,13 @@ package network
import "github.com/docker/docker/pkg/nat"
// Address represents an IP address
type Address struct {
Addr string
PrefixLen int
}
// Settings stores configuration details about the daemon network config
type Settings struct {
Bridge string
EndpointID string


@ -1,6 +1,7 @@
package daemon
import (
"errors"
"fmt"
"io/ioutil"
"os"
@ -12,10 +13,15 @@ import (
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/opencontainers/runc/libcontainer/label"
)
// ErrVolumeReadonly is used to signal an error when trying to copy data into
// a volume mount that is not writable.
var ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
type mountPoint struct {
Name string
Destination string
@ -46,6 +52,16 @@ func (m *mountPoint) Setup() (string, error) {
return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
}
// hasResource checks whether the given absolute path for a container is in
// this mount point. If the relative path starts with `../` then the resource
// is outside of this mount point, but we can't simply check for this prefix
// because it misses `..` which is also outside of the mount, so check both.
func (m *mountPoint) hasResource(absolutePath string) bool {
relPath, err := filepath.Rel(m.Destination, absolutePath)
return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator))
}
func (m *mountPoint) Path() string {
if m.Volume != nil {
return m.Volume.Path()
@ -333,3 +349,18 @@ func removeVolume(v volume.Volume) error {
}
return vd.Remove(v)
}
func getVolumeDriver(name string) (volume.Driver, error) {
if name == "" {
name = volume.DefaultDriverName
}
return volumedrivers.Lookup(name)
}
func parseVolumeSource(spec string) (string, string, error) {
if !filepath.IsAbs(spec) {
return spec, "", nil
}
return "", spec, nil
}


@ -1,25 +0,0 @@
// +build experimental
package daemon
import (
"path/filepath"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
func getVolumeDriver(name string) (volume.Driver, error) {
if name == "" {
name = volume.DefaultDriverName
}
return volumedrivers.Lookup(name)
}
func parseVolumeSource(spec string) (string, string, error) {
if !filepath.IsAbs(spec) {
return spec, "", nil
}
return "", spec, nil
}


@ -1,23 +0,0 @@
// +build !experimental
package daemon
import (
"fmt"
"path/filepath"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
func getVolumeDriver(_ string) (volume.Driver, error) {
return volumedrivers.Lookup(volume.DefaultDriverName)
}
func parseVolumeSource(spec string) (string, string, error) {
if !filepath.IsAbs(spec) {
return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
}
return "", spec, nil
}


@ -1,82 +0,0 @@
// +build !experimental
package daemon
import (
"io/ioutil"
"os"
"testing"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
)
func TestGetVolumeDefaultDriver(t *testing.T) {
tmp, err := ioutil.TempDir("", "volume-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
l, err := local.New(tmp)
if err != nil {
t.Fatal(err)
}
volumedrivers.Register(l, volume.DefaultDriverName)
d, err := getVolumeDriver("missing")
if err != nil {
t.Fatal(err)
}
if d.Name() != volume.DefaultDriverName {
t.Fatalf("Expected local driver, was %s\n", d.Name)
}
}
func TestParseBindMount(t *testing.T) {
cases := []struct {
bind string
expDest string
expSource string
expName string
mountLabel string
expRW bool
fail bool
}{
{"/tmp:/tmp", "/tmp", "/tmp", "", "", true, false},
{"/tmp:/tmp:ro", "/tmp", "/tmp", "", "", false, false},
{"/tmp:/tmp:rw", "/tmp", "/tmp", "", "", true, false},
{"/tmp:/tmp:foo", "/tmp", "/tmp", "", "", false, true},
{"name:/tmp", "", "", "", "", false, true},
{"local/name:/tmp:rw", "", "", "", "", true, true},
}
for _, c := range cases {
conf := &runconfig.Config{}
m, err := parseBindMount(c.bind, c.mountLabel, conf)
if c.fail {
if err == nil {
t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
}
continue
}
if m.Destination != c.expDest {
t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
}
if m.Source != c.expSource {
t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
}
if m.Name != c.expName {
t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
}
if m.RW != c.expRW {
t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
}
}
}


@ -105,12 +105,12 @@ not, the cache is invalidated.
of the child images is sufficient. However, certain instructions require
a little more examination and explanation.
* In the case of the `ADD` and `COPY` instructions, the contents of the file(s)
being put into the image are examined. Specifically, a checksum is done
of the file(s) and then that checksum is used during the cache lookup.
If anything has changed in the file(s), including its metadata,
then the cache is invalidated. The last-modified and last-accessed times of the
file(s) are not considered in these checksums.
* For the `ADD` and `COPY` instructions, the contents of the file(s)
in the image are examined and a checksum is calculated for each file.
The last-modified and last-accessed times of the file(s) are not considered in
these checksums. During the cache lookup, the checksum is compared against the
checksum in the existing images. If anything has changed in the file(s), such
as the contents and metadata, then the cache is invalidated.
* Aside from the `ADD` and `COPY` commands, cache checking will not look at the
files in the container to determine a cache match. For example, when processing

docs/extend/index.md Normal file

@ -0,0 +1,22 @@
<!--[metadata]>
+++
title = "Extend Docker"
description = "How to extend Docker with plugins"
keywords = ["extend, plugins, docker, documentation, developer"]
[menu.main]
identifier = "mn_extend"
name = "Extend Docker"
weight = 6
+++
<![end-metadata]-->
## Extending Docker
Currently, you can extend Docker by adding a plugin. This section contains the following topics:
* [Understand Docker plugins](plugins.md)
* [Write a volume plugin](plugins_volumes.md)
* [Docker plugin API](plugin_api.md)


@ -1,13 +1,22 @@
# Experimental: Docker Plugin API
<!--[metadata]>
+++
title = "Plugins API"
description = "How to write Docker plugins extensions "
keywords = ["API, Usage, plugins, documentation, developer"]
[menu.main]
parent = "mn_extend"
weight=1
+++
<![end-metadata]-->
# Docker Plugin API
Docker plugins are out-of-process extensions which add capabilities to the
Docker Engine.
This page is intended for people who want to develop their own Docker plugin.
If you just want to learn about or use Docker plugins, look
[here](/experimental/plugins.md).
This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](README.md).
[here](plugins.md).
## What plugins are
@ -77,10 +86,6 @@ manage startup and shutdown order.
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
plugin, then start Docker again.
If a plugin is packaged as a container, this may cause issues. Plugins as
containers are currently considered experimental due to these shutdown/startup
ordering issues. These issues are mitigated by plugin retries (see below).
## Plugin activation
When a plugin is first referred to -- either by a user referring to it by name


@ -1,14 +1,23 @@
# Experimental: Extend Docker with a plugin
<!--[metadata]>
+++
title = "Extending Docker with plugins"
description = "How to add additional functionality to Docker with plugins extensions"
keywords = ["Examples, Usage, plugins, docker, documentation, user guide"]
[menu.main]
parent = "mn_extend"
weight=-1
+++
<![end-metadata]-->
# Understand Docker plugins
You can extend the capabilities of the Docker Engine by loading third-party
plugins.
This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](README.md).
plugins.
## Types of plugins
Plugins extend Docker's functionality. They come in specific types. For
example, a [volume plugin](/experimental/plugins_volume.md) might enable Docker
example, a [volume plugin](plugins_volume.md) might enable Docker
volumes to persist across multiple Docker hosts.
Currently Docker supports volume and network driver plugins. In the future it
@ -27,12 +36,13 @@ The following plugins exist:
databases and other stateful containers and move them around across a cluster
of machines.
* The [Weave plugin](https://github.com/weaveworks/docker-plugin) is a network
driver plugin which provides a virtual, multi-host network for containers.
* The [GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) is
another volume plugin that provides multi-host volumes management for Docker
using GlusterFS.
* The [Calico plugin](https://github.com/metaswitch/calico-docker) is a network
driver plugin which provides a multi-host network for containers with routes
distributed by BGP.
* The [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) is
a plugin that provides credentials and secret management using Keywhiz as
a central repository.
## Troubleshooting a plugin
@ -42,11 +52,4 @@ of the plugin for help. The Docker team may not be able to assist you.
## Writing a plugin
If you are interested in writing a plugin for Docker, or seeing how they work
under the hood, see the [docker plugins reference](/experimental/plugin_api.md).
# Related GitHub PRs and issues
- [#13222](https://github.com/docker/docker/pull/13222) Plugins plumbing
Send us feedback and comments on [#13419](https://github.com/docker/docker/issues/13419),
or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
under the hood, see the [docker plugins reference](plugin_api.md).


@ -1,34 +1,38 @@
# Experimental: Docker volume plugins
<!--[metadata]>
+++
title = "Volume plugins"
description = "How to manage data with external volume plugins"
keywords = ["Examples, Usage, volume, docker, data, volumes, plugin, api"]
[menu.main]
parent = "mn_extend"
+++
<![end-metadata]-->
# Write a volume plugin
Docker volume plugins enable Docker deployments to be integrated with external
storage systems, such as Amazon EBS, and enable data volumes to persist beyond
the lifetime of a single Docker host. See the [plugin documentation](/experimental/plugins.md)
the lifetime of a single Docker host. See the [plugin documentation](plugins.md)
for more information.
This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](README.md).
# Command-line changes
This experimental feature introduces two changes to the `docker run` command:
A volume plugin makes use of the `-v` and `--volume-driver` flags on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example:
- The `--volume-driver` flag is introduced.
- The `-v` syntax is changed to accept a volume name a first component.
$ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
Example:
This command passes the `volumename` through to the volume plugin as a
user-given name for the volume. The `volumename` must not begin with a `/`.
$ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
By having the user specify a `volumename`, a plugin can associate the volume
with an external volume beyond the lifetime of a single container or container
host. This can be used, for example, to move a stateful container from one
server to another.
By specifying a volume name in conjunction with a volume driver, volume plugins
such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
used to manage volumes external to a single host, such as those on EBS. In this
example, "volumename" is passed through to the volume plugin as a user-given
name for the volume which allows the plugin to associate it with an external
volume beyond the lifetime of a single container or container host. This can be
used, for example, to move a stateful container from one server to another.
By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS.
The `volumename` must not begin with a `/`.
# API changes
# Create a VolumeDriver
The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
field of type `string` that allows you to specify the name of the driver. Its default
@ -152,9 +156,3 @@ this point.
Respond with a string error if an error occurred.
# Related GitHub PRs and issues
- [#13161](https://github.com/docker/docker/pull/13161) Volume refactor and external volume plugins
Send us feedback and comments on [#13420](https://github.com/docker/docker/issues/13420),
or on the usual Google Groups (docker-user, docker-dev) and IRC channels.


@ -121,6 +121,7 @@ install Docker using the following:
The system prompts you for your `sudo` password. Then, it downloads and
installs Docker and its dependencies.
>**Note**: If your company is behind a filtering proxy, you may find that the
>`apt-key`
>command fails for the Docker repo during installation. To work around this,


@ -68,6 +68,23 @@ Running `docker rmi` emits an **untag** event when removing an image name. The
### What's new
`GET /containers/(id)/archive`
**New!**
Get an archive of filesystem content from a container.
`PUT /containers/(id)/archive`
**New!**
Upload an archive of content to be extracted to an
existing directory inside a container's filesystem.
`POST /containers/(id)/copy`
**Deprecated!**
This copy endpoint has been deprecated in favor of the above `archive` endpoint
which can be used to download files and directories from a container.
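As a rough sketch of how a client might exercise the new endpoint (illustrative only: it assumes a daemon reachable at `localhost:2375` and uses a placeholder container ID):
    # Download /etc from a container as a tar archive via the new endpoint.
    $ curl -o etc.tar "http://localhost:2375/containers/8cce319429b2/archive?path=/etc"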
**New!**
The `hostConfig` option now accepts the field `GroupAdd`, which specifies a list of additional
groups that the container process will run as.
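For example, the matching `docker run --group-add` flag can be sketched as follows (the numeric GIDs are placeholders; group names valid inside the image work the same way):
    # Run a container whose process is also a member of the extra groups;
    # `id` prints the resulting group membership.
    $ docker run --rm --group-add 777 --group-add 1001 busybox id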


@ -144,9 +144,14 @@ Create a container
"com.example.license": "GPL",
"com.example.version": "1.0"
},
"Volumes": {
"/tmp": {}
},
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
],
"WorkingDir": "",
"NetworkDisabled": false,
"MacAddress": "12:34:56:78:9a:bc",
@ -227,8 +232,7 @@ Json Parameters:
- **Entrypoint** - Set the entry point for the container as a string or an array
of strings.
- **Image** - A string specifying the image name to use for the container.
- **Volumes** – An object mapping mount point paths (strings) inside the
container to empty objects.
- **Mounts** - An array of mount points in the container.
- **WorkingDir** - A string specifying the working directory for commands to
run in.
- **NetworkDisabled** - Boolean value, when true disables networking for the
@ -424,8 +428,14 @@ Return low-level information on the container `id`
"Running": false,
"StartedAt": "2015-01-06T15:47:32.072697474Z"
},
"Volumes": {},
"VolumesRW": {}
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
]
}
Status Codes:
@ -1039,6 +1049,8 @@ Status Codes:
Copy files or folders of container `id`
**Deprecated** in favor of the `archive` endpoint below.
**Example request**:
POST /containers/4fa6e0f0c678/copy HTTP/1.1
@ -1061,6 +1073,120 @@ Status Codes:
- **404** – no such container
- **500** – server error
### Retrieving information about files and folders in a container
`HEAD /containers/(id)/archive`
See the description of the `X-Docker-Container-Path-Stat` header in the
following section.
### Get an archive of a filesystem resource in a container
`GET /containers/(id)/archive`
Get a tar archive of a resource in the filesystem of container `id`.
Query Parameters:
- **path** - resource in the container's filesystem to archive. Required.
If not an absolute path, it is relative to the container's root directory.
The resource specified by **path** must exist. To assert that the resource
is expected to be a directory, **path** should end in `/` or `/.`
(assuming a path separator of `/`). If **path** ends in `/.` then this
indicates that only the contents of the **path** directory should be
copied. A symlink is always resolved to its target.
**Note**: It is not possible to copy certain system files such as resources
under `/proc`, `/sys`, `/dev`, and mounts created by the user in the
container.
**Example request**:
GET /containers/8cce319429b2/archive?path=/root HTTP/1.1
**Example response**:
HTTP/1.1 200 OK
Content-Type: application/x-tar
X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInBhdGgiOiIvcm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oifQ==
{{ TAR STREAM }}
On success, a response header `X-Docker-Container-Path-Stat` will be set to a
base64-encoded JSON object containing some filesystem header information about
the archived resource. The above example value would decode to the following
JSON object (whitespace added for readability):
{
"name": "root",
"path": "/root",
"size": 4096,
"mode": 2147484096,
"mtime": "2014-02-27T20:51:23Z"
}
A `HEAD` request can also be made to this endpoint if only this information is
desired.
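As an illustration, the header can be retrieved with the `HEAD` variant and decoded on the command line (a sketch assuming a daemon reachable at `localhost:2375` and GNU `base64`):
    # Fetch only the stat header and decode its base64-encoded JSON payload.
    $ curl -sI "http://localhost:2375/containers/8cce319429b2/archive?path=/root" \
        | grep -i '^x-docker-container-path-stat:' \
        | cut -d' ' -f2 | tr -d '\r' | base64 -d
    {"name":"root","path":"/root","size":4096,"mode":2147484096,"mtime":"2014-02-27T20:51:23Z"}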
Status Codes:
- **200** - success, returns archive of copied resource
- **400** - client error, bad parameter, details in JSON response body, one of:
- must specify path parameter (**path** cannot be empty)
- not a directory (**path** was asserted to be a directory but exists as a
file)
- **404** - client error, resource not found, one of:
- no such container (container `id` does not exist)
- no such file or directory (**path** does not exist)
- **500** - server error
### Extract an archive of files or folders to a directory in a container
`PUT /containers/(id)/archive`
Upload a tar archive to be extracted to a path in the filesystem of container
`id`.
Query Parameters:
- **path** - path to a directory in the container
to extract the archive's contents into. Required.
If not an absolute path, it is relative to the container's root directory.
The **path** resource must exist.
- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
if unpacking the given content would cause an existing directory to be
replaced with a non-directory and vice versa.
**Example request**:
PUT /containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
Content-Type: application/x-tar
{{ TAR STREAM }}
**Example response**:
HTTP/1.1 200 OK
Status Codes:
- **200** – the content was extracted successfully
- **400** - client error, bad parameter, details in JSON response body, one of:
- must specify path parameter (**path** cannot be empty)
- not a directory (**path** should be a directory but exists as a file)
- unable to overwrite existing directory with non-directory
(if **noOverwriteDirNonDir**)
- unable to overwrite existing non-directory with directory
(if **noOverwriteDirNonDir**)
- **403** - client error, permission denied, the volume
or container rootfs is marked as read-only.
- **404** - client error, resource not found, one of:
- no such container (container `id` does not exist)
- no such file or directory (**path** resource does not exist)
- **500** – server error
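A sketch of the corresponding request using `curl` (again assuming a daemon reachable at `localhost:2375`; `./config` is only a placeholder directory to archive):
    # Pack a local directory and extract it into /vol1 inside the container,
    # refusing to replace an existing directory with a non-directory.
    $ tar -cf files.tar ./config
    $ curl -X PUT -H "Content-Type: application/x-tar" \
        --data-binary @files.tar \
        "http://localhost:2375/containers/8cce319429b2/archive?path=/vol1&noOverwriteDirNonDir=1"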
## 2.2 Images
### List Images
@ -1698,9 +1824,14 @@ Create a new image from a container's changes
"Cmd": [
"date"
],
"Volumes": {
"/tmp": {}
},
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
],
"Labels": {
"key1": "value1",
"key2": "value2"
@ -2086,8 +2217,7 @@ Return low-level information about the `exec` command `id`.
"ProcessLabel" : "",
"AppArmorProfile" : "",
"RestartCount" : 0,
"Volumes" : {},
"VolumesRW" : {}
"Mounts" : [],
}
}


@ -11,12 +11,81 @@ weight=1
# cp
Usage: docker cp CONTAINER:PATH HOSTDIR|-
Copy files/folders between a container and the local filesystem.
Copy files/folders from the PATH to the HOSTDIR.
Usage: docker cp [options] CONTAINER:PATH LOCALPATH|-
docker cp [options] LOCALPATH|- CONTAINER:PATH
Copy files or folders from a container's filesystem to the directory on the
host. Use '-' to write the data as a tar file to `STDOUT`. `CONTAINER:PATH` is
relative to the root of the container's filesystem.
--help Print usage statement
In the first synopsis form, the `docker cp` utility copies the contents of
`PATH` from the filesystem of `CONTAINER` to the `LOCALPATH` (or stream as
a tar archive to `STDOUT` if `-` is specified).
In the second synopsis form, the contents of `LOCALPATH` (or a tar archive
streamed from `STDIN` if `-` is specified) are copied from the local machine to
`PATH` in the filesystem of `CONTAINER`.
You can copy to or from either a running or stopped container. The `PATH` can
be a file or directory. The `docker cp` command assumes all `CONTAINER:PATH`
values are relative to the `/` (root) directory of the container. This means
supplying the initial forward slash is optional; the command sees
`compassionate_darwin:/tmp/foo/myfile.txt` and
`compassionate_darwin:tmp/foo/myfile.txt` as identical. If a `LOCALPATH` value
is not absolute, it is considered relative to the current working directory.
Behavior is similar to the common Unix utility `cp -a` in that directories are
copied recursively with permissions preserved if possible. Ownership is set to
the user and primary group on the receiving end of the transfer. For example,
files copied to a container will be created with `UID:GID` of the root user.
Files copied to the local machine will be created with the `UID:GID` of the
user which invoked the `docker cp` command.
Assuming a path separator of `/`, a first argument of `SRC_PATH` and second
argument of `DST_PATH`, the behavior is as follows:
- `SRC_PATH` specifies a file
- `DST_PATH` does not exist
- the file is saved to a file created at `DST_PATH`
- `DST_PATH` does not exist and ends with `/`
- Error condition: the destination directory must exist.
- `DST_PATH` exists and is a file
- the destination is overwritten with the contents of the source file
- `DST_PATH` exists and is a directory
- the file is copied into this directory using the basename from
`SRC_PATH`
- `SRC_PATH` specifies a directory
- `DST_PATH` does not exist
- `DST_PATH` is created as a directory and the *contents* of the source
directory are copied into this directory
- `DST_PATH` exists and is a file
- Error condition: cannot copy a directory to a file
- `DST_PATH` exists and is a directory
- `SRC_PATH` does not end with `/.`
- the source directory is copied into this directory
- `SRC_PATH` does end with `/.`
- the *content* of the source directory is copied into this
directory
The command requires `SRC_PATH` and `DST_PATH` to exist according to the above
rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not
the target, is copied.
A colon (`:`) is used as a delimiter between `CONTAINER` and `PATH`, but `:`
could also be in a valid `LOCALPATH`, like `file:name.txt`. This ambiguity is
resolved by requiring a `LOCALPATH` with a `:` to be made explicit with a
relative or absolute path, for example:
`/path/to/file:name.txt` or `./file:name.txt`
It is not possible to copy certain system files such as resources under
`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
Using `-` as the first argument in place of a `LOCALPATH` will stream the
contents of `STDIN` as a tar archive which will be extracted to the `PATH` in
the filesystem of the destination container. In this case, `PATH` must specify
a directory.
Using `-` as the second argument in place of a `LOCALPATH` will stream the
contents of the resource from the source container as a tar archive to
`STDOUT`.
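A few illustrative invocations (a sketch; `mycontainer` and the paths shown are
placeholders, and the destination directories are assumed to exist):

    # Copy a single file out of the container into the current directory
    docker cp mycontainer:/etc/hostname .

    # Copy a local directory and its contents into the container
    docker cp ./config mycontainer:/etc/myapp

    # Stream a directory out of the container as a tar archive on STDOUT
    docker cp mycontainer:/var/log - > logs.tar

    # Stream a tar archive from STDIN into a directory in the container
    tar -cf - ./assets | docker cp - mycontainer:/srv/www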

Просмотреть файл

@ -18,7 +18,7 @@ commands. For example,
docker network create -d weave mynet
Some network driver plugins are listed in [plugins.md](plugins.md)
Some network driver plugins are listed in [plugins.md](/docs/extend/plugins.md)
The network thus created is owned by the plugin, so subsequent commands
referring to that network will also be run through the plugin.

Просмотреть файл

@ -70,15 +70,17 @@ clean() {
github.com/docker/docker/integration-cli # external tests
)
local dockerPlatforms=( linux/amd64 $(_dockerfile_env DOCKER_CROSSPLATFORMS) )
local dockerPlatforms=( linux/amd64 windows/amd64 $(_dockerfile_env DOCKER_CROSSPLATFORMS) )
local dockerBuildTags="$(_dockerfile_env DOCKER_BUILDTAGS)"
local buildTagCombos=(
''
'experimental'
"$dockerBuildTags"
"daemon $dockerBuildTags"
"daemon cgo $dockerBuildTags"
"experimental $dockerBuildTags"
"experimental daemon $dockerBuildTags"
"experimental daemon cgo $dockerBuildTags"
)
echo
@ -98,7 +100,10 @@ clean() {
unset IFS
echo -n 'pruning unused packages, '
findArgs=()
findArgs=(
# This directory contains only .c and .h files which are necessary
-path vendor/src/github.com/mattn/go-sqlite3/code
)
for import in "${imports[@]}"; do
[ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or )
findArgs+=( -path "vendor/src/$import" )
@ -107,12 +112,12 @@ clean() {
local prune=( $(find vendor -depth -type d -not '(' "${findArgs[@]}" ')') )
unset IFS
for dir in "${prune[@]}"; do
find "$dir" -maxdepth 1 -not -type d -exec rm -f '{}' +
find "$dir" -maxdepth 1 -not -type d -exec rm -v -f '{}' +
rmdir "$dir" 2>/dev/null || true
done
echo -n 'pruning unused files, '
find vendor -type f -name '*_test.go' -exec rm '{}' +
find vendor -type f -name '*_test.go' -exec rm -v '{}' +
echo done
}

Просмотреть файл

@ -9,3 +9,4 @@ contrib/init/systemd/docker.socket lib/systemd/system/
contrib/mk* usr/share/docker-engine/contrib/
contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
contrib/apparmor/* etc/apparmor.d/

Просмотреть файл

@ -32,5 +32,8 @@ override_dh_installudev:
# match our existing priority
dh_installudev --priority=z80
override_dh_install:
dh_apparmor --profile-name=docker -pdocker-engine
%:
dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)

Просмотреть файл

@ -35,6 +35,8 @@ if [ -z "$DOCKER_TEST_HOST" ]; then
(
set -x
/etc/init.d/apparmor start
/sbin/apparmor_parser -r -W -T contrib/apparmor/
)
fi

Просмотреть файл

@ -72,6 +72,10 @@ bundle_ubuntu() {
done
done
# Include contributed apparmor policy
mkdir -p "$DIR/etc/apparmor.d/"
cp contrib/apparmor/docker "$DIR/etc/apparmor.d/"
# Copy the binary
# This will fail if the binary bundle hasn't been built
mkdir -p "$DIR/usr/bin"
@ -89,6 +93,10 @@ if [ "$1" = 'configure' ] && [ -z "$2" ]; then
fi
fi
if ( aa-status --enabled ); then
/sbin/apparmor_parser -r -W -T /etc/apparmor.d/docker
fi
if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then
# we only need to do this if upstart isn't in charge
update-rc.d docker defaults > /dev/null || true
@ -149,6 +157,7 @@ EOF
--deb-recommends git \
--deb-recommends xz-utils \
--deb-recommends 'cgroupfs-mount | cgroup-lite' \
--deb-suggests apparmor \
--description "$PACKAGE_DESCRIPTION" \
--maintainer "$PACKAGE_MAINTAINER" \
--conflicts docker \

58
hack/make/validate-lint Normal file
Просмотреть файл

@ -0,0 +1,58 @@
#!/bin/bash
source "${MAKEDIR}/.validate"
# We will eventually get to the point when packages should be the complete list
# of subpackages, vendoring excluded, as given by:
#
# packages=( $(go list ./... 2> /dev/null | grep -vE "^github.com/docker/docker/vendor" || true ) )
packages=(
builder/parser/dumper
daemon/events
daemon/execdriver/native/template
daemon/graphdriver/btrfs
daemon/network
docker
dockerinit
pkg/chrootarchive
pkg/directory
pkg/fileutils
pkg/homedir
pkg/listenbuffer
pkg/mflag/example
pkg/namesgenerator
pkg/promise
pkg/pubsub
pkg/random
pkg/reexec
pkg/symlink
pkg/timeutils
pkg/tlsconfig
pkg/urlutil
pkg/version
utils
)
errors=()
for p in "${packages[@]}"; do
failedLint=$(golint "$p")
if [ "$failedLint" ]; then
errors+=( "$failedLint" )
fi
done
if [ ${#errors[@]} -eq 0 ]; then
echo 'Congratulations! All Go source files have been linted.'
else
{
echo "Errors from golint:"
for err in "${errors[@]}"; do
echo "$err"
done
echo
echo 'Please fix the above errors. You can test via "golint" and commit the result.'
echo
} >&2
false
fi
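As a usage sketch, this bundle would typically be run through the existing `hack`
entry point, inside the development container, with `golint` already on `PATH`
(the exact invocation below is an assumption, not part of this patch):

    # Run only the lint validation bundle
    ./hack/make.sh validate-lint

    # Or lint a single listed package by hand from the repository root
    golint pkg/homedir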

Просмотреть файл

@ -13,6 +13,7 @@ clone git github.com/gorilla/context 14f550f51a
clone git github.com/gorilla/mux e444e69cbd
clone git github.com/kr/pty 5cf931ef8f
clone git github.com/microsoft/hcsshim f674a70f1306dbe20b3a516bedd3285d85db60d9
clone git github.com/mattn/go-sqlite3 b4142c444a8941d0d92b0b7103a24df9cd815e42
clone git github.com/mistifyio/go-zfs v2.1.1
clone git github.com/natefinch/npipe 0938d701e50e580f5925c773055eb6d6b32a0cbc
clone git github.com/tchap/go-patricia v2.1.0

Просмотреть файл

@ -190,7 +190,7 @@ func (s *DockerSuite) TestContainerApiStartVolumeBinds(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusNoContent)
pth, err := inspectFieldMap(name, "Volumes", "/tmp")
pth, err := inspectMountSourceField(name, "/tmp")
if err != nil {
c.Fatal(err)
}
@ -233,7 +233,7 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
dockerCmd(c, "run", "-d", "--name", volName, "-v", volPath, "busybox")
name := "TestContainerApiStartDupVolumeBinds"
name := "TestContainerApiStartVolumesFrom"
config := map[string]interface{}{
"Image": "busybox",
"Volumes": map[string]struct{}{volPath: {}},
@ -250,11 +250,11 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusNoContent)
pth, err := inspectFieldMap(name, "Volumes", volPath)
pth, err := inspectMountSourceField(name, volPath)
if err != nil {
c.Fatal(err)
}
pth2, err := inspectFieldMap(volName, "Volumes", volPath)
pth2, err := inspectMountSourceField(volName, volPath)
if err != nil {
c.Fatal(err)
}
@ -705,7 +705,7 @@ func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) {
dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox")
fooDir, err := inspectFieldMap("one", "Volumes", "/foo")
fooDir, err := inspectMountSourceField("one", "/foo")
if err != nil {
c.Fatal(err)
}
@ -717,7 +717,7 @@ func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusNoContent)
fooDir2, err := inspectFieldMap("two", "Volumes", "/foo")
fooDir2, err := inspectMountSourceField("two", "/foo")
if err != nil {
c.Fatal(err)
}
@ -872,6 +872,32 @@ func (s *DockerSuite) TestContainerApiCommitWithLabelInConfig(c *check.C) {
dockerCmd(c, "run", img.Id, "ls", "/test")
}
func (s *DockerSuite) TestContainerApiBadPort(c *check.C) {
config := map[string]interface{}{
"Image": "busybox",
"Cmd": []string{"/bin/sh", "-c", "echo test"},
"PortBindings": map[string]interface{}{
"8080/tcp": []map[string]interface{}{
{
"HostIP": "",
"HostPort": "aa80",
},
},
},
}
jsonData := bytes.NewBuffer(nil)
json.NewEncoder(jsonData).Encode(config)
status, b, err := sockRequest("POST", "/containers/create", config)
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusInternalServerError)
if strings.TrimSpace(string(b)) != `Invalid port specification: "aa80"` {
c.Fatalf("Incorrect error msg: %s", string(b))
}
}
func (s *DockerSuite) TestContainerApiCreate(c *check.C) {
config := map[string]interface{}{
"Image": "busybox",
@ -1467,17 +1493,15 @@ func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) {
id := strings.TrimSpace(out)
c.Assert(waitRun(id), check.IsNil)
vol, err := inspectFieldMap(id, "Volumes", "/testvolume")
c.Assert(err, check.IsNil)
_, err = os.Stat(vol)
source, err := inspectMountSourceField(id, "/testvolume")
_, err = os.Stat(source)
c.Assert(err, check.IsNil)
status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil)
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusNoContent)
if _, err := os.Stat(vol); !os.IsNotExist(err) {
if _, err := os.Stat(source); !os.IsNotExist(err) {
c.Fatalf("expected to get ErrNotExist error, got %v", err)
}
}
@ -1614,3 +1638,48 @@ func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *che
c.Assert(err, check.IsNil)
c.Assert(status, check.Equals, http.StatusCreated)
}
// #14640
func (s *DockerSuite) TestPostContainersStartWithoutLinksInHostConfig(c *check.C) {
name := "test-host-config-links"
dockerCmd(c, "create", "--name", name, "busybox", "top")
hc, err := inspectFieldJSON(name, "HostConfig")
c.Assert(err, check.IsNil)
config := `{"HostConfig":` + hc + `}`
res, _, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusNoContent)
}
// #14640
func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfig(c *check.C) {
name := "test-host-config-links"
dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top")
dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top")
hc, err := inspectFieldJSON(name, "HostConfig")
c.Assert(err, check.IsNil)
config := `{"HostConfig":` + hc + `}`
res, _, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusNoContent)
}
// #14640
func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) {
name := "test-host-config-links"
out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top")
id := strings.TrimSpace(out)
dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top")
hc, err := inspectFieldJSON(name, "HostConfig")
c.Assert(err, check.IsNil)
config := `{"HostConfig":` + hc + `}`
res, _, err := sockRequestRaw("POST", "/containers/"+name+"/start", strings.NewReader(config), "application/json")
c.Assert(err, check.IsNil)
c.Assert(res.StatusCode, check.Equals, http.StatusNoContent)
}
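The `inspectMountSourceField` helper these tests now call is not part of this hunk;
conceptually it resolves a destination path to its host source through the new
`Mounts` field. A rough shell equivalent, assuming the 1.20 inspect format and a
container named `mycontainer` with a volume mounted at `/tmp`:

    docker inspect -f \
      '{{range .Mounts}}{{if eq .Destination "/tmp"}}{{.Source}}{{end}}{{end}}' \
      mycontainer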

Просмотреть файл

@ -2,6 +2,7 @@ package main
import (
"encoding/json"
"fmt"
"net/http"
"strings"
@ -12,28 +13,38 @@ func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
cleanedContainerID := strings.TrimSpace(out)
keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings",
"ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "GraphDriver"}
endpoint := "/containers/" + cleanedContainerID + "/json"
status, body, err := sockRequest("GET", endpoint, nil)
c.Assert(status, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
var inspectJSON map[string]interface{}
if err = json.Unmarshal(body, &inspectJSON); err != nil {
c.Fatalf("unable to unmarshal body for latest version: %v", err)
cases := []struct {
version string
keys []string
}{
{"1.20", append(keysBase, "Mounts")},
{"1.19", append(keysBase, "Volumes", "VolumesRW")},
}
keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW", "GraphDriver"}
for _, cs := range cases {
endpoint := fmt.Sprintf("/v%s/containers/%s/json", cs.version, cleanedContainerID)
keys = append(keys, "Id")
status, body, err := sockRequest("GET", endpoint, nil)
c.Assert(status, check.Equals, http.StatusOK)
c.Assert(err, check.IsNil)
for _, key := range keys {
if _, ok := inspectJSON[key]; !ok {
c.Fatalf("%s does not exist in response for latest version", key)
var inspectJSON map[string]interface{}
if err = json.Unmarshal(body, &inspectJSON); err != nil {
c.Fatalf("unable to unmarshal body for version %s: %v", cs.version, err)
}
for _, key := range cs.keys {
if _, ok := inspectJSON[key]; !ok {
c.Fatalf("%s does not exist in response for version %s", key, cs.version)
}
}
//Issue #6830: type not properly converted to JSON/back
if _, ok := inspectJSON["Path"].(bool); ok {
c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
}
}
//Issue #6830: type not properly converted to JSON/back
if _, ok := inspectJSON["Path"].(bool); ok {
c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
}
}
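To see the difference this test exercises, the two API versions can be queried
directly; a sketch assuming a daemon listening on `localhost:2375` and `<id>` as a
placeholder for the container ID:

    # v1.20 responses carry a Mounts array
    curl http://localhost:2375/v1.20/containers/<id>/json

    # v1.19 responses still carry Volumes and VolumesRW maps
    curl http://localhost:2375/v1.19/containers/<id>/json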

Просмотреть файл

@ -0,0 +1,503 @@
package main
import (
"os"
"path/filepath"
"github.com/go-check/check"
)
// docker cp CONTAINER:PATH LOCALPATH
// Try all of the test cases from the archive package which implements the
// internals of `docker cp` and ensure that the behavior matches when actually
// copying to and from containers.
// Basic assumptions about SRC and DST:
// 1. SRC must exist.
// 2. If SRC ends with a trailing separator, it must be a directory.
// 3. DST parent directory must exist.
// 4. If DST exists as a file, it must not end with a trailing separator.
// First get these easy error cases out of the way.
// Test for error when SRC does not exist.
func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists")
defer os.RemoveAll(tmpDir)
err := runDockerCp(c, containerCpPath(cID, "file1"), tmpDir)
if err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when SRC ends in a trailing
// path separator but it exists as a file.
func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir")
defer os.RemoveAll(tmpDir)
err := runDockerCp(c, containerCpPathTrailingSep(cID, "file1"), tmpDir)
if err == nil {
c.Fatal("expected IsNotDir error, but got nil instead")
}
if !isCpNotDir(err) {
c.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
}
// Test for error when SRC is a valid file or directory,
// but the DST parent directory does not exist.
func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
// Try with a file source.
srcPath := containerCpPath(cID, "/file1")
dstPath := cpPath(tmpDir, "notExists", "file1")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
// Try with a directory source.
srcPath = containerCpPath(cID, "/dir1")
if err := runDockerCp(c, srcPath, dstPath); err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when DST ends in a trailing
// path separator but exists as a file.
func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
// Try with a file source.
srcPath := containerCpPath(cID, "/file1")
dstPath := cpPathTrailingSep(tmpDir, "file1")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected IsNotDir error, but got nil instead")
}
if !isCpNotDir(err) {
c.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
// Try with a directory source.
srcPath = containerCpPath(cID, "/dir1")
if err := runDockerCp(c, srcPath, dstPath); err == nil {
c.Fatal("expected IsNotDir error, but got nil instead")
}
if !isCpNotDir(err) {
c.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
}
// Possibilities are reduced to the remaining 10 cases:
//
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
// ===================================================================================================
// A | no | - | no | - | no | create file
// B | no | - | no | - | yes | error
// C | no | - | yes | no | - | overwrite file
// D | no | - | yes | yes | - | create file in dst dir
// E | yes | no | no | - | - | create dir, copy contents
// F | yes | no | yes | no | - | error
// G | yes | no | yes | yes | - | copy dir and contents
// H | yes | yes | no | - | - | create dir, copy contents
// I | yes | yes | yes | no | - | error
// J | yes | yes | yes | yes | - | copy dir contents
//
// A. SRC specifies a file and DST (no trailing path separator) doesn't
// exist. This should create a file with the name DST and copy the
// contents of the source file into it.
func (s *DockerSuite) TestCpFromCaseA(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-a")
defer os.RemoveAll(tmpDir)
srcPath := containerCpPath(cID, "/root/file1")
dstPath := cpPath(tmpDir, "itWorks.txt")
if err := runDockerCp(c, srcPath, dstPath); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1\n"); err != nil {
c.Fatal(err)
}
}
// B. SRC specifies a file and DST (with trailing path separator) doesn't
// exist. This should cause an error because the copy operation cannot
// create a directory when copying a single file.
func (s *DockerSuite) TestCpFromCaseB(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-b")
defer os.RemoveAll(tmpDir)
srcPath := containerCpPath(cID, "/file1")
dstDir := cpPathTrailingSep(tmpDir, "testDir")
err := runDockerCp(c, srcPath, dstDir)
if err == nil {
c.Fatal("expected DirNotExists error, but got nil instead")
}
if !isCpDirNotExist(err) {
c.Fatalf("expected DirNotExists error, but got %T: %s", err, err)
}
}
// C. SRC specifies a file and DST exists as a file. This should overwrite
// the file at DST with the contents of the source file.
func (s *DockerSuite) TestCpFromCaseC(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-c")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := containerCpPath(cID, "/root/file1")
dstPath := cpPath(tmpDir, "file2")
// Ensure the local file starts with different content.
if err := fileContentEquals(c, dstPath, "file2\n"); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcPath, dstPath); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1\n"); err != nil {
c.Fatal(err)
}
}
// D. SRC specifies a file and DST exists as a directory. This should place
// a copy of the source file inside it using the basename from SRC. Ensure
// this works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpFromCaseD(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-d")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := containerCpPath(cID, "/file1")
dstDir := cpPath(tmpDir, "dir1")
dstPath := filepath.Join(dstDir, "file1")
// Ensure that dstPath doesn't exist.
if _, err := os.Stat(dstPath); !os.IsNotExist(err) {
c.Fatalf("did not expect dstPath %q to exist", dstPath)
}
if err := runDockerCp(c, srcPath, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err := os.RemoveAll(dstDir); err != nil {
c.Fatalf("unable to remove dstDir: %s", err)
}
if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
c.Fatalf("unable to make dstDir: %s", err)
}
dstDir = cpPathTrailingSep(tmpDir, "dir1")
if err := runDockerCp(c, srcPath, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1\n"); err != nil {
c.Fatal(err)
}
}
// E. SRC specifies a directory and DST does not exist. This should create a
// directory at DST and copy the contents of the SRC directory into the DST
// directory. Ensure this works whether DST has a trailing path separator or
// not.
func (s *DockerSuite) TestCpFromCaseE(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-e")
defer os.RemoveAll(tmpDir)
srcDir := containerCpPath(cID, "dir1")
dstDir := cpPath(tmpDir, "testDir")
dstPath := filepath.Join(dstDir, "file1-1")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err := os.RemoveAll(dstDir); err != nil {
c.Fatalf("unable to remove dstDir: %s", err)
}
dstDir = cpPathTrailingSep(tmpDir, "testDir")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// F. SRC specifies a directory and DST exists as a file. This should cause an
// error as it is not possible to overwrite a file with a directory.
func (s *DockerSuite) TestCpFromCaseF(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-f")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := containerCpPath(cID, "/root/dir1")
dstFile := cpPath(tmpDir, "file1")
err := runDockerCp(c, srcDir, dstFile)
if err == nil {
c.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if !isCpCannotCopyDir(err) {
c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// G. SRC specifies a directory and DST exists as a directory. This should copy
// the SRC directory and all its contents to the DST directory. Ensure this
// works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpFromCaseG(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-g")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := containerCpPath(cID, "/root/dir1")
dstDir := cpPath(tmpDir, "dir2")
resultDir := filepath.Join(dstDir, "dir1")
dstPath := filepath.Join(resultDir, "file1-1")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err := os.RemoveAll(dstDir); err != nil {
c.Fatalf("unable to remove dstDir: %s", err)
}
if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
c.Fatalf("unable to make dstDir: %s", err)
}
dstDir = cpPathTrailingSep(tmpDir, "dir2")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// H. SRC specifies a directory's contents only and DST does not exist. This
// should create a directory at DST and copy the contents of the SRC
// directory (but not the directory itself) into the DST directory. Ensure
// this works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpFromCaseH(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-h")
defer os.RemoveAll(tmpDir)
srcDir := containerCpPathTrailingSep(cID, "dir1") + "."
dstDir := cpPath(tmpDir, "testDir")
dstPath := filepath.Join(dstDir, "file1-1")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err := os.RemoveAll(dstDir); err != nil {
c.Fatalf("unable to remove resultDir: %s", err)
}
dstDir = cpPathTrailingSep(tmpDir, "testDir")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// I. SRC specifies a directory's contents only and DST exists as a file. This
// should cause an error as it is not possible to overwrite a file with a
// directory.
func (s *DockerSuite) TestCpFromCaseI(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-i")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := containerCpPathTrailingSep(cID, "/root/dir1") + "."
dstFile := cpPath(tmpDir, "file1")
err := runDockerCp(c, srcDir, dstFile)
if err == nil {
c.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if !isCpCannotCopyDir(err) {
c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// J. SRC specifies a directory's contents only and DST exists as a directory.
// This should copy the contents of the SRC directory (but not the directory
// itself) into the DST directory. Ensure this works whether DST has a
// trailing path separator or not.
func (s *DockerSuite) TestCpFromCaseJ(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-from-case-j")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := containerCpPathTrailingSep(cID, "/root/dir1") + "."
dstDir := cpPath(tmpDir, "dir2")
dstPath := filepath.Join(dstDir, "file1-1")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err := os.RemoveAll(dstDir); err != nil {
c.Fatalf("unable to remove dstDir: %s", err)
}
if err := os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
c.Fatalf("unable to make dstDir: %s", err)
}
dstDir = cpPathTrailingSep(tmpDir, "dir2")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := fileContentEquals(c, dstPath, "file1-1\n"); err != nil {
c.Fatal(err)
}
}

Просмотреть файл

@ -23,6 +23,18 @@ const (
cpHostContents = "hello, i am the host"
)
// Ensure that an all-local path case returns an error.
func (s *DockerSuite) TestCpLocalOnly(c *check.C) {
err := runDockerCp(c, "foo", "bar")
if err == nil {
c.Fatal("expected failure, got success")
}
if !strings.Contains(err.Error(), "must specify at least one container source") {
c.Fatalf("unexpected output: %s", err.Error())
}
}
// Test for #5656
// Check that garbage paths don't escape the container's rootfs
func (s *DockerSuite) TestCpGarbagePath(c *check.C) {

Просмотреть файл

@ -0,0 +1,634 @@
package main
import (
"os"
"github.com/go-check/check"
)
// docker cp LOCALPATH CONTAINER:PATH
// Try all of the test cases from the archive package which implements the
// internals of `docker cp` and ensure that the behavior matches when actually
// copying to and from containers.
// Basic assumptions about SRC and DST:
// 1. SRC must exist.
// 2. If SRC ends with a trailing separator, it must be a directory.
// 3. DST parent directory must exist.
// 4. If DST exists as a file, it must not end with a trailing separator.
// First get these easy error cases out of the way.
// Test for error when SRC does not exist.
func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists")
defer os.RemoveAll(tmpDir)
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "file1")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when SRC ends in a trailing
// path separator but it exists as a file.
func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := cpPathTrailingSep(tmpDir, "file1")
dstPath := containerCpPath(cID, "testDir")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected IsNotDir error, but got nil instead")
}
if !isCpNotDir(err) {
c.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
}
// Test for error when SRC is a valid file or directory,
// but the DST parent directory does not exist.
func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
// Try with a file source.
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "/notExists", "file1")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
// Try with a directory source.
srcPath = cpPath(tmpDir, "dir1")
if err := runDockerCp(c, srcPath, dstPath); err == nil {
c.Fatal("expected IsNotExist error, but got nil instead")
}
if !isCpNotExist(err) {
c.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when DST ends in a trailing path separator but exists as a
// file. Also test that we cannot overwrite an existing directory with a
// non-directory and cannot overwrite an existing non-directory with a directory.
func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{addContent: true})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
// Try with a file source.
srcPath := cpPath(tmpDir, "dir1/file1-1")
dstPath := containerCpPathTrailingSep(cID, "file1")
// The client should encounter an error trying to stat the destination
// and then be unable to copy since the destination is asserted to be a
// directory but does not exist.
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected DirNotExist error, but got nil instead")
}
if !isCpDirNotExist(err) {
c.Fatalf("expected DirNotExist error, but got %T: %s", err, err)
}
// Try with a directory source.
srcPath = cpPath(tmpDir, "dir1")
// The client should encounter an error trying to stat the destination and
// then decide to extract to the parent directory instead with a rebased
// name in the source archive, but this directory would overwrite the
// existing file with the same name.
err = runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected CannotOverwriteNonDirWithDir error, but got nil instead")
}
if !isCannotOverwriteNonDirWithDir(err) {
c.Fatalf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)
}
}
// Possibilities are reduced to the remaining 10 cases:
//
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
// ===================================================================================================
// A | no | - | no | - | no | create file
// B | no | - | no | - | yes | error
// C | no | - | yes | no | - | overwrite file
// D | no | - | yes | yes | - | create file in dst dir
// E | yes | no | no | - | - | create dir, copy contents
// F | yes | no | yes | no | - | error
// G | yes | no | yes | yes | - | copy dir and contents
// H | yes | yes | no | - | - | create dir, copy contents
// I | yes | yes | yes | no | - | error
// J | yes | yes | yes | yes | - | copy dir contents
//
// A. SRC specifies a file and DST (no trailing path separator) doesn't
// exist. This should create a file with the name DST and copy the
// contents of the source file into it.
func (s *DockerSuite) TestCpToCaseA(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
workDir: "/root", command: makeCatFileCommand("itWorks.txt"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-a")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "/root/itWorks.txt")
if err := runDockerCp(c, srcPath, dstPath); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil {
c.Fatal(err)
}
}
// B. SRC specifies a file and DST (with trailing path separator) doesn't
// exist. This should cause an error because the copy operation cannot
// create a directory when copying a single file.
func (s *DockerSuite) TestCpToCaseB(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("testDir/file1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-b")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := cpPath(tmpDir, "file1")
dstDir := containerCpPathTrailingSep(cID, "testDir")
err := runDockerCp(c, srcPath, dstDir)
if err == nil {
c.Fatal("expected DirNotExists error, but got nil instead")
}
if !isCpDirNotExist(err) {
c.Fatalf("expected DirNotExists error, but got %T: %s", err, err)
}
}
// C. SRC specifies a file and DST exists as a file. This should overwrite
// the file at DST with the contents of the source file.
func (s *DockerSuite) TestCpToCaseC(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
command: makeCatFileCommand("file2"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-c")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "/root/file2")
// Ensure the container's file starts with the original content.
if err := containerStartOutputEquals(c, cID, "file2\n"); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcPath, dstPath); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1's contents.
if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil {
c.Fatal(err)
}
}
// D. SRC specifies a file and DST exists as a directory. This should place
// a copy of the source file inside it using the basename from SRC. Ensure
// this works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpToCaseD(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true,
command: makeCatFileCommand("/dir1/file1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-d")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcPath := cpPath(tmpDir, "file1")
dstDir := containerCpPath(cID, "dir1")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcPath, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1's contents.
if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
// Make new destination container.
cID = makeTestContainer(c, testContainerOptions{
addContent: true,
command: makeCatFileCommand("/dir1/file1"),
})
defer deleteContainer(cID)
dstDir = containerCpPathTrailingSep(cID, "dir1")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcPath, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1's contents.
if err := containerStartOutputEquals(c, cID, "file1\n"); err != nil {
c.Fatal(err)
}
}
// E. SRC specifies a directory and DST does not exist. This should create a
// directory at DST and copy the contents of the SRC directory into the DST
// directory. Ensure this works whether DST has a trailing path separator or
// not.
func (s *DockerSuite) TestCpToCaseE(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("/testDir/file1-1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-e")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPath(tmpDir, "dir1")
dstDir := containerCpPath(cID, "testDir")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
// Make new destination container.
cID = makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("/testDir/file1-1"),
})
defer deleteContainer(cID)
dstDir = containerCpPathTrailingSep(cID, "testDir")
err := runDockerCp(c, srcDir, dstDir)
if err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// F. SRC specifies a directory and DST exists as a file. This should cause an
// error as it is not possible to overwrite a file with a directory.
func (s *DockerSuite) TestCpToCaseF(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-f")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPath(tmpDir, "dir1")
dstFile := containerCpPath(cID, "/root/file1")
err := runDockerCp(c, srcDir, dstFile)
if err == nil {
c.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if !isCpCannotCopyDir(err) {
c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// G. SRC specifies a directory and DST exists as a directory. This should copy
// the SRC directory and all its contents to the DST directory. Ensure this
// works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpToCaseG(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
command: makeCatFileCommand("dir2/dir1/file1-1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-g")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPath(tmpDir, "dir1")
dstDir := containerCpPath(cID, "/root/dir2")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
// Make new destination container.
cID = makeTestContainer(c, testContainerOptions{
addContent: true,
command: makeCatFileCommand("/dir2/dir1/file1-1"),
})
defer deleteContainer(cID)
dstDir = containerCpPathTrailingSep(cID, "/dir2")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// H. SRC specifies a directory's contents only and DST does not exist. This
// should create a directory at DST and copy the contents of the SRC
// directory (but not the directory itself) into the DST directory. Ensure
// this works whether DST has a trailing path separator or not.
func (s *DockerSuite) TestCpToCaseH(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("/testDir/file1-1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-h")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPathTrailingSep(tmpDir, "dir1") + "."
dstDir := containerCpPath(cID, "testDir")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
// Make new destination container.
cID = makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("/testDir/file1-1"),
})
defer deleteContainer(cID)
dstDir = containerCpPathTrailingSep(cID, "testDir")
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// I. SRC specifies a directory's contents only and DST exists as a file. This
// should cause an error as it is not possible to overwrite a file with a
// directory.
func (s *DockerSuite) TestCpToCaseI(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-i")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPathTrailingSep(tmpDir, "dir1") + "."
dstFile := containerCpPath(cID, "/root/file1")
err := runDockerCp(c, srcDir, dstFile)
if err == nil {
c.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if !isCpCannotCopyDir(err) {
c.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// J. SRC specifies a directory's contents only and DST exists as a directory.
// This should copy the contents of the SRC directory (but not the directory
// itself) into the DST directory. Ensure this works whether DST has a
// trailing path separator or not.
func (s *DockerSuite) TestCpToCaseJ(c *check.C) {
cID := makeTestContainer(c, testContainerOptions{
addContent: true, workDir: "/root",
command: makeCatFileCommand("/dir2/file1-1"),
})
defer deleteContainer(cID)
tmpDir := getTestDir(c, "test-cp-to-case-j")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
srcDir := cpPathTrailingSep(tmpDir, "dir1") + "."
dstDir := containerCpPath(cID, "/dir2")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
// Make new destination container.
cID = makeTestContainer(c, testContainerOptions{
command: makeCatFileCommand("/dir2/file1-1"),
})
defer deleteContainer(cID)
dstDir = containerCpPathTrailingSep(cID, "/dir2")
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
if err := runDockerCp(c, srcDir, dstDir); err != nil {
c.Fatalf("unexpected error %T: %s", err, err)
}
// Should now contain file1-1's contents.
if err := containerStartOutputEquals(c, cID, "file1-1\n"); err != nil {
c.Fatal(err)
}
}
// The `docker cp` command should also ensure that you cannot
// write to a container rootfs that is marked as read-only.
func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) {
tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
cID := makeTestContainer(c, testContainerOptions{
readOnly: true, workDir: "/root",
command: makeCatFileCommand("shouldNotExist"),
})
defer deleteContainer(cID)
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "/root/shouldNotExist")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected ErrContainerRootfsReadonly error, but got nil instead")
}
if !isCpCannotCopyReadOnly(err) {
c.Fatalf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)
}
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
}
// The `docker cp` command should also ensure that you
// cannot write to a volume that is mounted as read-only.
func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) {
tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume")
defer os.RemoveAll(tmpDir)
makeTestContentInDir(c, tmpDir)
cID := makeTestContainer(c, testContainerOptions{
volumes: defaultVolumes(tmpDir), workDir: "/root",
command: makeCatFileCommand("/vol_ro/shouldNotExist"),
})
defer deleteContainer(cID)
srcPath := cpPath(tmpDir, "file1")
dstPath := containerCpPath(cID, "/vol_ro/shouldNotExist")
err := runDockerCp(c, srcPath, dstPath)
if err == nil {
c.Fatal("expected ErrVolumeReadonly error, but got nil instead")
}
if !isCpCannotCopyReadOnly(err) {
c.Fatalf("expected ErrVolumeReadonly error, but got %T: %s", err, err)
}
// Ensure that dstPath doesn't exist.
if err := containerStartOutputEquals(c, cID, ""); err != nil {
c.Fatal(err)
}
}

Просмотреть файл

@ -0,0 +1,298 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/archive"
"github.com/go-check/check"
)
type FileType uint32
const (
Regular FileType = iota
Dir
Symlink
)
type FileData struct {
filetype FileType
path string
contents string
}
func (fd FileData) creationCommand() string {
var command string
switch fd.filetype {
case Regular:
// Don't overwrite the file if it already exists!
command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path)
case Dir:
command = fmt.Sprintf("mkdir -p %s", fd.path)
case Symlink:
command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path)
}
return command
}
func mkFilesCommand(fds []FileData) string {
commands := make([]string, len(fds))
for i, fd := range fds {
commands[i] = fd.creationCommand()
}
return strings.Join(commands, " && ")
}
var defaultFileData = []FileData{
{Regular, "file1", "file1"},
{Regular, "file2", "file2"},
{Regular, "file3", "file3"},
{Regular, "file4", "file4"},
{Regular, "file5", "file5"},
{Regular, "file6", "file6"},
{Regular, "file7", "file7"},
{Dir, "dir1", ""},
{Regular, "dir1/file1-1", "file1-1"},
{Regular, "dir1/file1-2", "file1-2"},
{Dir, "dir2", ""},
{Regular, "dir2/file2-1", "file2-1"},
{Regular, "dir2/file2-2", "file2-2"},
{Dir, "dir3", ""},
{Regular, "dir3/file3-1", "file3-1"},
{Regular, "dir3/file3-2", "file3-2"},
{Dir, "dir4", ""},
{Regular, "dir4/file3-1", "file4-1"},
{Regular, "dir4/file3-2", "file4-2"},
{Dir, "dir5", ""},
{Symlink, "symlink1", "target1"},
{Symlink, "symlink2", "target2"},
}
func defaultMkContentCommand() string {
return mkFilesCommand(defaultFileData)
}
func makeTestContentInDir(c *check.C, dir string) {
for _, fd := range defaultFileData {
path := filepath.Join(dir, filepath.FromSlash(fd.path))
switch fd.filetype {
case Regular:
if err := ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)); err != nil {
c.Fatal(err)
}
case Dir:
if err := os.Mkdir(path, os.FileMode(0777)); err != nil {
c.Fatal(err)
}
case Symlink:
if err := os.Symlink(fd.contents, path); err != nil {
c.Fatal(err)
}
}
}
}
type testContainerOptions struct {
addContent bool
readOnly bool
volumes []string
workDir string
command string
}
func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) {
if options.addContent {
mkContentCmd := defaultMkContentCommand()
if options.command == "" {
options.command = mkContentCmd
} else {
options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command)
}
}
if options.command == "" {
options.command = "#(nop)"
}
args := []string{"run", "-d"}
for _, volume := range options.volumes {
args = append(args, "-v", volume)
}
if options.workDir != "" {
args = append(args, "-w", options.workDir)
}
if options.readOnly {
args = append(args, "--read-only")
}
args = append(args, "busybox", "/bin/sh", "-c", options.command)
out, status := dockerCmd(c, args...)
if status != 0 {
c.Fatalf("failed to run container, status %d: %s", status, out)
}
containerID = strings.TrimSpace(out)
out, status = dockerCmd(c, "wait", containerID)
if status != 0 {
c.Fatalf("failed to wait for test container container, status %d: %s", status, out)
}
if exitCode := strings.TrimSpace(out); exitCode != "0" {
logs, status := dockerCmd(c, "logs", containerID)
if status != 0 {
logs = "UNABLE TO GET LOGS"
}
c.Fatalf("failed to make test container, exit code (%d): %s", exitCode, logs)
}
return
}
func makeCatFileCommand(path string) string {
return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path)
}
func cpPath(pathElements ...string) string {
localizedPathElements := make([]string, len(pathElements))
for i, path := range pathElements {
localizedPathElements[i] = filepath.FromSlash(path)
}
return strings.Join(localizedPathElements, string(filepath.Separator))
}
func cpPathTrailingSep(pathElements ...string) string {
return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator)
}
func containerCpPath(containerID string, pathElements ...string) string {
joined := strings.Join(pathElements, "/")
return fmt.Sprintf("%s:%s", containerID, joined)
}
func containerCpPathTrailingSep(containerID string, pathElements ...string) string {
return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...))
}
func runDockerCp(c *check.C, src, dst string) (err error) {
c.Logf("running `docker cp %s %s`", src, dst)
args := []string{"cp", src, dst}
out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
if err != nil {
err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out)
}
return
}
func startContainerGetOutput(c *check.C, cID string) (out string, err error) {
c.Logf("running `docker start -a %s`", cID)
args := []string{"start", "-a", cID}
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...))
if err != nil {
err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out)
}
return
}
func getTestDir(c *check.C, label string) (tmpDir string) {
var err error
if tmpDir, err = ioutil.TempDir("", label); err != nil {
c.Fatalf("unable to make temporary directory: %s", err)
}
return
}
func isCpNotExist(err error) bool {
return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified")
}
func isCpDirNotExist(err error) bool {
return strings.Contains(err.Error(), archive.ErrDirNotExists.Error())
}
func isCpNotDir(err error) bool {
return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect")
}
func isCpCannotCopyDir(err error) bool {
return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error())
}
func isCpCannotCopyReadOnly(err error) bool {
return strings.Contains(err.Error(), "marked read-only")
}
func isCannotOverwriteNonDirWithDir(err error) bool {
return strings.Contains(err.Error(), "cannot overwrite non-directory")
}
func fileContentEquals(c *check.C, filename, contents string) (err error) {
c.Logf("checking that file %q contains %q\n", filename, contents)
fileBytes, err := ioutil.ReadFile(filename)
if err != nil {
return
}
expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents))
if err != nil {
return
}
if !bytes.Equal(fileBytes, expectedBytes) {
err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes))
}
return
}
func containerStartOutputEquals(c *check.C, cID, contents string) (err error) {
c.Logf("checking that container %q start output contains %q\n", cID, contents)
out, err := startContainerGetOutput(c, cID)
if err != nil {
return err
}
if out != contents {
err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out)
}
return
}
func defaultVolumes(tmpDir string) []string {
if SameHostDaemon.Condition() {
return []string{
"/vol1",
fmt.Sprintf("%s:/vol2", tmpDir),
fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")),
fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")),
}
}
// Can't bind-mount volumes with separate host daemon.
return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"}
}

Просмотреть файл

@ -184,7 +184,7 @@ func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) {
name := "test_create_volume"
dockerCmd(c, "create", "--name", name, "-v", "/foo", "busybox")
dir, err := inspectFieldMap(name, "Volumes", "/foo")
dir, err := inspectMountSourceField(name, "/foo")
if err != nil {
c.Fatalf("Error getting volume host path: %q", err)
}

Просмотреть файл

@ -67,23 +67,23 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {
if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
c.Fatal(err, out)
}
v, err := s.d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1")
if err != nil {
c.Fatal(err)
}
volumes := make(map[string]string)
json.Unmarshal([]byte(v), &volumes)
if _, err := os.Stat(volumes["/foo"]); err != nil {
c.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err)
out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1")
c.Assert(err, check.IsNil)
if _, err := inspectMountPointJSON(out, "/foo"); err != nil {
c.Fatalf("Expected volume to exist: /foo, error: %v\n", err)
}
}


@ -3,7 +3,9 @@ package main
import (
"bufio"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
@ -519,6 +521,7 @@ func (s *DockerSuite) TestEventsCommit(c *check.C) {
func (s *DockerSuite) TestEventsCopy(c *check.C) {
since := daemonTime(c).Unix()
// Build a test image.
id, err := buildImage("cpimg", `
FROM busybox
RUN echo HI > /tmp/file`, true)
@ -526,12 +529,31 @@ func (s *DockerSuite) TestEventsCopy(c *check.C) {
c.Fatalf("Couldn't create image: %q", err)
}
dockerCmd(c, "run", "--name=cptest", id, "true")
dockerCmd(c, "cp", "cptest:/tmp/file", "-")
// Create an empty test file.
tempFile, err := ioutil.TempFile("", "test-events-copy-")
if err != nil {
c.Fatal(err)
}
defer os.Remove(tempFile.Name())
if err := tempFile.Close(); err != nil {
c.Fatal(err)
}
dockerCmd(c, "create", "--name=cptest", id)
dockerCmd(c, "cp", "cptest:/tmp/file", tempFile.Name())
out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since)))
if !strings.Contains(out, " copy\n") {
c.Fatalf("Missing 'copy' log event\n%s", out)
if !strings.Contains(out, " archive-path\n") {
c.Fatalf("Missing 'archive-path' log event\n%s", out)
}
dockerCmd(c, "cp", tempFile.Name(), "cptest:/tmp/filecopy")
out, _ = dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+strconv.Itoa(int(since)))
if !strings.Contains(out, " extract-to-dir\n") {
c.Fatalf("Missing 'extract-to-dir' log event\n%s", out)
}
}


@ -0,0 +1,44 @@
// +build experimental
package main
import (
"github.com/docker/docker/api/types"
"github.com/go-check/check"
)
func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) {
dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:/data", "busybox", "cat")
vol, err := inspectFieldJSON("test", "Mounts")
c.Assert(err, check.IsNil)
var mp []types.MountPoint
err = unmarshalJSON([]byte(vol), &mp)
c.Assert(err, check.IsNil)
if len(mp) != 1 {
c.Fatalf("Expected 1 mount point, was %v\n", len(mp))
}
m := mp[0]
if m.Name != "data" {
c.Fatalf("Expected name data, was %s\n", m.Name)
}
if m.Driver != "local" {
c.Fatalf("Expected driver local, was %s\n", m.Driver)
}
if m.Source == "" {
c.Fatalf("Expected source to not be empty")
}
if m.RW != true {
c.Fatalf("Expected rw to be true")
}
if m.Destination != "/data" {
c.Fatalf("Expected destination /data, was %s\n", m.Destination)
}
}


@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"github.com/docker/docker/api/types"
"github.com/go-check/check"
)
@ -218,3 +219,44 @@ func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) {
c.Fatalf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)
}
}
func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) {
dockerCmd(c, "run", "-d", "--name", "test", "-v", "/data:/data:ro,z", "busybox", "cat")
vol, err := inspectFieldJSON("test", "Mounts")
c.Assert(err, check.IsNil)
var mp []types.MountPoint
err = unmarshalJSON([]byte(vol), &mp)
c.Assert(err, check.IsNil)
if len(mp) != 1 {
c.Fatalf("Expected 1 mount point, was %v\n", len(mp))
}
m := mp[0]
if m.Name != "" {
c.Fatal("Expected name to be empty")
}
if m.Driver != "" {
c.Fatal("Expected driver to be empty")
}
if m.Source != "/data" {
c.Fatalf("Expected source /data, was %s\n", m.Source)
}
if m.Destination != "/data" {
c.Fatalf("Expected destination /data, was %s\n", m.Destination)
}
if m.Mode != "ro,z" {
c.Fatalf("Expected mode `ro,z`, was %s\n", m.Mode)
}
if m.RW != false {
c.Fatalf("Expected rw to be false")
}
}


@ -115,7 +115,7 @@ func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", cleanedContainerID)
if stderr != "" {
c.Fatalf("Expected empty stderr stream, got %v", stdout)
c.Fatalf("Expected empty stderr stream, got %v", stderr)
}
stdout = strings.TrimSpace(stdout)


@ -22,7 +22,7 @@ func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) {
out, _ = dockerCmd(c, "logs", cleanedContainerID)
if out != "foobar\nfoobar\n" {
c.Errorf("container should've printed 'foobar' twice")
c.Errorf("container should've printed 'foobar' twice, got %v", out)
}
}
@ -54,27 +54,27 @@ func (s *DockerSuite) TestRestartWithVolumes(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "-v", "/test", "busybox", "top")
cleanedContainerID := strings.TrimSpace(out)
out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID)
if out = strings.Trim(out, " \n\r"); out != "1" {
c.Errorf("expect 1 volume received %s", out)
}
volumes, err := inspectField(cleanedContainerID, "Volumes")
source, err := inspectMountSourceField(cleanedContainerID, "/test")
c.Assert(err, check.IsNil)
dockerCmd(c, "restart", cleanedContainerID)
out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
out, _ = dockerCmd(c, "inspect", "--format", "{{ len .Mounts }}", cleanedContainerID)
if out = strings.Trim(out, " \n\r"); out != "1" {
c.Errorf("expect 1 volume after restart received %s", out)
}
volumesAfterRestart, err := inspectField(cleanedContainerID, "Volumes")
sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, "/test")
c.Assert(err, check.IsNil)
if volumes != volumesAfterRestart {
c.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart)
if source != sourceAfterRestart {
c.Errorf("expected volume path: %s Actual path: %s", source, sourceAfterRestart)
}
}


@ -33,12 +33,7 @@ func (s *DockerSuite) TestRunEchoStdout(c *check.C) {
// "test" should be printed
func (s *DockerSuite) TestRunEchoStdoutWithMemoryLimit(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "16m", "busybox", "echo", "test")
out = strings.Trim(out, "\r\n")
if expected := "test"; out != expected {
@ -73,12 +68,7 @@ func (s *DockerSuite) TestRunEchoStdoutWitCPULimit(c *check.C) {
// "test" should be printed
func (s *DockerSuite) TestRunEchoStdoutWithCPUAndMemoryLimit(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test")
if out != "test\n" {
c.Errorf("container should've printed 'test', got %q instead", out)
}
@ -931,12 +921,7 @@ func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) {
}
func (s *DockerSuite) TestRunDnsOptions(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
out, stderr, _, err := runCommandWithStdoutStderr(cmd)
if err != nil {
c.Fatal(err, out)
}
out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
if !strings.Contains(stderr, "Localhost DNS setting") {
@ -948,12 +933,7 @@ func (s *DockerSuite) TestRunDnsOptions(c *check.C) {
c.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual)
}
cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
out, _, _, err = runCommandWithStdoutStderr(cmd)
if err != nil {
c.Fatal(err, out)
}
out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
if actual != "nameserver 127.0.0.1" {
@ -1844,24 +1824,23 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
dockerCmd(c, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")
out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/")
c.Assert(err, check.IsNil)
if out != "" {
out, err := inspectMountSourceField("dark_helmet", "/foo/")
if err != mountNotFound {
c.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
out, err = inspectMountSourceField("dark_helmet", "/foo")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /foo\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/")
c.Assert(err, check.IsNil)
if out != "" {
out, err = inspectMountSourceField("dark_helmet", "/bar/")
if err != mountNotFound {
c.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
out, err = inspectMountSourceField("dark_helmet", "/bar")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /bar\n%q", out)
@ -1923,15 +1902,11 @@ func (s *DockerSuite) TestRunExposePort(c *check.C) {
func (s *DockerSuite) TestRunUnknownCommand(c *check.C) {
testRequires(c, NativeExecDriver)
runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada")
cID, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("Failed to create container: %v, output: %q", err, cID)
}
cID = strings.TrimSpace(cID)
out, _, _ := dockerCmdWithStdoutStderr(c, "create", "busybox", "/bin/nada")
runCmd = exec.Command(dockerBinary, "start", cID)
_, _, _, _ = runCommandWithStdoutStderr(runCmd)
cID := strings.TrimSpace(out)
_, _, err := dockerCmdWithError(c, "start", cID)
c.Assert(err, check.NotNil)
rc, err := inspectField(cID, "State.ExitCode")
c.Assert(err, check.IsNil)
@ -2507,14 +2482,37 @@ func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) {
dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true")
dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true")
testRO, err := inspectFieldMap("test-volumes-1", ".VolumesRW", "/test")
mRO, err := inspectMountPoint("test-volumes-1", "/test")
c.Assert(err, check.IsNil)
if testRO != "false" {
if mRO.RW {
c.Fatalf("Expected RO volume was RW")
}
testRW, err := inspectFieldMap("test-volumes-2", ".VolumesRW", "/test")
mRW, err := inspectMountPoint("test-volumes-2", "/test")
c.Assert(err, check.IsNil)
if testRW != "true" {
if !mRW.RW {
c.Fatalf("Expected RW volume was RO")
}
}
func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
testRequires(c, Apparmor)
testWritePaths := []string{
/* modprobe and core_pattern should both be denied by generic
* policy of denials for /proc/sys/kernel. These files have been
* picked to be checked as they are particularly sensitive to writes */
"/proc/sys/kernel/modprobe",
"/proc/sys/kernel/core_pattern",
"/proc/sysrq-trigger",
}
for i, filePath := range testWritePaths {
name := fmt.Sprintf("writeprocsieve-%d", i)
shellCmd := fmt.Sprintf("exec 3>%s", filePath)
runCmd := exec.Command(dockerBinary, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
if out, exitCode, err := runCommandWithOutput(runCmd); err == nil || exitCode == 0 {
c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
}
}
}


@ -100,17 +100,9 @@ func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
testRequires(c, NativeExecDriver)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := parseCgroupPaths(string(data))
selfCpuCgroup, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
name := "cgroup-test"
out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup")
out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
@ -118,16 +110,18 @@ func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
found = false
expectedCgroupPrefix := path.Join(selfCpuCgroup, cgroupParent)
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasPrefix(path, expectedCgroupPrefix) {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths)
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}
@ -135,7 +129,8 @@ func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
testRequires(c, NativeExecDriver)
cgroupParent := "/cgroup-parent/test"
out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup")
name := "cgroup-test"
out, _, err := dockerCmdWithError(c, "run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
if err != nil {
c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
@ -143,15 +138,18 @@ func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
if len(cgroupPaths) == 0 {
c.Fatalf("unexpected output - %q", string(out))
}
id, err := getIDByName(name)
c.Assert(err, check.IsNil)
expectedCgroup := path.Join(cgroupParent, id)
found := false
for _, path := range cgroupPaths {
if strings.HasPrefix(path, cgroupParent) {
if strings.HasSuffix(path, expectedCgroup) {
found = true
break
}
}
if !found {
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", cgroupParent, cgroupPaths)
c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
}
}


@ -16,25 +16,12 @@ import (
// save a repo using gz compression and try to load it using stdout
func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
name := "test-save-xz-and-load-repo-stdout"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %v %v", out, err)
}
dockerCmd(c, "run", "--name", name, "busybox", "true")
repoName := "foobar-save-load-test-xz-gz"
out, _ := dockerCmd(c, "commit", name, repoName)
commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
c.Fatalf("failed to commit container: %v %v", out, err)
}
inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %v %v", before, err)
}
dockerCmd(c, "inspect", repoName)
repoTarball, _, err := runCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", repoName),
@ -52,8 +39,7 @@ func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
c.Fatalf("expected error, but succeeded with no error and output: %v", out)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
after, _, err := dockerCmdWithError(c, "inspect", repoName)
if err == nil {
c.Fatalf("the repo should not exist: %v", after)
}
@ -62,27 +48,14 @@ func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
// save a repo using xz+gz compression and try to load it using stdout
func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
name := "test-save-xz-gz-and-load-repo-stdout"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %v %v", out, err)
}
dockerCmd(c, "run", "--name", name, "busybox", "true")
repoName := "foobar-save-load-test-xz-gz"
dockerCmd(c, "commit", name, repoName)
commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
c.Fatalf("failed to commit container: %v %v", out, err)
}
dockerCmd(c, "inspect", repoName)
inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %v %v", before, err)
}
out, _, err = runCommandPipelineWithOutput(
out, _, err := runCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", repoName),
exec.Command("xz", "-c"),
exec.Command("gzip", "-c"))
@ -99,8 +72,7 @@ func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
c.Fatalf("expected error, but succeeded with no error and output: %v", out)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
after, _, err := dockerCmdWithError(c, "inspect", repoName)
if err == nil {
c.Fatalf("the repo should not exist: %v", after)
}
@ -108,55 +80,34 @@ func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
func (s *DockerSuite) TestSaveSingleTag(c *check.C) {
repoName := "foobar-save-single-tag-test"
dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName))
tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName))
if out, _, err := runCommandWithOutput(tagCmd); err != nil {
c.Fatalf("failed to tag repo: %s, %v", out, err)
}
idCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName)
out, _, err := runCommandWithOutput(idCmd)
if err != nil {
c.Fatalf("failed to get repo ID: %s, %v", out, err)
}
out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName)
cleanedImageID := strings.TrimSpace(out)
out, _, err = runCommandPipelineWithOutput(
out, _, err := runCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)),
exec.Command("tar", "t"),
exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID)))
if err != nil {
c.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
}
}
func (s *DockerSuite) TestSaveImageId(c *check.C) {
repoName := "foobar-save-image-id-test"
dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName))
tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName))
if out, _, err := runCommandWithOutput(tagCmd); err != nil {
c.Fatalf("failed to tag repo: %s, %v", out, err)
}
idLongCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName)
out, _, err := runCommandWithOutput(idLongCmd)
if err != nil {
c.Fatalf("failed to get repo ID: %s, %v", out, err)
}
out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName)
cleanedLongImageID := strings.TrimSpace(out)
idShortCmd := exec.Command(dockerBinary, "images", "-q", repoName)
out, _, err = runCommandWithOutput(idShortCmd)
if err != nil {
c.Fatalf("failed to get repo short ID: %s, %v", out, err)
}
out, _ = dockerCmd(c, "images", "-q", repoName)
cleanedShortImageID := strings.TrimSpace(out)
saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID)
tarCmd := exec.Command("tar", "t")
var err error
tarCmd.Stdin, err = saveCmd.StdoutPipe()
if err != nil {
c.Fatalf("cannot set stdout pipe for tar: %v", err)
@ -181,45 +132,28 @@ func (s *DockerSuite) TestSaveImageId(c *check.C) {
if err != nil {
c.Fatalf("failed to save repo with image ID: %s, %v", out, err)
}
}
// save a repo and try to load it using flags
func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) {
name := "test-save-and-load-repo-flags"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %s, %v", out, err)
}
dockerCmd(c, "run", "--name", name, "busybox", "true")
repoName := "foobar-save-load-test"
commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
deleteImages(repoName)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
c.Fatalf("failed to commit container: %s, %v", out, err)
}
dockerCmd(c, "commit", name, repoName)
inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
before, _ := dockerCmd(c, "inspect", repoName)
}
out, _, err = runCommandPipelineWithOutput(
out, _, err := runCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", repoName),
exec.Command(dockerBinary, "load"))
if err != nil {
c.Fatalf("failed to save and load repo: %s, %v", out, err)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist after loading it: %s, %v", after, err)
}
after, _ := dockerCmd(c, "inspect", repoName)
if before != after {
c.Fatalf("inspect is not the same after a save / load")
}
@ -229,19 +163,12 @@ func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
repoName := "foobar-save-multi-name-test"
// Make one image
tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName))
if out, _, err := runCommandWithOutput(tagCmd); err != nil {
c.Fatalf("failed to tag repo: %s, %v", out, err)
}
dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName))
// Make two images
tagCmd = exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName))
out, _, err := runCommandWithOutput(tagCmd)
if err != nil {
c.Fatalf("failed to tag repo: %s, %v", out, err)
}
dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName))
out, _, err = runCommandPipelineWithOutput(
out, _, err := runCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)),
exec.Command("tar", "xO", "repositories"),
exec.Command("grep", "-q", "-E", "(-one|-two)"),
@ -249,26 +176,18 @@ func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
if err != nil {
c.Fatalf("failed to save multiple repos: %s, %v", out, err)
}
}
func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) {
makeImage := func(from string, tag string) string {
runCmd := exec.Command(dockerBinary, "run", "-d", from, "true")
var (
out string
err error
)
if out, _, err = runCommandWithOutput(runCmd); err != nil {
c.Fatalf("failed to create a container: %v %v", out, err)
}
out, _ = dockerCmd(c, "run", "-d", from, "true")
cleanedContainerID := strings.TrimSpace(out)
commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
c.Fatalf("failed to commit container: %v %v", out, err)
}
out, _ = dockerCmd(c, "commit", cleanedContainerID, tag)
imageID := strings.TrimSpace(out)
return imageID
}
@ -294,11 +213,7 @@ func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) {
actual := strings.Split(strings.TrimSpace(out), "\n")
// make the list of expected layers
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "history", "-q", "--no-trunc", "busybox:latest"))
if err != nil {
c.Fatalf("failed to get history: %s, %v", out, err)
}
out, _ = dockerCmd(c, "history", "-q", "--no-trunc", "busybox:latest")
expected := append(strings.Split(strings.TrimSpace(out), "\n"), idFoo, idBar)
sort.Strings(actual)
@ -306,7 +221,6 @@ func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) {
if !reflect.DeepEqual(expected, actual) {
c.Fatalf("archive does not contains the right layers: got %v, expected %v", actual, expected)
}
}
// Issue #6722 #5892 ensure directories are included in changes


@ -15,24 +15,12 @@ import (
// save a repo and try to load it using stdout
func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
name := "test-save-and-load-repo-stdout"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %s, %v", out, err)
}
dockerCmd(c, "run", "--name", name, "busybox", "true")
repoName := "foobar-save-load-test"
out, _ := dockerCmd(c, "commit", name, repoName)
commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
c.Fatalf("failed to commit container: %s, %v", out, err)
}
inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
}
before, _ := dockerCmd(c, "inspect", repoName)
tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar")
c.Assert(err, check.IsNil)
@ -57,11 +45,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
c.Fatalf("failed to load repo: %s, %v", out, err)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist after loading it: %s %v", after, err)
}
after, _ := dockerCmd(c, "inspect", repoName)
if before != after {
c.Fatalf("inspect is not the same after a save / load")
@ -94,5 +78,4 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) {
c.Fatal("help output is not being yielded", out)
}
}


@ -1,7 +1,6 @@
package main
import (
"os/exec"
"strings"
"github.com/go-check/check"
@ -10,21 +9,19 @@ import (
// search for repos named "registry" on the central registry
func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) {
testRequires(c, Network)
searchCmd := exec.Command(dockerBinary, "search", "busybox")
out, exitCode, err := runCommandWithOutput(searchCmd)
if err != nil || exitCode != 0 {
c.Fatalf("failed to search on the central registry: %s, %v", out, err)
out, exitCode := dockerCmd(c, "search", "busybox")
if exitCode != 0 {
c.Fatalf("failed to search on the central registry: %s", out)
}
if !strings.Contains(out, "Busybox base image.") {
c.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'")
}
}
func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
searchCmdStarsChars := exec.Command(dockerBinary, "search", "--stars=a", "busybox")
out, exitCode, err := runCommandWithOutput(searchCmdStarsChars)
out, exitCode, err := dockerCmdWithError(c, "search", "--stars=a", "busybox")
if err == nil || exitCode == 0 {
c.Fatalf("Should not get right information: %s, %v", out, err)
}
@ -33,8 +30,7 @@ func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
c.Fatal("couldn't find the invalid value warning")
}
searchCmdStarsNegativeNumber := exec.Command(dockerBinary, "search", "-s=-1", "busybox")
out, exitCode, err = runCommandWithOutput(searchCmdStarsNegativeNumber)
out, exitCode, err = dockerCmdWithError(c, "search", "-s=-1", "busybox")
if err == nil || exitCode == 0 {
c.Fatalf("Should not get right information: %s, %v", out, err)
}
@ -42,64 +38,54 @@ func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
if !strings.Contains(out, "invalid value") {
c.Fatal("couldn't find the invalid value warning")
}
}
func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
testRequires(c, Network)
searchCmdhelp := exec.Command(dockerBinary, "search", "--help")
out, exitCode, err := runCommandWithOutput(searchCmdhelp)
if err != nil || exitCode != 0 {
c.Fatalf("failed to get search help information: %s, %v", out, err)
out, exitCode := dockerCmd(c, "search", "--help")
if exitCode != 0 {
c.Fatalf("failed to get search help information: %s", out)
}
if !strings.Contains(out, "Usage:\tdocker search [OPTIONS] TERM") {
c.Fatalf("failed to show docker search usage: %s, %v", out, err)
c.Fatalf("failed to show docker search usage: %s", out)
}
searchCmd := exec.Command(dockerBinary, "search", "busybox")
outSearchCmd, exitCode, err := runCommandWithOutput(searchCmd)
if err != nil || exitCode != 0 {
c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmd, err)
outSearchCmd, exitCode := dockerCmd(c, "search", "busybox")
if exitCode != 0 {
c.Fatalf("failed to search on the central registry: %s", outSearchCmd)
}
searchCmdNotrunc := exec.Command(dockerBinary, "search", "--no-trunc=true", "busybox")
outSearchCmdNotrunc, _, err := runCommandWithOutput(searchCmdNotrunc)
if err != nil {
c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmdNotrunc, err)
}
outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox")
if len(outSearchCmd) > len(outSearchCmdNotrunc) {
c.Fatalf("The no-trunc option can't take effect.")
}
searchCmdautomated := exec.Command(dockerBinary, "search", "--automated=true", "busybox")
outSearchCmdautomated, exitCode, err := runCommandWithOutput(searchCmdautomated) //The busybox is a busybox base image, not an AUTOMATED image.
if err != nil || exitCode != 0 {
c.Fatalf("failed to search with automated=true on the central registry: %s, %v", outSearchCmdautomated, err)
outSearchCmdautomated, exitCode := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
if exitCode != 0 {
c.Fatalf("failed to search with automated=true on the central registry: %s", outSearchCmdautomated)
}
outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
for i := range outSearchCmdautomatedSlice {
if strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox ") {
c.Fatalf("The busybox is not an AUTOMATED image: %s, %v", out, err)
c.Fatalf("The busybox is not an AUTOMATED image: %s", out)
}
}
searchCmdStars := exec.Command(dockerBinary, "search", "-s=2", "busybox")
outSearchCmdStars, exitCode, err := runCommandWithOutput(searchCmdStars)
if err != nil || exitCode != 0 {
c.Fatalf("failed to search with stars=2 on the central registry: %s, %v", outSearchCmdStars, err)
outSearchCmdStars, exitCode := dockerCmd(c, "search", "-s=2", "busybox")
if exitCode != 0 {
c.Fatalf("failed to search with stars=2 on the central registry: %s", outSearchCmdStars)
}
if strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]") {
c.Fatalf("The quantity of images with stars should be less than that of all images: %s, %v", outSearchCmdStars, err)
c.Fatalf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)
}
searchCmdOptions := exec.Command(dockerBinary, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
out, exitCode, err = runCommandWithOutput(searchCmdOptions)
if err != nil || exitCode != 0 {
c.Fatalf("failed to search with stars&automated&no-trunc options on the central registry: %s, %v", out, err)
out, exitCode = dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
if exitCode != 0 {
c.Fatalf("failed to search with stars&automated&no-trunc options on the central registry: %s", out)
}
}


@ -4,7 +4,6 @@ package main
import (
"fmt"
"os/exec"
"strings"
"github.com/go-check/check"
@ -23,9 +22,7 @@ func assertSrvNotAvailable(c *check.C, sname, name string) {
}
func isSrvPresent(c *check.C, sname, name string) bool {
runCmd := exec.Command(dockerBinary, "service", "ls")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
out, _, _ := dockerCmdWithStdoutStderr(c, "service", "ls")
lines := strings.Split(out, "\n")
for i := 1; i < len(lines)-1; i++ {
if strings.Contains(lines[i], sname) && strings.Contains(lines[i], name) {
@ -36,9 +33,7 @@ func isSrvPresent(c *check.C, sname, name string) bool {
}
func isCntPresent(c *check.C, cname, sname, name string) bool {
runCmd := exec.Command(dockerBinary, "service", "ls", "--no-trunc")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
out, _, _ := dockerCmdWithStdoutStderr(c, "service", "ls", "--no-trunc")
lines := strings.Split(out, "\n")
for i := 1; i < len(lines)-1; i++ {
fmt.Println(lines)
@ -50,37 +45,25 @@ func isCntPresent(c *check.C, cname, sname, name string) bool {
}
func (s *DockerSuite) TestDockerServiceCreateDelete(c *check.C) {
runCmd := exec.Command(dockerBinary, "network", "create", "test")
_, _, _, err := runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
dockerCmdWithStdoutStderr(c, "network", "create", "test")
assertNwIsAvailable(c, "test")
runCmd = exec.Command(dockerBinary, "service", "publish", "s1.test")
_, _, _, err = runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
dockerCmdWithStdoutStderr(c, "service", "publish", "s1.test")
assertSrvIsAvailable(c, "s1", "test")
runCmd = exec.Command(dockerBinary, "service", "unpublish", "s1.test")
_, _, _, err = runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
dockerCmdWithStdoutStderr(c, "service", "unpublish", "s1.test")
assertSrvNotAvailable(c, "s1", "test")
runCmd = exec.Command(dockerBinary, "network", "rm", "test")
_, _, _, err = runCommandWithStdoutStderr(runCmd)
c.Assert(err, check.IsNil)
dockerCmdWithStdoutStderr(c, "network", "rm", "test")
assertNwNotAvailable(c, "test")
}
func (s *DockerSuite) TestDockerPublishServiceFlag(c *check.C) {
// Run saying the container is the backend for the specified service on the specified network
runCmd := exec.Command(dockerBinary, "run", "-d", "--expose=23", "--publish-service", "telnet.production", "busybox", "top")
out, _, err := runCommandWithOutput(runCmd)
c.Assert(err, check.IsNil)
out, _ := dockerCmd(c, "run", "-d", "--expose=23", "--publish-service", "telnet.production", "busybox", "top")
cid := strings.TrimSpace(out)
// Verify container is attached in service ps o/p
assertSrvIsAvailable(c, "telnet", "production")
runCmd = exec.Command(dockerBinary, "rm", "-f", cid)
out, _, err = runCommandWithOutput(runCmd)
c.Assert(err, check.IsNil)
dockerCmd(c, "rm", "-f", cid)
}


@ -2,7 +2,6 @@ package main
import (
"fmt"
"os/exec"
"strings"
"time"
@ -11,12 +10,11 @@ import (
// Regression test for https://github.com/docker/docker/issues/7843
func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) {
dockerCmd(c, "run", "-d", "--name", "test", "busybox")
dockerCmd(c, "wait", "test")
// Expect this to fail because the above container is stopped; this is what we want
if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil {
if _, _, err := dockerCmdWithError(c, "run", "-d", "--name", "test2", "--link", "test:test", "busybox"); err == nil {
c.Fatal("Expected error but got none")
}
@ -24,7 +22,7 @@ func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) {
go func() {
// Attempt to start attached to the container that won't start
// This should return an error immediately since the container can't be started
if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil {
if _, _, err := dockerCmdWithError(c, "start", "-a", "test2"); err == nil {
ch <- fmt.Errorf("Expected error but got none")
}
close(ch)
@ -36,28 +34,17 @@ func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) {
case <-time.After(time.Second):
c.Fatalf("Attach did not exit properly")
}
}
// gh#8555: Exit code should be passed through when using start -a
func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1")
out = strings.TrimSpace(out)
// make sure the container has exited before trying the "start -a"
waitCmd := exec.Command(dockerBinary, "wait", out)
if _, _, err = runCommandWithOutput(waitCmd); err != nil {
c.Fatalf("Failed to wait on container: %v", err)
}
dockerCmd(c, "wait", out)
startCmd := exec.Command(dockerBinary, "start", "-a", out)
startOut, exitCode, err := runCommandWithOutput(startCmd)
startOut, exitCode, err := dockerCmdWithError(c, "start", "-a", out)
if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) {
c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
}
@ -68,29 +55,16 @@ func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) {
}
func (s *DockerSuite) TestStartAttachSilent(c *check.C) {
name := "teststartattachcorrectexitcode"
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "echo", "test")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
dockerCmd(c, "run", "--name", name, "busybox", "echo", "test")
// make sure the container has exited before trying the "start -a"
waitCmd := exec.Command(dockerBinary, "wait", name)
if _, _, err = runCommandWithOutput(waitCmd); err != nil {
c.Fatalf("wait command failed with error: %v", err)
}
dockerCmd(c, "wait", name)
startCmd := exec.Command(dockerBinary, "start", "-a", name)
startOut, _, err := runCommandWithOutput(startCmd)
if err != nil {
c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
}
startOut, _ := dockerCmd(c, "start", "-a", name)
if expected := "test\n"; startOut != expected {
c.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut)
}
}
func (s *DockerSuite) TestStartRecordError(c *check.C) {
@ -104,10 +78,11 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
}
// Expect this to fail and records error because of ports conflict
out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top"))
out, _, err := dockerCmdWithError(c, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")
if err == nil {
c.Fatalf("Expected error but got none, output %q", out)
}
stateErr, err = inspectField("test2", "State.Error")
c.Assert(err, check.IsNil)
expected := "port is already allocated"
@ -123,47 +98,31 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
if stateErr != "" {
c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
}
}
func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
defer unpauseAllContainers()
runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top")
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
runCmd = exec.Command(dockerBinary, "pause", "testing")
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "pause", "testing")
runCmd = exec.Command(dockerBinary, "start", "testing")
if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") {
if out, _, err := dockerCmdWithError(c, "start", "testing"); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") {
c.Fatalf("an error should have been shown that you cannot start paused container: %s\n%v", out, err)
}
}
func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
// run a container named 'parent' and create two container link to `parent`
cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top")
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
for _, container := range []string{"child_first", "child_second"} {
cmd = exec.Command(dockerBinary, "create", "--name", container, "--link", "parent:parent", "busybox", "top")
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top")
}
// stop 'parent' container
cmd = exec.Command(dockerBinary, "stop", "parent")
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "stop", "parent")
out, err := inspectField("parent", "State.Running")
c.Assert(err, check.IsNil)
if out != "false" {
@ -172,8 +131,7 @@ func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
// start all three containers; `child_first` starts first and is expected to fail,
// then container 'parent' starts, followed by container 'child_second'
cmd = exec.Command(dockerBinary, "start", "child_first", "parent", "child_second")
out, _, err = runCommandWithOutput(cmd)
out, _, err = dockerCmdWithError(c, "start", "child_first", "parent", "child_second")
if !strings.Contains(out, "Cannot start container child_first") || err == nil {
c.Fatal("Expected error but got none")
}
@ -186,33 +144,22 @@ func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
}
}
}
func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
var cmd *exec.Cmd
// run multiple containers to test
for _, container := range []string{"test1", "test2", "test3"} {
cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top")
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "run", "-d", "--name", container, "busybox", "top")
}
// stop all the containers
for _, container := range []string{"test1", "test2", "test3"} {
cmd = exec.Command(dockerBinary, "stop", container)
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
dockerCmd(c, "stop", container)
}
// test start and attach multiple containers at once, expected error
for _, option := range []string{"-a", "-i", "-ai"} {
cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3")
out, _, err := runCommandWithOutput(cmd)
out, _, err := dockerCmdWithError(c, "start", option, "test1", "test2", "test3")
if !strings.Contains(out, "You cannot start and attach multiple containers at once.") || err == nil {
c.Fatal("Expected error but got none")
}
@ -228,5 +175,4 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
c.Fatal("Container running state wrong")
}
}
}


@ -1,4 +1,3 @@
// +build experimental
// +build !windows
package main


@ -20,6 +20,7 @@ import (
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringutils"
@ -880,6 +881,46 @@ func inspectFieldMap(name, path, field string) (string, error) {
return inspectFilter(name, fmt.Sprintf("index .%s %q", path, field))
}
func inspectMountSourceField(name, destination string) (string, error) {
m, err := inspectMountPoint(name, destination)
if err != nil {
return "", err
}
return m.Source, nil
}
func inspectMountPoint(name, destination string) (types.MountPoint, error) {
out, err := inspectFieldJSON(name, "Mounts")
if err != nil {
return types.MountPoint{}, err
}
return inspectMountPointJSON(out, destination)
}
var mountNotFound = errors.New("mount point not found")
func inspectMountPointJSON(j, destination string) (types.MountPoint, error) {
var mp []types.MountPoint
if err := unmarshalJSON([]byte(j), &mp); err != nil {
return types.MountPoint{}, err
}
var m *types.MountPoint
for _, c := range mp {
if c.Destination == destination {
m = &c
break
}
}
if m == nil {
return types.MountPoint{}, mountNotFound
}
return *m, nil
}
func getIDByName(name string) (string, error) {
return inspectField(name, "Id")
}
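// Illustrative sketch (not part of this change): one way a test could exercise
// the mount-inspection helpers above. The container name and mount paths are
// hypothetical; dockerCmd, c.Assert, and mountNotFound are the existing helpers
// shown in this file.
func (s *DockerSuite) TestInspectMountHelpersSketch(c *check.C) {
	// Start a container with an anonymous volume mounted at /data.
	dockerCmd(c, "run", "-d", "--name", "mounts-sketch", "-v", "/data", "busybox", "top")

	// Resolve the host directory backing the /data mount point.
	source, err := inspectMountSourceField("mounts-sketch", "/data")
	c.Assert(err, check.IsNil)
	if source == "" {
		c.Fatal("expected a non-empty host source for /data")
	}

	// A destination that was never mounted reports mountNotFound.
	if _, err := inspectMountSourceField("mounts-sketch", "/never-mounted"); err != mountNotFound {
		c.Fatalf("expected mountNotFound, got %v", err)
	}
}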


@ -8,9 +8,15 @@ import (
"github.com/docker/docker/pkg/nat"
)
// Just to make life easier
func newPortNoError(proto, port string) nat.Port {
p, _ := nat.NewPort(proto, port)
return p
}
func TestLinkNaming(t *testing.T) {
ports := make(nat.PortSet)
ports[nat.Port("6379/tcp")] = struct{}{}
ports[newPortNoError("tcp", "6379")] = struct{}{}
link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports)
if err != nil {
@ -40,7 +46,7 @@ func TestLinkNaming(t *testing.T) {
func TestLinkNew(t *testing.T) {
ports := make(nat.PortSet)
ports[nat.Port("6379/tcp")] = struct{}{}
ports[newPortNoError("tcp", "6379")] = struct{}{}
link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports)
if err != nil {
@ -63,7 +69,7 @@ func TestLinkNew(t *testing.T) {
t.Fail()
}
for _, p := range link.Ports {
if p != nat.Port("6379/tcp") {
if p != newPortNoError("tcp", "6379") {
t.Fail()
}
}
@ -71,7 +77,7 @@ func TestLinkNew(t *testing.T) {
func TestLinkEnv(t *testing.T) {
ports := make(nat.PortSet)
ports[nat.Port("6379/tcp")] = struct{}{}
ports[newPortNoError("tcp", "6379")] = struct{}{}
link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
if err != nil {
@ -112,9 +118,9 @@ func TestLinkEnv(t *testing.T) {
func TestLinkMultipleEnv(t *testing.T) {
ports := make(nat.PortSet)
ports[nat.Port("6379/tcp")] = struct{}{}
ports[nat.Port("6380/tcp")] = struct{}{}
ports[nat.Port("6381/tcp")] = struct{}{}
ports[newPortNoError("tcp", "6379")] = struct{}{}
ports[newPortNoError("tcp", "6380")] = struct{}{}
ports[newPortNoError("tcp", "6381")] = struct{}{}
link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
if err != nil {
@ -161,9 +167,9 @@ func TestLinkMultipleEnv(t *testing.T) {
func TestLinkPortRangeEnv(t *testing.T) {
ports := make(nat.PortSet)
ports[nat.Port("6379/tcp")] = struct{}{}
ports[nat.Port("6380/tcp")] = struct{}{}
ports[nat.Port("6381/tcp")] = struct{}{}
ports[newPortNoError("tcp", "6379")] = struct{}{}
ports[newPortNoError("tcp", "6380")] = struct{}{}
ports[newPortNoError("tcp", "6381")] = struct{}{}
link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
if err != nil {


@ -1,7 +1,7 @@
FROM golang:1.4
RUN mkdir -p /go/src/github.com/cpuguy83
RUN mkdir -p /go/src/github.com/cpuguy83 \
&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
&& git clone -b v1.0.3 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
&& cd /go/src/github.com/cpuguy83/go-md2man \
&& go get -v ./...
CMD ["/go/bin/go-md2man", "--help"]


@ -2,69 +2,150 @@
% Docker Community
% JUNE 2014
# NAME
docker-cp - Copy files or folders from a container's PATH to a HOSTDIR
or to STDOUT.
docker-cp - Copy files/folders between a container and the local filesystem.
# SYNOPSIS
**docker cp**
[**--help**]
CONTAINER:PATH HOSTDIR|-
CONTAINER:PATH LOCALPATH|-
LOCALPATH|- CONTAINER:PATH
# DESCRIPTION
Copy files or folders from a `CONTAINER:PATH` to the `HOSTDIR` or to `STDOUT`.
The `CONTAINER:PATH` is relative to the root of the container's filesystem. You
can copy from either a running or stopped container.
In the first synopsis form, the `docker cp` utility copies the contents of
`PATH` from the filesystem of `CONTAINER` to the `LOCALPATH` (or stream as
a tar archive to `STDOUT` if `-` is specified).
The `PATH` can be a file or directory. The `docker cp` command assumes all
`PATH` values start at the `/` (root) directory. This means supplying the
initial forward slash is optional; The command sees
In the second synopsis form, the contents of `LOCALPATH` (or a tar archive
streamed from `STDIN` if `-` is specified) are copied from the local machine to
`PATH` in the filesystem of `CONTAINER`.
You can copy to or from either a running or stopped container. The `PATH` can
be a file or directory. The `docker cp` command assumes all `CONTAINER:PATH`
values are relative to the `/` (root) directory of the container. This means
supplying the initial forward slash is optional; the command sees
`compassionate_darwin:/tmp/foo/myfile.txt` and
`compassionate_darwin:tmp/foo/myfile.txt` as identical.
`compassionate_darwin:tmp/foo/myfile.txt` as identical. If a `LOCALPATH` value
is not absolute, it is considered relative to the current working directory.
The `HOSTDIR` refers to a directory on the host. If you do not specify an
absolute path for your `HOSTDIR` value, Docker creates the directory relative to
where you run the `docker cp` command. For example, suppose you want to copy the
`/tmp/foo` directory from a container to the `/tmp` directory on your host. If
you run `docker cp` in your `~` (home) directory on the host:
Behavior is similar to the common Unix utility `cp -a` in that directories are
copied recursively with permissions preserved if possible. Ownership is set to
the user and primary group on the receiving end of the transfer. For example,
files copied to a container will be created with `UID:GID` of the root user.
Files copied to the local machine will be created with the `UID:GID` of the
user which invoked the `docker cp` command.
$ docker cp compassionate_darwin:tmp/foo /tmp
Assuming a path separator of `/`, a first argument of `SRC_PATH` and second
argument of `DST_PATH`, the behavior is as follows:
Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit
the leading slash in the command. If you execute this command from your home directory:
- `SRC_PATH` specifies a file
    - `DST_PATH` does not exist
        - the file is saved to a file created at `DST_PATH`
    - `DST_PATH` does not exist and ends with `/`
        - Error condition: the destination directory must exist.
    - `DST_PATH` exists and is a file
        - the destination is overwritten with the contents of the source file
    - `DST_PATH` exists and is a directory
        - the file is copied into this directory using the basename from
          `SRC_PATH`
- `SRC_PATH` specifies a directory
    - `DST_PATH` does not exist
        - `DST_PATH` is created as a directory and the *contents* of the source
          directory are copied into this directory
    - `DST_PATH` exists and is a file
        - Error condition: cannot copy a directory to a file
    - `DST_PATH` exists and is a directory
        - `SRC_PATH` does not end with `/.`
            - the source directory is copied into this directory
        - `SRC_PATH` does end with `/.`
            - the *content* of the source directory is copied into this
              directory
$ docker cp compassionate_darwin:tmp/foo tmp
The command requires `SRC_PATH` and `DST_PATH` to exist according to the above
rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not
the target, is copied.
Docker creates a `~/tmp/foo` subdirectory.
A colon (`:`) is used as a delimiter between `CONTAINER` and `PATH`, but `:`
could also be in a valid `LOCALPATH`, like `file:name.txt`. This ambiguity is
resolved by requiring a `LOCALPATH` with a `:` to be made explicit with a
relative or absolute path, for example:
When copying files to an existing `HOSTDIR`, the `cp` command adds the new files to
the directory. For example, this command:
`/path/to/file:name.txt` or `./file:name.txt`
$ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /tmp
It is not possible to copy certain system files such as resources under
`/proc`, `/sys`, `/dev`, and mounts created by the user in the container.
Creates a `/tmp/foo` directory on the host containing the `myfile.txt` file. If
you repeat the command but change the filename:
Using `-` as the first argument in place of a `LOCALPATH` will stream the
contents of `STDIN` as a tar archive which will be extracted to the `PATH` in
the filesystem of the destination container. In this case, `PATH` must specify
a directory.
$ docker cp sharp_ptolemy:/tmp/foo/secondfile.txt /tmp
Your host's `/tmp/foo` directory will contain both files:
$ ls /tmp/foo
myfile.txt secondfile.txt
Finally, use '-' to write the data as a `tar` file to STDOUT.
Using `-` as the second argument in place of a `LOCALPATH` will stream the
contents of the resource from the source container as a tar archive to
`STDOUT`.
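For example (the container name and paths below are only illustrative), the two
streaming forms can be combined with the local `tar` utility:

$ docker cp mycontainer:/var/log/app - > app-logs.tar

$ tar -cf - ./config | docker cp - mycontainer:/etc/my-app.d

The first command writes the contents of `/var/log/app` from the container to a
local tar archive; the second streams a tar archive of the local `./config`
directory to `docker cp`, which extracts it into the existing `/etc/my-app.d`
directory in the container.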
# OPTIONS
**--help**
Print usage statement
# EXAMPLES
An important shell script file, created in a bash shell, is copied from
the exited container to the current dir on the host:
# docker cp c071f3c3ee81:setup.sh .
Suppose a container has finished producing some output as a file it saves
somewhere in its filesystem. This could be the output of a build job or
some other computation. You can copy these outputs from the container to a
location on your local host.
Suppose you want to copy the `/tmp/foo` directory from a container to the
existing `/tmp` directory on your host. If you run `docker cp` in your `~`
(home) directory on the local host:
$ docker cp compassionate_darwin:tmp/foo /tmp
Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit
the leading slash in the command. If you execute this command from your home
directory:
$ docker cp compassionate_darwin:tmp/foo tmp
If `~/tmp` does not exist, Docker will create it and copy the contents of
`/tmp/foo` from the container into this new directory. If `~/tmp` already
exists as a directory, then Docker will copy the contents of `/tmp/foo` from
the container into a directory at `~/tmp/foo`.
When copying a single file to an existing `LOCALPATH`, the `docker cp` command
will either overwrite the contents of `LOCALPATH` if it is a file or place it
into `LOCALPATH` if it is a directory, overwriting an existing file of the same
name if one exists. For example, this command:
$ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test
If `/test` does not exist on the local machine, it will be created as a file
with the contents of `/tmp/foo/myfile.txt` from the container. If `/test`
exists as a file, it will be overwritten. Lastly, if `/test` exists as a
directory, the file will be copied to `/test/myfile.txt`.
Next, suppose you want to copy a file or folder into a container. For example,
this could be a configuration file or some other input to a long running
computation that you would like to place into a created container before it
starts. This is useful because it does not require the configuration file or
other input to exist in the container image.
If you have a file, `config.yml`, in the current directory on your local host
and wish to copy it to an existing directory at `/etc/my-app.d` in a container,
this command can be used:
$ docker cp config.yml myappcontainer:/etc/my-app.d
If you have several files in a local directory `/config` which you need to copy
to a directory `/etc/my-app.d` in a container:
$ docker cp /config/. myappcontainer:/etc/my-app.d
The above command will copy the contents of the local `/config` directory into
the directory `/etc/my-app.d` in the container.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
based on docker.com source material and internal work.
June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
May 2015, updated by Josh Hawn <josh.hawn@docker.com>


@ -95,8 +95,14 @@ To get information on a container use its ID or instance name:
"ExecDriver": "native-0.2",
"MountLabel": "",
"ProcessLabel": "",
"Volumes": {},
"VolumesRW": {},
"Mounts": [
{
"Source": "/data",
"Destination": "/data",
"Mode": "ro,Z",
"RW": false
}
],
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {


@ -25,15 +25,23 @@ import (
)
type (
Archive io.ReadCloser
ArchiveReader io.Reader
Compression int
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
Name string
Archive io.ReadCloser
ArchiveReader io.Reader
Compression int
TarChownOptions struct {
UID, GID int
}
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
ChownOpts *TarChownOptions
Name string
IncludeSourceDir bool
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
}
// Archiver allows the reuse of most utility functions of this package
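// Illustrative sketch (assumed usage, not part of this change): the new
// ChownOpts and NoOverwriteDirNonDir fields can be combined when unpacking an
// archive through this package's existing Untar entry point. The helper name
// and arguments below are hypothetical.
func untarAsRoot(content io.Reader, dstDir string) error {
	opts := &TarOptions{
		// Force ownership of every created entry to root, regardless of the
		// UID/GID recorded in the tar headers.
		ChownOpts: &TarChownOptions{UID: 0, GID: 0},
		// Fail instead of silently replacing an existing directory with a
		// non-directory entry (or vice versa).
		NoOverwriteDirNonDir: true,
	}
	return Untar(content, dstDir, opts)
}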
@ -262,7 +270,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@ -328,9 +336,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
}
// Lchown is not supported on Windows
if runtime.GOOS != "windows" {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
// Lchown is not supported on Windows.
if Lchown && runtime.GOOS != "windows" {
if chownOpts == nil {
chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
}
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
return err
}
}
@ -396,6 +407,20 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
}
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
}
}()
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
@ -404,7 +429,26 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// mutating the filesystem and we can see transient errors
// from this
if options.IncludeFiles == nil {
stat, err := os.Lstat(srcPath)
if err != nil {
return
}
if !stat.IsDir() {
// We can't later join a non-dir with any includes because the
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
options.IncludeFiles = []string{base}
}
if len(options.IncludeFiles) == 0 {
options.IncludeFiles = []string{"."}
}
@ -412,19 +456,26 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
var renamedRelFilePath string // For when tar.Options.Name is set
for _, include := range options.IncludeFiles {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
// We can't use filepath.Join(srcPath, include) because this will
// clean away a trailing "." or "/" which may be important.
walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil || (relFilePath == "." && f.IsDir()) {
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
// Error getting relative path OR we are looking
// at the root path. Skip in both situations.
// at the source directory path. Skip in both situations.
return nil
}
if options.IncludeSourceDir && include == "." && relFilePath != "." {
relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
}
skip := false
// If "include" is an exact match for the current file
@ -468,17 +519,6 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return nil
})
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
}
}()
return pipeReader, nil
@ -543,9 +583,22 @@ loop:
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
// If NoOverwriteDirNonDir is true then we cannot replace
// an existing directory with a non-directory from the archive.
return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
}
if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
// If NoOverwriteDirNonDir is true then we cannot replace
// an existing non-directory with a directory from the archive.
return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
}
if fi.IsDir() && hdr.Name == "." {
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
@ -553,7 +606,8 @@ loop:
}
}
trBuf.Reset(tr)
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
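// A minimal usage sketch (illustrative only; the destination path and the
// wrapper function name are arbitrary examples): unpacking an archive with
// the new options, forcing ownership to root and refusing to replace a
// directory with a non-directory or vice versa.
func exampleUntarWithOptions(archive io.Reader) error {
	return Untar(archive, "/var/lib/app", &TarOptions{
		ChownOpts:            &TarChownOptions{UID: 0, GID: 0},
		NoOverwriteDirNonDir: true,
	})
}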

Просмотреть файл

@ -719,7 +719,7 @@ func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil)
if err != nil {
t.Fatal(err)
}

308
pkg/archive/copy.go Normal file
Просмотреть файл

@ -0,0 +1,308 @@
package archive
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
log "github.com/Sirupsen/logrus"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
if !HasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
}
cleanedPath += "."
}
if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
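// examplePreserveTrailingDot is an illustrative sketch (the function name and
// paths are hypothetical): filepath.Clean strips a trailing "/." or "/", and
// this helper restores it so that "copy the directory contents" intent is not
// lost.
func examplePreserveTrailingDot() {
	cleaned := filepath.Clean("/data/.")                           // "/data"
	restored := PreserveTrailingDotOrSeparator(cleaned, "/data/.") // "/data/."
	_ = restored
}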
// AssertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func AssertsDirectory(path string) bool {
return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
}
// HasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func HasTrailingPathSeparator(path string) bool {
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}
// SpecifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func SpecifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its
// parent directory and its basename in that directory.
func SplitPathDirEntry(localizedPath string) (dir, base string) {
normalizedPath := filepath.ToSlash(localizedPath)
vol := filepath.VolumeName(normalizedPath)
normalizedPath = normalizedPath[len(vol):]
if normalizedPath == "/" {
// Specifies the root path.
return filepath.FromSlash(vol + normalizedPath), "."
}
trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
dir = filepath.FromSlash(path.Dir(trimmedPath))
base = filepath.FromSlash(path.Base(trimmedPath))
return dir, base
}
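// exampleSplitPathDirEntry is an illustrative sketch (the function name and
// inputs are hypothetical) showing how a path is split into its parent
// directory and the entry within it.
func exampleSplitPathDirEntry() {
	dir, base := SplitPathDirEntry("/var/log/app/") // dir == "/var/log", base == "app"
	_, _ = dir, base
	dir, base = SplitPathDirEntry("/") // the root path splits into "/" and "."
	_, _ = dir, base
}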
// TarResource archives the resource at the given sourcePath into a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourcePath string) (content Archive, err error) {
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
// In the case where the source path is a symbolic link AND it ends
// with a path separator, we will want to evaluate the symbolic link.
trimmedPath := sourcePath[:len(sourcePath)-1]
stat, err := os.Lstat(trimmedPath)
if err != nil {
return nil, err
}
if stat.Mode()&os.ModeSymlink != 0 {
if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
return nil, err
}
}
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
filter := []string{sourceBase}
log.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
})
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
}
// CopyInfoStatPath stats the given path to create a CopyInfo
// struct representing that resource. If mustExist is true, then
// it is an error if there is no file or directory at the given path.
func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
pathInfo := CopyInfo{Path: path}
fileInfo, err := os.Lstat(path)
if err == nil {
pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
} else if os.IsNotExist(err) && !mustExist {
err = nil
}
return pathInfo, err
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case AssertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
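// examplePrepareArchiveCopy is an illustrative sketch (the function name and
// paths are hypothetical): copying a single file into an existing destination
// directory needs no rebasing, so the archive is passed through unchanged and
// extracted into the destination directory itself.
func examplePrepareArchiveCopy(srcContent ArchiveReader) (string, Archive, error) {
	srcInfo := CopyInfo{Path: "/tmp/app.conf", Exists: true, IsDir: false}
	dstInfo := CopyInfo{Path: "/etc/app.d", Exists: true, IsDir: true}
	return PrepareArchiveCopy(srcContent, srcInfo, dstInfo)
}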
// rebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string) error {
var (
srcInfo CopyInfo
err error
)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
return err
}
content, err := TarResource(srcPath)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
dstInfo, err := CopyInfoStatPath(dstPath, false)
if err != nil {
return err
}
if !dstInfo.Exists {
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(dstPath)
dstStat, err := os.Lstat(dstParent)
if err != nil {
return err
}
if !dstStat.IsDir() {
return ErrNotDirectory
}
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
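// exampleCopyResource is an illustrative sketch (the function name and paths
// are hypothetical): ending the source path with "/." copies the directory's
// contents rather than the directory itself, mirroring the `docker cp`
// semantics documented earlier on this page.
func exampleCopyResource() error {
	return CopyResource("/data/.", "/backup")
}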

637
pkg/archive/copy_test.go Normal file
Просмотреть файл

@ -0,0 +1,637 @@
package archive
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
func removeAllPaths(paths ...string) {
for _, path := range paths {
os.RemoveAll(path)
}
}
func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
var err error
if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
t.Fatal(err)
}
if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
t.Fatal(err)
}
return
}
func isNotDir(err error) bool {
return strings.Contains(err.Error(), "not a directory")
}
func joinTrailingSep(pathElements ...string) string {
joined := filepath.Join(pathElements...)
return fmt.Sprintf("%s%c", joined, filepath.Separator)
}
func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
fileA, err := os.Open(filenameA)
if err != nil {
return
}
defer fileA.Close()
fileB, err := os.Open(filenameB)
if err != nil {
return
}
defer fileB.Close()
hasher := sha256.New()
if _, err = io.Copy(hasher, fileA); err != nil {
return
}
hashA := hasher.Sum(nil)
hasher.Reset()
if _, err = io.Copy(hasher, fileB); err != nil {
return
}
hashB := hasher.Sum(nil)
if !bytes.Equal(hashA, hashB) {
err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
}
return
}
func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
var changes []Change
if changes, err = ChangesDirs(newDir, oldDir); err != nil {
return
}
if len(changes) != 0 {
err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
}
return
}
func logDirContents(t *testing.T, dirPath string) {
logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Errorf("stat error for path %q: %s", path, err)
return nil
}
if info.IsDir() {
path = joinTrailingSep(path)
}
t.Logf("\t%s", path)
return nil
})
t.Logf("logging directory contents: %q", dirPath)
if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
t.Fatal(err)
}
}
func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
t.Logf("copying from %q to %q", srcPath, dstPath)
return CopyResource(srcPath, dstPath)
}
// Basic assumptions about SRC and DST:
// 1. SRC must exist.
// 2. If SRC ends with a trailing separator, it must be a directory.
// 3. DST parent directory must exist.
// 4. If DST exists as a file, it must not end with a trailing separator.
// First get these easy error cases out of the way.
// Test for error when SRC does not exist.
func TestCopyErrSrcNotExists(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
content, err := TarResource(filepath.Join(tmpDirA, "file1"))
if err == nil {
content.Close()
t.Fatal("expected IsNotExist error, but got nil instead")
}
if !os.IsNotExist(err) {
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when SRC ends in a trailing
// path separator but it exists as a file.
func TestCopyErrSrcNotDir(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
content, err := TarResource(joinTrailingSep(tmpDirA, "file1"))
if err == nil {
content.Close()
t.Fatal("expected IsNotDir error, but got nil instead")
}
if !isNotDir(err) {
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
}
// Test for error when SRC is a valid file or directory,
// but the DST parent directory does not exist.
func TestCopyErrDstParentNotExists(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
// Try with a file source.
content, err := TarResource(srcInfo.Path)
if err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
defer content.Close()
// Copy to a file whose parent does not exist.
if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
t.Fatal("expected IsNotExist error, but got nil instead")
}
if !os.IsNotExist(err) {
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
// Try with a directory source.
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
content, err = TarResource(srcInfo.Path)
if err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
defer content.Close()
// Copy to a directory whose parent does not exist.
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
t.Fatal("expected IsNotExist error, but got nil instead")
}
if !os.IsNotExist(err) {
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
}
}
// Test for error when DST ends in a trailing
// path separator but exists as a file.
func TestCopyErrDstNotDir(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
// Try with a file source.
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
content, err := TarResource(srcInfo.Path)
if err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
defer content.Close()
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
t.Fatal("expected IsNotDir error, but got nil instead")
}
if !isNotDir(err) {
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
// Try with a directory source.
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
content, err = TarResource(srcInfo.Path)
if err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
defer content.Close()
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
t.Fatal("expected IsNotDir error, but got nil instead")
}
if !isNotDir(err) {
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
}
}
// Possibilities are reduced to the remaining 10 cases:
//
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
// ===================================================================================================
// A | no | - | no | - | no | create file
// B | no | - | no | - | yes | error
// C | no | - | yes | no | - | overwrite file
// D | no | - | yes | yes | - | create file in dst dir
// E | yes | no | no | - | - | create dir, copy contents
// F | yes | no | yes | no | - | error
// G | yes | no | yes | yes | - | copy dir and contents
// H | yes | yes | no | - | - | create dir, copy contents
// I | yes | yes | yes | no | - | error
// J | yes | yes | yes | yes | - | copy dir contents
//
// A. SRC specifies a file and DST (no trailing path separator) doesn't
// exist. This should create a file with the name DST and copy the
// contents of the source file into it.
func TestCopyCaseA(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
srcPath := filepath.Join(tmpDirA, "file1")
dstPath := filepath.Join(tmpDirB, "itWorks.txt")
var err error
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
t.Fatal(err)
}
}
// B. SRC specifies a file and DST (with trailing path separator) doesn't
// exist. This should cause an error because the copy operation cannot
// create a directory when copying a single file.
func TestCopyCaseB(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
srcPath := filepath.Join(tmpDirA, "file1")
dstDir := joinTrailingSep(tmpDirB, "testDir")
var err error
if err = testCopyHelper(t, srcPath, dstDir); err == nil {
t.Fatal("expected ErrDirNotExists error, but got nil instead")
}
if err != ErrDirNotExists {
t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
}
}
// C. SRC specifies a file and DST exists as a file. This should overwrite
// the file at DST with the contents of the source file.
func TestCopyCaseC(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcPath := filepath.Join(tmpDirA, "file1")
dstPath := filepath.Join(tmpDirB, "file2")
var err error
// Ensure they start out different.
if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
t.Fatal("expected different file contents")
}
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
t.Fatal(err)
}
}
// D. SRC specifies a file and DST exists as a directory. This should place
// a copy of the source file inside it using the basename from SRC. Ensure
// this works whether DST has a trailing path separator or not.
func TestCopyCaseD(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcPath := filepath.Join(tmpDirA, "file1")
dstDir := filepath.Join(tmpDirB, "dir1")
dstPath := filepath.Join(dstDir, "file1")
var err error
// Ensure that dstPath doesn't exist.
if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
t.Fatalf("did not expect dstPath %q to exist", dstPath)
}
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
t.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil {
t.Fatalf("unable to remove dstDir: %s", err)
}
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
t.Fatalf("unable to make dstDir: %s", err)
}
dstDir = joinTrailingSep(tmpDirB, "dir1")
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
t.Fatal(err)
}
}
// E. SRC specifies a directory and DST does not exist. This should create a
// directory at DST and copy the contents of the SRC directory into the DST
// directory. Ensure this works whether DST has a trailing path separator or
// not.
func TestCopyCaseE(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
srcDir := filepath.Join(tmpDirA, "dir1")
dstDir := filepath.Join(tmpDirB, "testDir")
var err error
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Log("dir contents not equal")
logDirContents(t, tmpDirA)
logDirContents(t, tmpDirB)
t.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil {
t.Fatalf("unable to remove dstDir: %s", err)
}
dstDir = joinTrailingSep(tmpDirB, "testDir")
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Fatal(err)
}
}
// F. SRC specifies a directory and DST exists as a file. This should cause an
// error as it is not possible to overwrite a file with a directory.
func TestCopyCaseF(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcDir := filepath.Join(tmpDirA, "dir1")
dstFile := filepath.Join(tmpDirB, "file1")
var err error
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if err != ErrCannotCopyDir {
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// G. SRC specifies a directory and DST exists as a directory. This should copy
// the SRC directory and all its contents to the DST directory. Ensure this
// works whether DST has a trailing path separator or not.
func TestCopyCaseG(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcDir := filepath.Join(tmpDirA, "dir1")
dstDir := filepath.Join(tmpDirB, "dir2")
resultDir := filepath.Join(dstDir, "dir1")
var err error
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
t.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil {
t.Fatalf("unable to remove dstDir: %s", err)
}
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
t.Fatalf("unable to make dstDir: %s", err)
}
dstDir = joinTrailingSep(tmpDirB, "dir2")
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
t.Fatal(err)
}
}
// H. SRC specifies a directory's contents only and DST does not exist. This
// should create a directory at DST and copy the contents of the SRC
// directory (but not the directory itself) into the DST directory. Ensure
// this works whether DST has a trailing path separator or not.
func TestCopyCaseH(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A with some sample files and directories.
createSampleDir(t, tmpDirA)
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
dstDir := filepath.Join(tmpDirB, "testDir")
var err error
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Log("dir contents not equal")
logDirContents(t, tmpDirA)
logDirContents(t, tmpDirB)
t.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil {
t.Fatalf("unable to remove dstDir: %s", err)
}
dstDir = joinTrailingSep(tmpDirB, "testDir")
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Log("dir contents not equal")
logDirContents(t, tmpDirA)
logDirContents(t, tmpDirB)
t.Fatal(err)
}
}
// I. SRC specifies a directory's contents only and DST exists as a file. This
// should cause an error as it is not possible to overwrite a file with a
// directory.
func TestCopyCaseI(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
dstFile := filepath.Join(tmpDirB, "file1")
var err error
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
}
if err != ErrCannotCopyDir {
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
}
}
// J. SRC specifies a directory's contents only and DST exists as a directory.
// This should copy the contents of the SRC directory (but not the directory
// itself) into the DST directory. Ensure this works whether DST has a
// trailing path separator or not.
func TestCopyCaseJ(t *testing.T) {
tmpDirA, tmpDirB := getTestTempDirs(t)
defer removeAllPaths(tmpDirA, tmpDirB)
// Load A and B with some sample files and directories.
createSampleDir(t, tmpDirA)
createSampleDir(t, tmpDirB)
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
dstDir := filepath.Join(tmpDirB, "dir5")
var err error
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Fatal(err)
}
// Now try again but using a trailing path separator for dstDir.
if err = os.RemoveAll(dstDir); err != nil {
t.Fatalf("unable to remove dstDir: %s", err)
}
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
t.Fatalf("unable to make dstDir: %s", err)
}
dstDir = joinTrailingSep(tmpDirB, "dir5")
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
t.Fatalf("unexpected error %T: %s", err, err)
}
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
t.Fatal(err)
}
}

Просмотреть файл

@ -93,7 +93,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
return 0, err
}
}
@ -150,7 +150,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
srcData = tmpFile
}
if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
return 0, err
}

Просмотреть файл

@ -1,12 +1,6 @@
// +build cgo
package graphdb
import (
"database/sql"
_ "code.google.com/p/gosqlite/sqlite3" // registers sqlite
)
import "database/sql"
// NewSqliteConn opens a connection to a sqlite
// database.
@ -15,6 +9,5 @@ func NewSqliteConn(root string) (*Database, error) {
if err != nil {
return nil, err
}
return NewDatabase(conn)
}

Просмотреть файл

@ -0,0 +1,5 @@
// +build cgo,!windows
package graphdb
import _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite

Просмотреть файл

@ -0,0 +1,5 @@
// +build cgo,windows
package graphdb
import _ "github.com/mattn/go-sqlite3" // registers sqlite

Просмотреть файл

@ -358,6 +358,9 @@ var (
rnd = rand.New(random.NewSource())
)
// GetRandomName generates a random name from the list of adjectives and surnames in this package
// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random
// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3`
func GetRandomName(retry int) string {
begin:
name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))])

Просмотреть файл

@ -1,6 +1,7 @@
package namesgenerator
import (
"strings"
"testing"
)
@ -12,6 +13,27 @@ func TestGenerateAwesomeNames(t *testing.T) {
}
}
func TestNameFormat(t *testing.T) {
name := GetRandomName(0)
if !strings.Contains(name, "_") {
t.Fatalf("Generated name does not contain an underscore")
}
if strings.ContainsAny(name, "0123456789") {
t.Fatalf("Generated name contains numbers!")
}
}
func TestNameRetries(t *testing.T) {
name := GetRandomName(1)
if !strings.Contains(name, "_") {
t.Fatalf("Generated name does not contain an underscore")
}
if !strings.ContainsAny(name, "0123456789") {
t.Fatalf("Generated name doesn't contain a number")
}
}
// To be awesome, a container name must involve cool inventors, be easy to remember,
// be at least mildly funny, and always be politically correct for enterprise adoption.
func isAwesome(name string) bool {

Просмотреть файл

@ -13,26 +13,41 @@ import (
)
const (
PortSpecTemplate = "ip:hostPort:containerPort"
PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort"
// portSpecTemplate is the expected format for port specifications
portSpecTemplate = "ip:hostPort:containerPort"
)
// PortBinding represents a binding between a Host IP address and a Host Port
type PortBinding struct {
HostIp string
// HostIP is the host IP Address
HostIP string `json:"HostIp"`
// HostPort is the host port number
HostPort string
}
// PortMap is a collection of PortBinding indexed by Port
type PortMap map[Port][]PortBinding
// PortSet is a collection of structs indexed by Port
type PortSet map[Port]struct{}
// 80/tcp
// Port is a string containing port number and protocol in the format "80/tcp"
type Port string
func NewPort(proto, port string) Port {
return Port(fmt.Sprintf("%s/%s", port, proto))
// NewPort creates a new instance of a Port given a protocol and port number
func NewPort(proto, port string) (Port, error) {
// Check for parsing issues on "port" now so we can avoid having
// to check it later on.
portInt, err := ParsePort(port)
if err != nil {
return "", err
}
return Port(fmt.Sprintf("%d/%s", portInt, proto)), nil
}
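// exampleNewPort is an illustrative sketch (the function name is
// hypothetical): the port number is validated up front, so later calls such
// as Port.Int() can safely assume it parses.
func exampleNewPort() {
	p, err := NewPort("tcp", "8080") // p == Port("8080/tcp"), err == nil
	_, _ = p, err
	_, err = NewPort("tcp", "http") // returns a parse error
	_ = err
}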
// ParsePort parses the port number string and returns an int
func ParsePort(rawPort string) (int, error) {
if len(rawPort) == 0 {
return 0, nil
@ -44,25 +59,32 @@ func ParsePort(rawPort string) (int, error) {
return int(port), nil
}
// Proto returns the protocol of a Port
func (p Port) Proto() string {
proto, _ := SplitProtoPort(string(p))
return proto
}
// Port returns the port number of a Port
func (p Port) Port() string {
_, port := SplitProtoPort(string(p))
return port
}
// Int returns the port number of a Port as an int
func (p Port) Int() int {
port, err := ParsePort(p.Port())
if err != nil {
panic(err)
portStr := p.Port()
if len(portStr) == 0 {
return 0
}
return port
// We don't need to check for an error because we're going to
// assume that any error would have been found, and reported, in NewPort()
port, _ := strconv.ParseUint(portStr, 10, 16)
return int(port)
}
// Splits a port in the format of proto/port
// SplitProtoPort splits a port in the format of proto/port
func SplitProtoPort(rawPort string) (string, string) {
parts := strings.Split(rawPort, "/")
l := len(parts)
@ -87,8 +109,8 @@ func validateProto(proto string) bool {
return false
}
// We will receive port specs in the format of ip:public:private/proto and these need to be
// parsed in the internal types
// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
// these into the internal types
func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
var (
exposedPorts = make(map[Port]struct{}, len(ports))
@ -108,19 +130,19 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding,
rawPort = fmt.Sprintf(":%s", rawPort)
}
parts, err := parsers.PartParser(PortSpecTemplate, rawPort)
parts, err := parsers.PartParser(portSpecTemplate, rawPort)
if err != nil {
return nil, nil, err
}
var (
containerPort = parts["containerPort"]
rawIp = parts["ip"]
rawIP = parts["ip"]
hostPort = parts["hostPort"]
)
if rawIp != "" && net.ParseIP(rawIp) == nil {
return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp)
if rawIP != "" && net.ParseIP(rawIP) == nil {
return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP)
}
if containerPort == "" {
return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
@ -152,13 +174,16 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding,
if len(hostPort) > 0 {
hostPort = strconv.FormatUint(startHostPort+i, 10)
}
port := NewPort(strings.ToLower(proto), containerPort)
port, err := NewPort(strings.ToLower(proto), containerPort)
if err != nil {
return nil, nil, err
}
if _, exists := exposedPorts[port]; !exists {
exposedPorts[port] = struct{}{}
}
binding := PortBinding{
HostIp: rawIp,
HostIP: rawIP,
HostPort: hostPort,
}
bslice, exists := bindings[port]

Просмотреть файл

@ -42,7 +42,11 @@ func TestParsePort(t *testing.T) {
}
func TestPort(t *testing.T) {
p := NewPort("tcp", "1234")
p, err := NewPort("tcp", "1234")
if err != nil {
t.Fatalf("tcp, 1234 had a parsing issue: %v", err)
}
if string(p) != "1234/tcp" {
t.Fatal("tcp, 1234 did not result in the string 1234/tcp")
@ -59,6 +63,11 @@ func TestPort(t *testing.T) {
if p.Int() != 1234 {
t.Fatal("port int value was not 1234")
}
p, err = NewPort("tcp", "asd1234")
if err == nil {
t.Fatal("tcp, asd1234 was supposed to fail")
}
}
func TestSplitProtoPort(t *testing.T) {
@ -124,8 +133,8 @@ func TestParsePortSpecs(t *testing.T) {
t.Fatalf("%s should have exactly one binding", portspec)
}
if bindings[0].HostIp != "" {
t.Fatalf("HostIp should not be set for %s", portspec)
if bindings[0].HostIP != "" {
t.Fatalf("HostIP should not be set for %s", portspec)
}
if bindings[0].HostPort != "" {
@ -154,8 +163,8 @@ func TestParsePortSpecs(t *testing.T) {
t.Fatalf("%s should have exactly one binding", portspec)
}
if bindings[0].HostIp != "" {
t.Fatalf("HostIp should not be set for %s", portspec)
if bindings[0].HostIP != "" {
t.Fatalf("HostIP should not be set for %s", portspec)
}
if bindings[0].HostPort != port {
@ -184,8 +193,8 @@ func TestParsePortSpecs(t *testing.T) {
t.Fatalf("%s should have exactly one binding", portspec)
}
if bindings[0].HostIp != "0.0.0.0" {
t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec)
if bindings[0].HostIP != "0.0.0.0" {
t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec)
}
if bindings[0].HostPort != port {
@ -226,8 +235,8 @@ func TestParsePortSpecsWithRange(t *testing.T) {
t.Fatalf("%s should have exactly one binding", portspec)
}
if bindings[0].HostIp != "" {
t.Fatalf("HostIp should not be set for %s", portspec)
if bindings[0].HostIP != "" {
t.Fatalf("HostIP should not be set for %s", portspec)
}
if bindings[0].HostPort != "" {
@ -255,8 +264,8 @@ func TestParsePortSpecsWithRange(t *testing.T) {
t.Fatalf("%s should have exactly one binding", portspec)
}
if bindings[0].HostIp != "" {
t.Fatalf("HostIp should not be set for %s", portspec)
if bindings[0].HostIP != "" {
t.Fatalf("HostIP should not be set for %s", portspec)
}
if bindings[0].HostPort != port {
@ -280,7 +289,7 @@ func TestParsePortSpecsWithRange(t *testing.T) {
for portspec, bindings := range bindingMap {
_, port := SplitProtoPort(string(portspec))
if len(bindings) != 1 || bindings[0].HostIp != "0.0.0.0" || bindings[0].HostPort != port {
if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port {
t.Fatalf("Expect single binding to port %s but found %s", port, bindings)
}
}
@ -328,7 +337,7 @@ func TestParseNetworkOptsPrivateOnly(t *testing.T) {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
if s.HostIP != "192.168.1.100" {
t.Fail()
}
}
@ -370,7 +379,7 @@ func TestParseNetworkOptsPublic(t *testing.T) {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
if s.HostIP != "192.168.1.100" {
t.Fail()
}
}
@ -445,7 +454,7 @@ func TestParseNetworkOptsUdp(t *testing.T) {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
if s.HostIP != "192.168.1.100" {
t.Fail()
}
}

Просмотреть файл

@ -26,6 +26,9 @@ func (s *portSorter) Less(i, j int) bool {
return s.by(ip, jp)
}
// Sort sorts a list of ports using the provided predicate
// This function should compare `i` and `j`, returning true if `i` is
// considered to be less than `j`
func Sort(ports []Port, predicate func(i, j Port) bool) {
s := &portSorter{ports, predicate}
sort.Sort(s)

Просмотреть файл

@ -59,10 +59,10 @@ func TestSortPortMap(t *testing.T) {
},
Port("6379/tcp"): []PortBinding{
{},
{HostIp: "0.0.0.0", HostPort: "32749"},
{HostIP: "0.0.0.0", HostPort: "32749"},
},
Port("9999/tcp"): []PortBinding{
{HostIp: "0.0.0.0", HostPort: "40000"},
{HostIP: "0.0.0.0", HostPort: "40000"},
},
}
@ -77,7 +77,7 @@ func TestSortPortMap(t *testing.T) {
t.Errorf("failed to prioritize port with explicit mappings, got %v", ports)
}
if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{
{HostIp: "0.0.0.0", HostPort: "32749"},
{HostIP: "0.0.0.0", HostPort: "32749"},
{},
}) {
t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm)

Просмотреть файл

@ -3,6 +3,7 @@ package parsers
import (
"fmt"
"net/url"
"path"
"runtime"
"strconv"
"strings"
@ -158,5 +159,12 @@ func ParseLink(val string) (string, string, error) {
if len(arr) == 1 {
return val, val, nil
}
// This is kept because we can actually get a HostConfig with links
// from an already created container and the format is not `foo:bar`
// but `/foo:/c1/bar`
if strings.HasPrefix(arr[0], "/") {
_, alias := path.Split(arr[1])
return arr[0][1:], alias, nil
}
return arr[0], arr[1], nil
}
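// exampleParseLink is an illustrative sketch (the function name is
// hypothetical), showing the two accepted link formats as the code above
// suggests.
func exampleParseLink() {
	name, alias, _ := ParseLink("db:database") // name == "db", alias == "database"
	_, _ = name, alias
	name, alias, _ = ParseLink("/db:/webapp/db") // name == "db", alias == "db"
	_, _ = name, alias
}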

Просмотреть файл

@ -6,100 +6,11 @@ import (
"io/ioutil"
"net/http"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/cliconfig"
)
type RequestAuthorization struct {
authConfig *cliconfig.AuthConfig
registryEndpoint *Endpoint
resource string
scope string
actions []string
tokenLock sync.Mutex
tokenCache string
tokenExpiration time.Time
}
func NewRequestAuthorization(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization {
return &RequestAuthorization{
authConfig: authConfig,
registryEndpoint: registryEndpoint,
resource: resource,
scope: scope,
actions: actions,
}
}
func (auth *RequestAuthorization) getToken() (string, error) {
auth.tokenLock.Lock()
defer auth.tokenLock.Unlock()
now := time.Now()
if now.Before(auth.tokenExpiration) {
logrus.Debugf("Using cached token for %s", auth.authConfig.Username)
return auth.tokenCache, nil
}
for _, challenge := range auth.registryEndpoint.AuthChallenges {
switch strings.ToLower(challenge.Scheme) {
case "basic":
// no token necessary
case "bearer":
logrus.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
params := map[string]string{}
for k, v := range challenge.Parameters {
params[k] = v
}
params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint)
if err != nil {
return "", err
}
auth.tokenCache = token
auth.tokenExpiration = now.Add(time.Minute)
return token, nil
default:
logrus.Infof("Unsupported auth scheme: %q", challenge.Scheme)
}
}
// Do not expire cache since there are no challenges which use a token
auth.tokenExpiration = time.Now().Add(time.Hour * 24)
return "", nil
}
// Checks that requests to the v2 registry can be authorized.
func (auth *RequestAuthorization) CanAuthorizeV2() bool {
if len(auth.registryEndpoint.AuthChallenges) == 0 {
return true
}
scope := fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
if _, err := loginV2(auth.authConfig, auth.registryEndpoint, scope); err != nil {
logrus.Debugf("Cannot authorize against V2 endpoint: %s", auth.registryEndpoint)
return false
}
return true
}
func (auth *RequestAuthorization) Authorize(req *http.Request) error {
token, err := auth.getToken()
if err != nil {
return err
}
if token != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
} else if auth.authConfig.Username != "" && auth.authConfig.Password != "" {
req.SetBasicAuth(auth.authConfig.Username, auth.authConfig.Password)
}
return nil
}
// Login tries to register/login to the registry server.
func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
// Separates the v2 registry login logic from the v1 logic.

Просмотреть файл

@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
@ -161,19 +162,31 @@ func (s *Service) TlsConfig(hostname string) (*tls.Config, error) {
return &tlsConfig, nil
}
func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) {
mirrorUrl, err := url.Parse(mirror)
if err != nil {
return nil, err
}
return s.TlsConfig(mirrorUrl.Host)
}
func (s *Service) LookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) {
var cfg = tlsconfig.ServerDefault
tlsConfig := &cfg
if strings.HasPrefix(repoName, DEFAULT_NAMESPACE+"/") {
// v2 mirrors
for _, mirror := range s.Config.Mirrors {
mirrorTlsConfig, err := s.tlsConfigForMirror(mirror)
if err != nil {
return nil, err
}
endpoints = append(endpoints, APIEndpoint{
URL: mirror,
// guess mirrors are v2
Version: APIVersion2,
Mirror: true,
TrimHostname: true,
TLSConfig: tlsConfig,
TLSConfig: mirrorTlsConfig,
})
}
// v2 registry
@ -184,18 +197,6 @@ func (s *Service) LookupEndpoints(repoName string) (endpoints []APIEndpoint, err
TrimHostname: true,
TLSConfig: tlsConfig,
})
// v1 mirrors
// TODO(tiborvass): shouldn't we remove v1 mirrors from here, since v1 mirrors are kinda special?
for _, mirror := range s.Config.Mirrors {
endpoints = append(endpoints, APIEndpoint{
URL: mirror,
// guess mirrors are v1
Version: APIVersion1,
Mirror: true,
TrimHostname: true,
TLSConfig: tlsConfig,
})
}
// v1 registry
endpoints = append(endpoints, APIEndpoint{
URL: DEFAULT_V1_REGISTRY,

Просмотреть файл

@ -33,23 +33,6 @@ type RegistryInfo struct {
Standalone bool `json:"standalone"`
}
type FSLayer struct {
BlobSum string `json:"blobSum"`
}
type ManifestHistory struct {
V1Compatibility string `json:"v1Compatibility"`
}
type ManifestData struct {
Name string `json:"name"`
Tag string `json:"tag"`
Architecture string `json:"architecture"`
FSLayers []*FSLayer `json:"fsLayers"`
History []*ManifestHistory `json:"history"`
SchemaVersion int `json:"schemaVersion"`
}
type APIVersion int
func (av APIVersion) String() string {

Просмотреть файл

@ -6,17 +6,23 @@ import (
"github.com/docker/docker/pkg/nat"
)
// Just to make life easier
func newPortNoError(proto, port string) nat.Port {
p, _ := nat.NewPort(proto, port)
return p
}
func TestCompare(t *testing.T) {
ports1 := make(nat.PortSet)
ports1[nat.Port("1111/tcp")] = struct{}{}
ports1[nat.Port("2222/tcp")] = struct{}{}
ports1[newPortNoError("tcp", "1111")] = struct{}{}
ports1[newPortNoError("tcp", "2222")] = struct{}{}
ports2 := make(nat.PortSet)
ports2[nat.Port("3333/tcp")] = struct{}{}
ports2[nat.Port("4444/tcp")] = struct{}{}
ports2[newPortNoError("tcp", "3333")] = struct{}{}
ports2[newPortNoError("tcp", "4444")] = struct{}{}
ports3 := make(nat.PortSet)
ports3[nat.Port("1111/tcp")] = struct{}{}
ports3[nat.Port("2222/tcp")] = struct{}{}
ports3[nat.Port("5555/tcp")] = struct{}{}
ports3[newPortNoError("tcp", "1111")] = struct{}{}
ports3[newPortNoError("tcp", "2222")] = struct{}{}
ports3[newPortNoError("tcp", "5555")] = struct{}{}
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
volumes2 := make(map[string]struct{})

Просмотреть файл

@ -11,8 +11,8 @@ func TestMerge(t *testing.T) {
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
portsImage := make(nat.PortSet)
portsImage[nat.Port("1111/tcp")] = struct{}{}
portsImage[nat.Port("2222/tcp")] = struct{}{}
portsImage[newPortNoError("tcp", "1111")] = struct{}{}
portsImage[newPortNoError("tcp", "2222")] = struct{}{}
configImage := &Config{
ExposedPorts: portsImage,
Env: []string{"VAR1=1", "VAR2=2"},
@ -20,8 +20,8 @@ func TestMerge(t *testing.T) {
}
portsUser := make(nat.PortSet)
portsUser[nat.Port("2222/tcp")] = struct{}{}
portsUser[nat.Port("3333/tcp")] = struct{}{}
portsUser[newPortNoError("tcp", "2222")] = struct{}{}
portsUser[newPortNoError("tcp", "3333")] = struct{}{}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{

Просмотреть файл

@ -26,15 +26,16 @@ var (
// validateNM is the set of fields passed to validateNetMode()
type validateNM struct {
netMode NetworkMode
flHostname *string
flLinks opts.ListOpts
flDns opts.ListOpts
flExtraHosts opts.ListOpts
flMacAddress *string
flPublish opts.ListOpts
flPublishAll *bool
flExpose opts.ListOpts
netMode NetworkMode
flHostname *string
flLinks opts.ListOpts
flDns opts.ListOpts
flExtraHosts opts.ListOpts
flMacAddress *string
flPublish opts.ListOpts
flPublishAll *bool
flExpose opts.ListOpts
flVolumeDriver string
}
func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) {
@ -94,6 +95,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only")
flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container")
flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR")
@ -259,7 +261,10 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err)
}
for i := start; i <= end; i++ {
p := nat.NewPort(proto, strconv.FormatUint(i, 10))
p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
if err != nil {
return nil, nil, cmd, err
}
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
@ -332,6 +337,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
Labels: convertKVStringsToMap(labels),
VolumeDriver: *flVolumeDriver,
}
hostConfig := &HostConfig{

Просмотреть файл

@ -10,12 +10,10 @@ type experimentalFlags struct {
func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
flags := make(map[string]interface{})
flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
flags["publish-service"] = cmd.String([]string{"-publish-service"}, "", "Publish this container as a service")
return &experimentalFlags{flags: flags}
}
func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) {
config.VolumeDriver = *(exp.flags["volume-driver"]).(*string)
config.PublishService = *(exp.flags["publish-service"]).(*string)
}

Просмотреть файл

@ -2,6 +2,8 @@
package utils
// ExperimentalBuild is a stub which always returns true for
// builds that include the "experimental" build tag
func ExperimentalBuild() bool {
return true
}

Просмотреть файл

@ -14,6 +14,8 @@ import (
"github.com/docker/docker/pkg/urlutil"
)
// GitClone clones a repository into a newly created directory which
// will be under "docker-build-git"
func GitClone(remoteURL string) (string, error) {
if !urlutil.IsGitTransport(remoteURL) {
remoteURL = "https://" + remoteURL

Просмотреть файл

@ -2,6 +2,8 @@
package utils
// ExperimentalBuild is a stub which always returns false for
// builds that do not include the "experimental" build tag
func ExperimentalBuild() bool {
return false
}
