Merge pull request #2812 from dotcloud/bump_v0.6.7

Bump v0.6.7
This commit is contained in:
Victor Vieux 2013-11-21 18:21:19 -08:00 committed by Victor Vieux
Parents 6d420407ca 53f1bf0f99
Commit a93e40a158
124 changed files with 8018 additions and 3803 deletions

1
.gitignore vendored
View File

@ -18,3 +18,4 @@ bundles/
.hg/
.git/
vendor/pkg/
pyenv

View File

@ -44,6 +44,7 @@ Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Darren Coxall <darren@darrencoxall.com>
David Calavera <david.calavera@gmail.com>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
@ -94,6 +95,7 @@ Jonathan Rudenberg <jonathan@titanous.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Josh Poimboeuf <jpoimboe@redhat.com>
Julien Barbier <write0@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Karan Lyons <karan@karanlyons.com>
@ -119,6 +121,7 @@ Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martin Redmond <martin@tinychat.com>
Matt Apperson <me@mattapperson.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Bachmann <bachmann.matt@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
Maxim Treskin <zerthurd@gmail.com>
@ -165,6 +168,7 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Sven Dowideit <SvenDowideit@home.org.au>
Thatcher Peskens <thatcher@dotcloud.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>

View File

@ -1,5 +1,43 @@
# Changelog
## 0.6.7 (2013-11-21)
#### Runtime
* Improved stability, fixes some race conditions
* Skip mounted volumes when deleting a container's volumes
* Fix layer size computation: handle hard links correctly
* Use the container's working directory for docker cp CONTAINER:PATH
* Fix tmp dir never being cleaned up
* Speed up docker ps
* More informative error message on name collisions
* Fix nameserver regex
* Always return long IDs
* Fix container restart race condition
* Keep published ports across docker stop; docker start
* Fix container networking on Fedora
* Correctly express "any address" to iptables
* Fix network setup when reconnecting to ghost container
* Prevent deletion if image is used by a running container
* Lock around read operations in graph
#### RemoteAPI
* Return full ID on docker rmi
#### Client
+ Add -tree option to images
+ Offline image transfer (see the sketch after this changelog entry)
* Exit with status 2 on usage error and display usage on stderr
* Do not forward SIGCHLD to container
* Use string timestamp for docker events -since
#### Other
* Update to go 1.2rc5
+ Add /etc/default/docker support to upstart
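
The "Offline image transfer" item corresponds to the two remote-API endpoints this commit adds (GET /images/{name}/get and POST /images/load; see api.go below). A minimal sketch of the export half, assuming a local daemon on the default unix socket and a `busybox` image present; the modern `DialContext` transport hook is used purely for brevity:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Route all HTTP traffic over the docker daemon's unix socket.
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}
	// Export the image as a tarball (API version 1.7, bumped in this commit).
	resp, err := client.Get("http://docker/v1.7/images/busybox/get")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, err := os.Create("busybox.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	n, _ := io.Copy(out, resp.Body)
	fmt.Println("wrote", n, "bytes")
}
```

The resulting tarball can be carried offline and streamed back with a POST to /v1.7/images/load on the target host.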
## 0.6.6 (2013-11-06)
#### Runtime

View File

@ -1,11 +1,14 @@
# Contributing to Docker
Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete.
## Build Environment
For instructions on setting up your development environment, please see our dedicated [dev environment setup docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
## Contribution guidelines

View File

@ -36,7 +36,7 @@ run apt-get install -y -q mercurial
run apt-get install -y -q build-essential libsqlite3-dev
# Install Go
run curl -s https://go.googlecode.com/files/go1.2rc3.src.tar.gz | tar -v -C /usr/local -xz
run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
@ -46,10 +46,9 @@ run apt-get install -y -q ruby1.9.3 rubygems libffi-dev
run gem install --no-rdoc --no-ri fpm
run apt-get install -y -q reprepro dpkg-sig
# Install s3cmd 1.0.1 (earlier versions don't support env variables in the config)
run apt-get install -y -q python-pip
run pip install s3cmd
run pip install python-magic
run pip install s3cmd==1.1.0-beta3
run pip install python-magic==0.4.6
run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
# Runtime dependencies

35
NOTICE
View File

@ -8,35 +8,12 @@ by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Transfers of Docker shall be in accordance with applicable export
controls of any country and all other applicable legal requirements.
Docker shall not be distributed or downloaded to or in Cuba, Iran,
North Korea, Sudan or Syria and shall not be distributed or downloaded
to any person on the Denied Persons List administered by the U.S.
Department of Commerce.
What does that mean?
Here is a further explanation from our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
Like all software products that utilize cryptography, the export and
use of Docker is subject to the U.S. Commerce Department's Export
Administration Regulations (EAR) because it uses or contains
cryptography (see
http://www.bis.doc.gov/index.php/policy-guidance/encryption). Certain
free and open source software projects have a lightweight set of
requirements, which can generally be met by providing email notice to
the appropriate U.S. government agencies that their source code is
available on a publicly available repository and making the
appropriate statements in the README.
For more information, please see http://www.bis.doc.gov
The restrictions of the EAR apply to certain denied locations
(currently Iran, Sudan, Syria, North Korea, or Cuba) and those
individuals on the Denied Persons List, which is available here:
http://www.bis.doc.gov/index.php/policy-guidance/lists-of-parties-of-concern/denied-persons-list.
If you are incorporating Docker into a new open source project, the
EAR restrictions apply to your incorporation of Docker into your
project in the same manner as other cryptography-enabled projects,
such as OpenSSL, almost all Linux distributions, etc.
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@ -193,10 +193,9 @@ wrong or incomplete.
*Brought to you courtesy of our legal counsel. For more context,
please see the Notice document.*
Transfers of Docker shall be in accordance with applicable export controls
of any country and all other applicable legal requirements. Without limiting the
foregoing, Docker shall not be distributed or downloaded to any individual or
location if such distribution or download would violate the applicable US
government export regulations.
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov

View File

@ -1 +1 @@
0.6.6
0.6.7-dev

163
Vagrantfile vendored
View File

@ -4,65 +4,135 @@
BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
VF_BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box"
AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
AWS_AMI = ENV['AWS_AMI'] || "ami-d0f89fb9"
AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900"
AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro'
FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"]
# A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8)
# and install docker.
$script = <<SCRIPT
# The username to add to the docker group will be passed as the first argument
# to the script. If nothing is passed, default to "vagrant".
user="$1"
if [ -z "$user" ]; then
user=vagrant
fi
# Adding an apt gpg key is idempotent.
wget -q -O - https://get.docker.io/gpg | apt-key add -
# Creating the docker.list file is idempotent, but it may overwrite desired
# settings if it already exists. This could be solved with md5sum but it
# doesn't seem worth it.
echo 'deb http://get.docker.io/ubuntu docker main' > \
/etc/apt/sources.list.d/docker.list
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q
# Install docker. 'apt-get install' is idempotent.
apt-get install -q -y lxc-docker
usermod -a -G docker "$user"
tmp=`mktemp -q` && {
# Only install the backport kernel; don't bother upgrading if the backport is
# already installed. We want to parse the output of apt so we need to save it
# with 'tee'. NOTE: The installation of the kernel will trigger dkms to
# install vboxguest if needed.
apt-get install -q -y --no-upgrade linux-image-generic-lts-raring | \
tee "$tmp"
# Parse the number of installed packages from the output
NUM_INST=`awk '$2 == "upgraded," && $4 == "newly" { print $3 }' "$tmp"`
rm "$tmp"
}
# If the number of installed packages is greater than 0, we want to reboot (the
# backport kernel was installed but is not running).
if [ "$NUM_INST" -gt 0 ];
then
echo "Rebooting down to activate new kernel."
echo "/vagrant will not be mounted. Use 'vagrant halt' followed by"
echo "'vagrant up' to ensure /vagrant is mounted."
shutdown -r now
fi
SCRIPT
# We need to install the virtualbox guest additions *before* we do the normal
# docker installation. As such this script is prepended to the common docker
# install script above. This allows the install of the backport kernel to
# trigger dkms to build the virtualbox guest module install.
$vbox_script = <<VBOX_SCRIPT + $script
# Install the VirtualBox guest additions if they aren't already installed.
if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q
# Kernel Headers and dkms are required to build the vbox guest kernel
# modules.
apt-get install -q -y linux-headers-generic-lts-raring dkms
echo 'Downloading VBox Guest Additions...'
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
fi
VBOX_SCRIPT
Vagrant::Config.run do |config|
# Setup virtual machine box. This VM configuration code is always executed.
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
config.ssh.forward_agent = true
# Provision docker and new kernel if deployment was not done.
# It is assumed Vagrant can successfully launch the provider instance.
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
# Add lxc-docker package
pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
"echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
"apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
# Add Ubuntu raring backported kernel
pkg_cmd << "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
# Add guest additions if local vbox VM. As virtualbox is the default provider,
# it is assumed it won't be explicitly stated.
if ENV["VAGRANT_DEFAULT_PROVIDER"].nil? && ARGV.none? { |arg| arg.downcase.start_with?("--provider") }
pkg_cmd << "apt-get install -q -y linux-headers-generic-lts-raring dkms; " \
"echo 'Downloading VBox Guest Additions...'; " \
"wget -q http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso; "
# Prepare the VM to add guest additions after reboot
pkg_cmd << "echo -e 'mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt\n" \
"echo yes | /mnt/VBoxLinuxAdditions.run\numount /mnt\n" \
"rm /root/guest_additions.sh; ' > /root/guest_additions.sh; " \
"chmod 700 /root/guest_additions.sh; " \
"sed -i -E 's#^exit 0#[ -x /root/guest_additions.sh ] \\&\\& /root/guest_additions.sh#' /etc/rc.local; " \
"echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
"echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
end
# Add vagrant user to the docker group
pkg_cmd << "usermod -a -G docker vagrant; "
# Activate new kernel
pkg_cmd << "shutdown -r +1; "
config.vm.provision :shell, :inline => pkg_cmd
# Use the specified private key path if it is specified and not empty.
if SSH_PRIVKEY_PATH
config.ssh.private_key_path = SSH_PRIVKEY_PATH
end
config.ssh.forward_agent = true
end
# Providers were added on Vagrant >= 1.1.0
#
# NOTE: The vagrant "vm.provision" appends its arguments to a list and executes
# them in order. If you invoke "vm.provision :shell, :inline => $script"
# twice then vagrant will run the script two times. Unfortunately when you use
# providers and the override argument to set up provisioners (like the vbox
# guest extensions) they 1) don't replace the other provisioners (they append
# to the end of the list) and 2) you can't control the order the provisioners
# are executed (you can only append to the list). If you want the virtualbox
# only script to run before the other script, you have to jump through a lot of
# hoops.
#
# Here is my only repeatable solution: make one script that is common ($script)
# and another script that is the virtual box guest *prepended* to the common
# script. Only ever use "vm.provision" *one time* per provider. That means
# every single provider has an override, and every single one configures
# "vm.provision". Much saddness, but such is life.
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
username = "ubuntu"
override.vm.box_url = AWS_BOX_URI
override.vm.provision :shell, :inline => $script, :args => username
aws.access_key_id = ENV["AWS_ACCESS_KEY"]
aws.secret_access_key = ENV["AWS_SECRET_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
override.ssh.username = username
aws.region = AWS_REGION
aws.ami = AWS_AMI
aws.instance_type = "t1.micro"
aws.instance_type = AWS_INSTANCE_TYPE
end
config.vm.provider :rackspace do |rs|
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
config.vm.provider :rackspace do |rs, override|
override.vm.provision :shell, :inline => $script
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
@ -71,20 +141,25 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
end
config.vm.provider :vmware_fusion do |f, override|
override.vm.box = BOX_NAME
override.vm.box_url = VF_BOX_URI
override.vm.synced_folder ".", "/vagrant", disabled: true
override.vm.provision :shell, :inline => $script
f.vmx["displayName"] = "docker"
end
config.vm.provider :virtualbox do |vb|
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
config.vm.provider :virtualbox do |vb, override|
override.vm.provision :shell, :inline => $vbox_script
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
end
end
# If this is a version 1 config, virtualbox is the only option. A version 2
# config would have already been set in the above provider section.
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
config.vm.provision :shell, :inline => $vbox_script
end
if !FORWARD_DOCKER_PORTS.nil?
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
(49000..49900).each do |port|

137
api.go
View File

@ -23,7 +23,7 @@ import (
)
const (
APIVERSION = 1.6
APIVERSION = 1.7
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTHTTPPORT = 4243
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
@ -61,7 +61,10 @@ func parseMultipartForm(r *http.Request) error {
func httpError(w http.ResponseWriter, err error) {
statusCode := http.StatusInternalServerError
if strings.HasPrefix(err.Error(), "No such") {
// FIXME: this is brittle and should not be necessary.
// If we need to differentiate between different possible error types, we should
// create appropriate error types with clearly defined meaning.
if strings.Contains(err.Error(), "No such") {
statusCode = http.StatusNotFound
} else if strings.HasPrefix(err.Error(), "Bad parameter") {
statusCode = http.StatusBadRequest
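
Reviewer's note: the FIXME above argues for dedicated error types instead of matching on message prefixes. A self-contained sketch of that approach, with a hypothetical NotFoundError type (`errors.As` from modern Go is used purely for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// NotFoundError is a hypothetical typed error of the kind the FIXME proposes,
// letting handlers branch on type rather than on error-message text.
type NotFoundError struct{ What string }

func (e *NotFoundError) Error() string { return fmt.Sprintf("No such %s", e.What) }

// statusFor maps an error to an HTTP status without inspecting its text.
func statusFor(err error) int {
	var nf *NotFoundError
	if errors.As(err, &nf) {
		return http.StatusNotFound
	}
	return http.StatusInternalServerError
}

func main() {
	err := fmt.Errorf("lookup failed: %w", &NotFoundError{What: "container"})
	fmt.Println(statusFor(err)) // 404
}
```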
@ -146,13 +149,12 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
signal := 0
if r != nil {
s := r.Form.Get("signal")
if s != "" {
if s, err := strconv.Atoi(s); err != nil {
if s := r.Form.Get("signal"); s != "" {
s, err := strconv.Atoi(s)
if err != nil {
return err
} else {
signal = s
}
signal = s
}
}
if err := srv.ContainerKill(name, signal); err != nil {
@ -191,10 +193,23 @@ func getImagesJSON(srv *Server, version float64, w http.ResponseWriter, r *http.
return err
}
if version < 1.7 {
outs2 := []APIImagesOld{}
for _, ctnr := range outs {
outs2 = append(outs2, ctnr.ToLegacy()...)
}
return writeJSON(w, http.StatusOK, outs2)
}
return writeJSON(w, http.StatusOK, outs)
}
func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if version > 1.6 {
w.WriteHeader(http.StatusNotFound)
return fmt.Errorf("This is now implemented in the client.")
}
if err := srv.ImagesViz(w); err != nil {
return err
}
@ -299,13 +314,10 @@ func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *ht
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
ps_args := r.Form.Get("ps_args")
procsStr, err := srv.ContainerTop(name, ps_args)
procsStr, err := srv.ContainerTop(vars["name"], r.Form.Get("ps_args"))
if err != nil {
return err
}
return writeJSON(w, http.StatusOK, procsStr)
}
@ -333,13 +345,12 @@ func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *h
if version < 1.5 {
outs2 := []APIContainersOld{}
for _, ctnr := range outs {
outs2 = append(outs2, ctnr.ToLegacy())
outs2 = append(outs2, *ctnr.ToLegacy())
}
return writeJSON(w, http.StatusOK, outs2)
} else {
return writeJSON(w, http.StatusOK, outs)
}
return writeJSON(w, http.StatusOK, outs)
}
func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -465,15 +476,16 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *ht
w.Header().Set("Content-Type", "application/json")
}
sf := utils.NewStreamFormatter(version > 1.0)
imgID, err := srv.ImageInsert(name, url, path, w, sf)
err := srv.ImageInsert(name, url, path, w, sf)
if err != nil {
if sf.Used() {
w.Write(sf.FormatError(err))
return nil
}
return err
}
return writeJSON(w, http.StatusOK, &APIID{ID: imgID})
return nil
}
func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -522,47 +534,52 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
return nil
}
func getImagesGet(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
if version > 1.0 {
w.Header().Set("Content-Type", "application/x-tar")
}
return srv.ImageExport(name, w)
}
func postImagesLoad(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return srv.ImageLoad(r.Body)
}
func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
config := &Config{}
out := &APIRun{}
name := r.Form.Get("name")
if err := json.NewDecoder(r.Body).Decode(config); err != nil {
job := srv.Eng.Job("create", r.Form.Get("name"))
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
resolvConf, err := utils.GetResolvConf()
if err != nil {
return err
}
if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
if !job.GetenvBool("NetworkDisabled") && len(job.Getenv("Dns")) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
config.Dns = defaultDns
job.SetenvList("Dns", defaultDns)
}
id, warnings, err := srv.ContainerCreate(config, name)
if err != nil {
// Read container ID from the first line of stdout
job.StdoutParseString(&out.ID)
// Read warnings from stderr
job.StderrParseLines(&out.Warnings, 0)
if err := job.Run(); err != nil {
return err
}
out.ID = id
for _, warning := range warnings {
out.Warnings = append(out.Warnings, warning)
}
if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
}
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.SwapLimit {
log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
}
if !config.NetworkDisabled && srv.runtime.capabilities.IPv4ForwardingDisabled {
if !job.GetenvBool("NetworkDisabled") && srv.runtime.capabilities.IPv4ForwardingDisabled {
log.Println("Warning: IPv4 forwarding is disabled.")
out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
}
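
The handler now drives container creation through an engine job rather than decoding a Config struct directly. A toy model of that pattern, with method names mirrored from this diff (`Setenv`, `GetenvBool`, `GetenvInt`) but otherwise hypothetical, not Docker's actual engine package:

```go
package main

import (
	"fmt"
	"strconv"
)

// Job models configuration travelling as string key/value pairs on a job's
// environment, read back with typed getters, as the handler above does.
type Job struct{ env map[string]string }

func (j *Job) Setenv(k, v string)       { j.env[k] = v }
func (j *Job) Getenv(k string) string   { return j.env[k] }
func (j *Job) GetenvBool(k string) bool { v, _ := strconv.ParseBool(j.env[k]); return v }
func (j *Job) GetenvInt(k string) int   { n, _ := strconv.Atoi(j.env[k]); return n }

func main() {
	job := &Job{env: map[string]string{}}
	job.Setenv("NetworkDisabled", "false")
	job.Setenv("Memory", "1048576")
	fmt.Println(job.GetenvBool("NetworkDisabled"), job.GetenvInt("Memory")) // false 1048576
}
```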
@ -629,36 +646,28 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.R
if imgs != nil {
if len(imgs) != 0 {
return writeJSON(w, http.StatusOK, imgs)
} else {
return fmt.Errorf("Conflict, %s wasn't deleted", name)
}
} else {
w.WriteHeader(http.StatusNoContent)
return fmt.Errorf("Conflict, %s wasn't deleted", name)
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var hostConfig *HostConfig
// allow a nil body for backwards compatibility
if r.Body != nil {
if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
hostConfig = &HostConfig{}
if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
return err
}
}
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
// Register any links from the host config before starting the container
if err := srv.RegisterLinks(name, hostConfig); err != nil {
return err
job := srv.Eng.Job("start", name)
// allow a nil body for backwards compatibility
if r.Body != nil {
if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
}
}
if err := srv.ContainerStart(name, hostConfig); err != nil {
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@ -922,7 +931,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
if err != nil {
return err
}
c, err := mkBuildContext(string(dockerFile), nil)
c, err := MkBuildContext(string(dockerFile), nil)
if err != nil {
return err
}
@ -970,7 +979,7 @@ func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *
}
if copyData.Resource == "" {
return fmt.Errorf("Resource cannot be empty")
return fmt.Errorf("Path cannot be empty")
}
if copyData.Resource[0] == '/' {
copyData.Resource = copyData.Resource[1:]
@ -1039,6 +1048,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/images/json": getImagesJSON,
"/images/viz": getImagesViz,
"/images/search": getImagesSearch,
"/images/{name:.*}/get": getImagesGet,
"/images/{name:.*}/history": getImagesHistory,
"/images/{name:.*}/json": getImagesByName,
"/containers/ps": getContainersJSON,
@ -1055,6 +1065,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/build": postBuild,
"/images/create": postImagesCreate,
"/images/{name:.*}/insert": postImagesInsert,
"/images/load": postImagesLoad,
"/images/{name:.*}/push": postImagesPush,
"/images/{name:.*}/tag": postImagesTag,
"/containers/create": postContainersCreate,
@ -1100,6 +1111,20 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
return r, nil
}
// ServeRequest processes a single http request to the docker remote api.
// FIXME: refactor this to be part of Server and not require re-creating a new
// router each time. This requires first moving ListenAndServe into Server.
func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *http.Request) error {
router, err := createRouter(srv, false)
if err != nil {
return err
}
// Insert APIVERSION into the request as a convenience
req.URL.Path = fmt.Sprintf("/v%g%s", apiversion, req.URL.Path)
router.ServeHTTP(w, req)
return nil
}
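
A sketch of the refactor this FIXME proposes, with the router built once and hung off the server; http.ServeMux stands in for the gorilla/mux router and all names here are hypothetical:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// apiServer owns a router built once at construction time instead of
// recreating it on every request.
type apiServer struct {
	router *http.ServeMux
}

func newAPIServer() *apiServer {
	m := http.NewServeMux()
	m.HandleFunc("/v1.7/images/json", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "[]")
	})
	return &apiServer{router: m}
}

// ServeRequest prefixes the API version, as the wrapper above does, then
// dispatches on the pre-built router.
func (s *apiServer) ServeRequest(apiversion float64, w http.ResponseWriter, req *http.Request) {
	req.URL.Path = fmt.Sprintf("/v%g%s", apiversion, req.URL.Path)
	s.router.ServeHTTP(w, req)
}

func main() {
	srv := newAPIServer()
	rec := httptest.NewRecorder()
	srv.ServeRequest(1.7, rec, httptest.NewRequest("GET", "/images/json", nil))
	fmt.Print(rec.Body.String()) // []
}
```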
func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)

View File

@ -1,119 +1,148 @@
package docker
type APIHistory struct {
ID string `json:"Id"`
Tags []string `json:",omitempty"`
Created int64
CreatedBy string `json:",omitempty"`
Size int64
import "strings"
type (
APIHistory struct {
ID string `json:"Id"`
Tags []string `json:",omitempty"`
Created int64
CreatedBy string `json:",omitempty"`
Size int64
}
APIImages struct {
ID string `json:"Id"`
RepoTags []string `json:",omitempty"`
Created int64
Size int64
VirtualSize int64
ParentId string `json:",omitempty"`
}
APIImagesOld struct {
Repository string `json:",omitempty"`
Tag string `json:",omitempty"`
ID string `json:"Id"`
Created int64
Size int64
VirtualSize int64
}
APIInfo struct {
Debug bool
Containers int
Images int
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
IPv4Forwarding bool `json:",omitempty"`
LXCVersion string `json:",omitempty"`
NEventsListener int `json:",omitempty"`
KernelVersion string `json:",omitempty"`
IndexServerAddress string `json:",omitempty"`
}
APITop struct {
Titles []string
Processes [][]string
}
APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`
}
APIContainers struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports []APIPort
SizeRw int64
SizeRootFs int64
Names []string
}
APIContainersOld struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports string
SizeRw int64
SizeRootFs int64
}
APIID struct {
ID string `json:"Id"`
}
APIRun struct {
ID string `json:"Id"`
Warnings []string `json:",omitempty"`
}
APIPort struct {
PrivatePort int64
PublicPort int64
Type string
IP string
}
APIVersion struct {
Version string
GitCommit string `json:",omitempty"`
GoVersion string `json:",omitempty"`
}
APIWait struct {
StatusCode int
}
APIAuth struct {
Status string
}
APIImageConfig struct {
ID string `json:"Id"`
*Config
}
APICopy struct {
Resource string
HostPath string
}
)
func (api APIImages) ToLegacy() []APIImagesOld {
outs := []APIImagesOld{}
for _, repotag := range api.RepoTags {
components := strings.SplitN(repotag, ":", 2)
outs = append(outs, APIImagesOld{
ID: api.ID,
Repository: components[0],
Tag: components[1],
Created: api.Created,
Size: api.Size,
VirtualSize: api.VirtualSize,
})
}
return outs
}
type APIImages struct {
Repository string `json:",omitempty"`
Tag string `json:",omitempty"`
ID string `json:"Id"`
Created int64
Size int64
VirtualSize int64
}
type APIInfo struct {
Debug bool
Containers int
Images int
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
IPv4Forwarding bool `json:",omitempty"`
LXCVersion string `json:",omitempty"`
NEventsListener int `json:",omitempty"`
KernelVersion string `json:",omitempty"`
IndexServerAddress string `json:",omitempty"`
}
type APITop struct {
Titles []string
Processes [][]string
}
type APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`
}
type APIContainers struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports []APIPort
SizeRw int64
SizeRootFs int64
Names []string
}
func (self *APIContainers) ToLegacy() APIContainersOld {
return APIContainersOld{
ID: self.ID,
Image: self.Image,
Command: self.Command,
Created: self.Created,
Status: self.Status,
Ports: displayablePorts(self.Ports),
SizeRw: self.SizeRw,
SizeRootFs: self.SizeRootFs,
func (api APIContainers) ToLegacy() *APIContainersOld {
return &APIContainersOld{
ID: api.ID,
Image: api.Image,
Command: api.Command,
Created: api.Created,
Status: api.Status,
Ports: displayablePorts(api.Ports),
SizeRw: api.SizeRw,
SizeRootFs: api.SizeRootFs,
}
}
type APIContainersOld struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports string
SizeRw int64
SizeRootFs int64
}
type APIID struct {
ID string `json:"Id"`
}
type APIRun struct {
ID string `json:"Id"`
Warnings []string `json:",omitempty"`
}
type APIPort struct {
PrivatePort int64
PublicPort int64
Type string
IP string
}
type APIVersion struct {
Version string
GitCommit string `json:",omitempty"`
GoVersion string `json:",omitempty"`
}
type APIWait struct {
StatusCode int
}
type APIAuth struct {
Status string
}
type APIImageConfig struct {
ID string `json:"Id"`
*Config
}
type APICopy struct {
Resource string
HostPath string
}

19
api_unit_test.go Normal file
View File

@ -0,0 +1,19 @@
package docker
import (
"testing"
)
func TestJsonContentType(t *testing.T) {
if !matchesContentType("application/json", "application/json") {
t.Fail()
}
if !matchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if matchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}
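
A plausible implementation consistent with these tests, shown only as a sketch (Docker's actual matchesContentType may differ): compare the media type and ignore parameters such as charset.

```go
package main

import (
	"fmt"
	"mime"
)

// matchesContentType reports whether contentType's media type (parameters
// like "; charset=utf-8" stripped) equals the expected type.
func matchesContentType(contentType, expected string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mimetype == expected
}

func main() {
	fmt.Println(matchesContentType("application/json; charset=utf-8", "application/json")) // true
	fmt.Println(matchesContentType("dockerapplication/json", "application/json"))          // false
}
```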

View File

@ -109,16 +109,17 @@ func Untar(archive io.Reader, path string) error {
buf := make([]byte, 10)
totalN := 0
for totalN < 10 {
if n, err := archive.Read(buf[totalN:]); err != nil {
n, err := archive.Read(buf[totalN:])
if err != nil {
if err == io.EOF {
return fmt.Errorf("Tarball too short")
}
return err
} else {
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
}
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
}
compression := DetectCompression(buf)
utils.Debugf("Archive compression detected: %s", compression.Extension())

View File

@ -196,10 +196,9 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
if loginAgainstOfficialIndex {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
} else {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
}
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
} else if reqStatusCode == 400 {
if string(reqBody) == "\"Username or email already exists\"" {
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)

View File

@ -1,11 +1,8 @@
package auth
import (
"crypto/rand"
"encoding/hex"
"io/ioutil"
"os"
"strings"
"testing"
)
@ -29,52 +26,6 @@ func TestEncodeAuth(t *testing.T) {
}
}
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}
func setupTempConfigFile() (*ConfigFile, error) {
root, err := ioutil.TempDir("", "docker-test-auth")
if err != nil {

The file diff is not shown because it is too large. Load diff

View File

@ -1,15 +1,14 @@
package docker
import (
"net"
"github.com/dotcloud/docker/engine"
"net"
)
// FIXME: separate runtime configuration from http api configuration
type DaemonConfig struct {
Pidfile string
Root string
ProtoAddresses []string
AutoRestart bool
EnableCors bool
Dns []string
@ -36,7 +35,6 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
} else {
config.BridgeIface = DefaultNetworkBridge
}
config.ProtoAddresses = job.GetenvList("ProtoAddresses")
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
return &config

149
config_test.go Normal file
View File

@ -0,0 +1,149 @@
package docker
import (
"testing"
)
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}
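
The Env expectations above boil down to: user values win per variable name, image-only variables are kept. A hypothetical standalone helper illustrating just that rule (not Docker's MergeConfig):

```go
package main

import (
	"fmt"
	"strings"
)

// mergeEnv keeps all user entries and appends image entries whose variable
// name the user did not set.
func mergeEnv(user, image []string) []string {
	seen := make(map[string]bool)
	out := append([]string{}, user...)
	for _, kv := range user {
		seen[strings.SplitN(kv, "=", 2)[0]] = true
	}
	for _, kv := range image {
		if !seen[strings.SplitN(kv, "=", 2)[0]] {
			out = append(out, kv)
		}
	}
	return out
}

func main() {
	fmt.Println(mergeEnv([]string{"VAR2=3", "VAR3=3"}, []string{"VAR1=1", "VAR2=2"}))
	// [VAR2=3 VAR3=3 VAR1=1]
}
```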

View File

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/term"
@ -20,11 +19,14 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
)
type Container struct {
sync.Mutex
root string
ID string
@ -133,7 +135,11 @@ type PortBinding struct {
type Port string
func (p Port) Proto() string {
return strings.Split(string(p), "/")[1]
parts := strings.Split(string(p), "/")
if len(parts) == 1 {
return "tcp"
}
return parts[1]
}
func (p Port) Port() string {
@ -152,206 +158,6 @@ func NewPort(proto, port string) Port {
return Port(fmt.Sprintf("%s/%s", port, proto))
}
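
With this change a bare port spec such as "80" no longer panics in Proto() (the old code indexed past the end of the split) and instead defaults to tcp. A self-contained usage sketch of the same logic:

```go
package main

import (
	"fmt"
	"strings"
)

// Port mirrors the type above: "port" or "port/proto", with the protocol
// defaulting to tcp when the suffix is omitted.
type Port string

func (p Port) Proto() string {
	parts := strings.Split(string(p), "/")
	if len(parts) == 1 {
		return "tcp"
	}
	return parts[1]
}

func main() {
	fmt.Println(Port("80").Proto())       // tcp (default; the old code would panic here)
	fmt.Println(Port("6000/udp").Proto()) // udp
}
```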
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
if os.Getenv("TEST") != "" {
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
}
flHostname := cmd.String("h", "", "Container host name")
flWorkingDir := cmd.String("w", "", "Working directory inside the container")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
flNetwork := cmd.Bool("n", true, "Enable networking for this container")
flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)")
cmd.String("name", "", "Assign a name to the container")
flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
var flPublish utils.ListOpts
cmd.Var(&flPublish, "p", "Publish a container's port to the host (use 'docker port' to see the actual mapping)")
var flExpose utils.ListOpts
cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
var flEnv utils.ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns utils.ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
var flVolumesFrom utils.ListOpts
cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
var flLxcOpts utils.ListOpts
cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
var flLinks utils.ListOpts
cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
if *flDetach && len(flAttach) > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
envs := []string{}
for _, env := range flEnv {
arr := strings.Split(env, "=")
if len(arr) > 1 {
envs = append(envs, env)
} else {
v := os.Getenv(env)
envs = append(envs, env+"="+v)
}
}
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes {
arr := strings.Split(bind, ":")
if len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
binds = append(binds, bind)
delete(flVolumes, bind)
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
entrypoint := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
var lxcConf []KeyValuePair
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
hostname := *flHostname
domainname := ""
parts := strings.SplitN(hostname, ".", 2)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
ports, portBindings, err := parsePortSpecs(flPublish)
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: *flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: envs,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: strings.Join(flVolumesFrom, ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks,
PublishAllPorts: *flPublishAll,
}
if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
type PortMapping map[string]string // Deprecated
type NetworkSettings struct {
@ -394,9 +200,9 @@ func (container *Container) Inject(file io.Reader, pth string) error {
if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil {
// Since err is nil, the path could be stat'd and it exists
return fmt.Errorf("%s exists", pth)
} else if ! os.IsNotExist(err) {
} else if !os.IsNotExist(err) {
// Expect err might be that the file doesn't exist, so
// if it's some other error, return that.
// if it's some other error, return that.
return err
}
@ -684,26 +490,28 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
}
func (container *Container) Start() (err error) {
container.State.Lock()
defer container.State.Unlock()
container.Lock()
defer container.Unlock()
if container.State.IsRunning() {
return fmt.Errorf("The container %s is already running.", container.ID)
}
defer func() {
if err != nil {
container.cleanup()
}
}()
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.ID)
}
if err := container.EnsureMounted(); err != nil {
return err
}
if container.runtime.networkManager.disabled {
container.Config.NetworkDisabled = true
container.buildHostnameAndHostsFiles("127.0.1.1")
} else {
if err := container.allocateNetwork(); err != nil {
return err
}
container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
}
// Make sure the config is compatible with the current kernel
@ -763,9 +571,23 @@ func (container *Container) Start() (err error) {
// Apply volumes from another container if requested
if container.Config.VolumesFrom != "" {
volumes := strings.Split(container.Config.VolumesFrom, ",")
for _, v := range volumes {
c := container.runtime.Get(v)
containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
for _, containerSpec := range containerSpecs {
mountRW := true
specParts := strings.SplitN(containerSpec, ":", 2)
switch len(specParts) {
case 0:
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
case 2:
switch specParts[1] {
case "ro":
mountRW = false
case "rw": // mountRW is already true
default:
return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
}
}
c := container.runtime.Get(specParts[0])
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
}
@ -778,7 +600,7 @@ func (container *Container) Start() (err error) {
}
container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW
container.VolumesRW[volPath] = isRW && mountRW
}
}
@ -819,7 +641,7 @@ func (container *Container) Start() (err error) {
// Create the mountpoint
rootVolPath := path.Join(container.RootfsPath(), volPath)
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
return nil
return err
}
// Do not copy or change permissions if we are mounting from the host
@ -863,7 +685,13 @@ func (container *Container) Start() (err error) {
return err
}
var lxcStart string = "lxc-start"
if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
}
params := []string{
lxcStart,
"-n", container.ID,
"-f", container.lxcConfigPath(),
"--",
@ -956,11 +784,24 @@ func (container *Container) Start() (err error) {
params = append(params, "--", container.Path)
params = append(params, container.Args...)
var lxcStart string = "lxc-start"
if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
if RootIsShared() {
// lxc-start really needs / to be non-shared, or all kinds of stuff break
// when lxc-start unmount things and those unmounts propagate to the main
// mount namespace.
// What we really want is to clone into a new namespace and then
// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
// without exec in go we have to do this horrible shell hack...
shellString :=
"mount --make-rslave /; exec " +
utils.ShellQuoteArguments(params)
params = []string{
"unshare", "-m", "--", "/bin/sh", "-c", shellString,
}
}
container.cmd = exec.Command(lxcStart, params...)
container.cmd = exec.Command(params[0], params[1:]...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
@ -981,7 +822,7 @@ func (container *Container) Start() (err error) {
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
container.State.SetRunning(container.cmd.Process.Pid)
// Init the lock
container.waitLock = make(chan struct{})
@ -989,14 +830,14 @@ func (container *Container) Start() (err error) {
container.ToDisk()
go container.monitor()
defer utils.Debugf("Container running: %v", container.State.Running)
defer utils.Debugf("Container running: %v", container.State.IsRunning())
// We wait for the container to be fully running.
// Timeout after 5 seconds. In case of broken pipe, just retry.
// Note: The container can run and finish correctly before
// the end of this loop
for now := time.Now(); time.Since(now) < 5*time.Second; {
// If the container dies while waiting for it, just return
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
@ -1013,11 +854,11 @@ func (container *Container) Start() (err error) {
if strings.Contains(string(output), "RUNNING") {
return nil
}
utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.Running, bytes.TrimSpace(output))
utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.IsRunning(), bytes.TrimSpace(output))
time.Sleep(50 * time.Millisecond)
}
if container.State.Running {
if container.State.IsRunning() {
return ErrContainerStartTimeout
}
return ErrContainerStart
@ -1069,16 +910,41 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
return utils.NewBufReader(reader), nil
}
func (container *Container) buildHostnameAndHostsFiles(IP string) {
container.HostnamePath = path.Join(container.root, "hostname")
ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
hostsContent := []byte(`
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
`)
container.HostsPath = path.Join(container.root, "hosts")
if container.Config.Domainname != "" {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
} else {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
}
ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
}
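
For illustration with hypothetical values: given IP 172.17.0.2, Hostname "web", and Domainname "example.com", the function above produces a hosts file like:

```
172.17.0.2	web.example.com web

127.0.0.1	localhost
::1	localhost ip6-localhost ip6-loopback
fe00::0	ip6-localnet
ff00::0	ip6-mcastprefix
ff02::1	ip6-allnodes
ff02::2	ip6-allrouters
```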
func (container *Container) allocateNetwork() error {
if container.Config.NetworkDisabled {
return nil
}
var iface *NetworkInterface
var err error
if container.State.Ghost {
manager := container.runtime.networkManager
if manager.disabled {
var (
iface *NetworkInterface
err error
)
if container.State.IsGhost() {
if manager := container.runtime.networkManager; manager.disabled {
iface = &NetworkInterface{disabled: true}
} else {
iface = &NetworkInterface{
@ -1086,7 +952,7 @@ func (container *Container) allocateNetwork() error {
Gateway: manager.bridgeNetwork.IP,
manager: manager,
}
if iface !=nil && iface.IPNet.IP != nil {
if iface != nil && iface.IPNet.IP != nil {
ipNum := ipToInt(iface.IPNet.IP)
manager.ipAllocator.inUse[ipNum] = struct{}{}
} else {
@ -1114,10 +980,12 @@ func (container *Container) allocateNetwork() error {
}
}
portSpecs := make(map[Port]struct{})
bindings := make(map[Port][]PortBinding)
var (
portSpecs = make(map[Port]struct{})
bindings = make(map[Port][]PortBinding)
)
if !container.State.Ghost {
if !container.State.IsGhost() {
if container.Config.ExposedPorts != nil {
portSpecs = container.Config.ExposedPorts
}
@ -1213,11 +1081,8 @@ func (container *Container) monitor() {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Report status back
container.State.setStopped(exitCode)
if container.runtime != nil && container.runtime.srv != nil {
container.runtime.srv.LogEvent("die", container.ShortID(), container.runtime.repositories.ImageName(container.Image))
container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
}
// Cleanup
@ -1228,6 +1093,9 @@ func (container *Container) monitor() {
container.stdin, container.stdinPipe = io.Pipe()
}
// Report status back
container.State.SetStopped(exitCode)
// Release the lock
close(container.waitLock)
@ -1276,15 +1144,15 @@ func (container *Container) cleanup() {
}
func (container *Container) kill(sig int) error {
container.State.Lock()
defer container.State.Unlock()
container.Lock()
defer container.Unlock()
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
if output, err := exec.Command("lxc-kill", "-n", container.ID, strconv.Itoa(sig)).CombinedOutput(); err != nil {
log.Printf("error killing container %s (%s, %s)", container.ShortID(), output, err)
log.Printf("error killing container %s (%s, %s)", utils.TruncateID(container.ID), output, err)
return err
}
@ -1292,7 +1160,7 @@ func (container *Container) kill(sig int) error {
}
func (container *Container) Kill() error {
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
@ -1304,9 +1172,9 @@ func (container *Container) Kill() error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ShortID())
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
}
log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", container.ShortID())
log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID))
if err := container.cmd.Process.Kill(); err != nil {
return err
}
@ -1317,7 +1185,7 @@ func (container *Container) Kill() error {
}
func (container *Container) Stop(seconds int) error {
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
@ -1351,7 +1219,7 @@ func (container *Container) Restart(seconds int) error {
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.ExitCode
return container.State.GetExitCode()
}
func (container *Container) Resize(h, w int) error {
@ -1442,14 +1310,6 @@ func (container *Container) Unmount() error {
return Unmount(container.RootfsPath())
}
// ShortID returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
will need to use a longer prefix, or the full-length container Id.
func (container *Container) ShortID() string {
return utils.TruncateID(container.ID)
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
}
@ -1493,20 +1353,46 @@ func validateID(id string) error {
// GetSize, return real size, virtual size
func (container *Container) GetSize() (int64, int64) {
var sizeRw, sizeRootfs int64
data := make(map[uint64]bool)
filepath.Walk(container.rwPath(), func(path string, fileInfo os.FileInfo, err error) error {
if fileInfo != nil {
sizeRw += fileInfo.Size()
if fileInfo == nil {
return nil
}
size := fileInfo.Size()
if size == 0 {
return nil
}
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
if _, entryExists := data[inode]; entryExists {
return nil
}
data[inode] = false
sizeRw += size
return nil
})
data = make(map[uint64]bool)
_, err := os.Stat(container.RootfsPath())
if err == nil {
filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
if fileInfo != nil {
sizeRootfs += fileInfo.Size()
if fileInfo == nil {
return nil
}
size := fileInfo.Size()
if size == 0 {
return nil
}
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
if _, entryExists := data[inode]; entryExists {
return nil
}
data[inode] = false
sizeRootfs += size
return nil
})
}

container_unit_test.go (new file, 161 lines)

@ -0,0 +1,161 @@
package docker
import (
"testing"
)
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestGetFullName(t *testing.T) {
name, err := getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}
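These port specs correspond to the formats accepted by the CLI's -p flag. As a rough illustration of the three forms exercised above (the image name is a placeholder):

    # dynamic host port on a specific interface (Docker picks the public port)
    docker run -p 192.168.1.100::80 some-image

    # explicit mapping: host 8080 -> container 80
    docker run -p 192.168.1.100:8080:80 some-image

    # udp instead of the default tcp
    docker run -p 192.168.1.100::6000/udp some-image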


@ -29,7 +29,9 @@ if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
if [ "$1" = start ] && which initctl >/dev/null && initctl version | grep -q upstart; then
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | /bin/grep -q upstart; then
log_failure_msg "Docker is managed via upstart, try using service $BASE $1"
exit 1
fi


@ -6,5 +6,10 @@ stop on runlevel [!2345]
respawn
script
/usr/bin/docker -d
DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
"$DOCKER" -d $DOCKER_OPTS
end script
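With this change the upstart job sources /etc/default/docker when present, so daemon flags can be set without editing the job itself. A minimal sketch of such a file (the option shown is illustrative, not a default):

    # /etc/default/docker
    # DOCKER_OPTS is appended to the daemon invocation by the upstart job
    DOCKER_OPTS="-D"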


@ -192,7 +192,7 @@ if [ "$justTar" ]; then
sudo tar --numeric-owner -caf "$repo" .
else
# create the image (and tag $repo:$suite)
sudo tar --numeric-owner -c . | $docker import - $repo $suite
sudo tar --numeric-owner -c . | $docker import - $repo:$suite
# test the image
$docker run -i -t $repo:$suite echo success
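The corrected invocation above passes the repository and tag to docker import as a single repo:tag argument. The same form works when importing any rootfs tarball by hand (names are placeholders):

    sudo tar --numeric-owner -c . | docker import - myrepo:precise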
@ -202,25 +202,25 @@ else
Debian)
if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
# tag latest
$docker tag $repo:$suite $repo latest
$docker tag $repo:$suite $repo:latest
if [ -r etc/debian_version ]; then
# tag the specific debian release version (which is only reasonable to tag on debian stable)
ver=$(cat etc/debian_version)
$docker tag $repo:$suite $repo $ver
$docker tag $repo:$suite $repo:$ver
fi
fi
;;
Ubuntu)
if [ "$suite" = "$ubuntuLatestLTS" ]; then
# tag latest
$docker tag $repo:$suite $repo latest
$docker tag $repo:$suite $repo:latest
fi
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific Ubuntu version number, if available (12.04, etc.)
$docker tag $repo:$suite $repo $lsbRelease
$docker tag $repo:$suite $repo:$lsbRelease
fi
fi
;;


@ -1,3 +1,19 @@
# Vagrant-docker
# Vagrant integration
This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider.
Currently there are at least 4 different projects that we are aware of that deal
with integration with [Vagrant](http://vagrantup.com/) at different levels. One
approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
which means you can create containers and pull base images on VMs using Docker's
CLI, and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
meaning you can use Vagrant to control Docker containers.
### Provisioners
* [Vocker](https://github.com/fgrehm/vocker)
* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
### Providers
* [docker-provider](https://github.com/fgrehm/docker-provider)
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)


@ -4,9 +4,9 @@ import (
"flag"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/engine"
"log"
"os"
"strings"
@ -71,7 +71,8 @@ func main() {
if err != nil {
log.Fatal(err)
}
job := eng.Job("serveapi")
// Load plugin: httpapi
job := eng.Job("initapi")
job.Setenv("Pidfile", *pidfile)
job.Setenv("Root", *flRoot)
job.SetenvBool("AutoRestart", *flAutoRestart)
@ -79,12 +80,17 @@ func main() {
job.Setenv("Dns", *flDns)
job.SetenvBool("EnableIptables", *flEnableIptables)
job.Setenv("BridgeIface", *bridgeName)
job.SetenvList("ProtoAddresses", flHosts)
job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
if err := job.Run(); err != nil {
log.Fatal(err)
}
// Serve api
job = eng.Job("serveapi", flHosts...)
job.SetenvBool("Logging", true)
if err := job.Run(); err != nil {
log.Fatal(err)
}
} else {
if len(flHosts) > 1 {
log.Fatal("Please specify only one -H")


@ -31,7 +31,7 @@ help:
# @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
# @echo " latexpdf to make LaTeX files and run them through pdflatex"
# @echo " text to make text files"
# @echo " man to make manual pages"
@echo " man to make a manual page"
# @echo " texinfo to make Texinfo files"
# @echo " info to make Texinfo files and run them through makeinfo"
# @echo " gettext to make PO message catalogs"


@ -1,2 +1,2 @@
Sphinx==1.1.3
sphinxcontrib-httpdomain==1.1.8
sphinxcontrib-httpdomain==1.1.9


@ -26,14 +26,118 @@ Docker Remote API
2. Versions
===========
The current version of the API is 1.6
The current version of the API is 1.7
Calling /images/<name>/insert is the same as calling
/v1.6/images/<name>/insert
/v1.7/images/<name>/insert
You can still call an old version of the api using
/v1.0/images/<name>/insert
v1.7
****
Full Documentation
------------------
:doc:`docker_remote_api_v1.7`
What's new
----------
.. http:get:: /images/json
The format of the JSON returned from this URI has changed. Instead of an entry
for each repo/tag on an image, each image is only represented once, with a
nested attribute indicating the repo/tags that apply to that image.
Instead of:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "12.04",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "latest",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "precise",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "12.10",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "quantal",
"Repository": "ubuntu"
}
]
The returned json looks like this:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"RepoTag": [
"ubuntu:12.04",
"ubuntu:precise",
"ubuntu:latest"
],
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Created": 1365714795,
"Size": 131506275,
"VirtualSize": 131506275
},
{
"RepoTag": [
"ubuntu:12.10",
"ubuntu:quantal"
],
"ParentId": "27cf784147099545",
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Created": 1364102658,
"Size": 24653,
"VirtualSize": 180116135
}
]
.. http:get:: /images/viz
This URI no longer exists. The ``images -viz`` output is now generated in
the client, using the ``/images/json`` data.
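To inspect the new format against a running daemon, the endpoint can be queried directly, assuming the daemon has been started with a TCP binding such as ``tcp://127.0.0.1:4243`` (an assumption of this sketch; the default is a unix socket):

.. code-block:: bash

    curl -s http://127.0.0.1:4243/v1.7/images/json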
v1.6
****


@ -121,8 +121,7 @@ Create a container
"AttachStdin":false,
"AttachStdout":true,
"AttachStderr":true,
"PortSpecs":null,
"Privileged": false,
"ExposedPorts":{},
"Tty":false,
"OpenStdin":false,
"StdinOnce":false,
@ -135,7 +134,6 @@ Create a container
"Volumes":{},
"VolumesFrom":"",
"WorkingDir":""
}
**Example response**:
@ -157,6 +155,57 @@ Create a container
:statuscode 406: impossible to attach (container not running)
:statuscode 500: server error
**More Complex Example request, in 2 steps.**
**First, use create to expose a Private Port, which can be bound back to a Public Port at startup**:
.. sourcecode:: http
POST /containers/create HTTP/1.1
Content-Type: application/json
{
"Cmd":[
"/usr/sbin/sshd","-D"
],
"Image":"image-with-sshd",
"ExposedPorts":{"22/tcp":{}}
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Content-Type: application/json
{
"Id":"e90e34656806"
"Warnings":[]
}
**Second, start (using the ID returned above) the image we just created, mapping the ssh port 22 to something on the host**:
.. sourcecode:: http
POST /containers/e90e34656806/start HTTP/1.1
Content-Type: application/json
{
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }]}
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 204 No Content
Content-Type: text/plain; charset=utf-8
Content-Length: 0
**Now you can ssh into your new container on port 11022.**
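The same two-step flow can be driven from the shell with ``curl``. This is only a sketch: it reuses the hypothetical image, container ID and port from the examples above and assumes a daemon bound to ``tcp://127.0.0.1:4243``:

.. code-block:: bash

    # step 1: create the container, exposing private port 22/tcp
    curl -s -H 'Content-Type: application/json' \
        -d '{"Image":"image-with-sshd","Cmd":["/usr/sbin/sshd","-D"],"ExposedPorts":{"22/tcp":{}}}' \
        http://127.0.0.1:4243/containers/create

    # step 2: start it, binding container port 22 to host port 11022
    curl -s -H 'Content-Type: application/json' \
        -d '{"PortBindings":{"22/tcp":[{"HostPort":"11022"}]}}' \
        http://127.0.0.1:4243/containers/e90e34656806/start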
Inspect a container
*******************
@ -191,7 +240,7 @@ Inspect a container
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"PortSpecs": null,
"ExposedPorts": {},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
@ -362,7 +411,12 @@ Start a container
{
"Binds":["/tmp:/tmp"],
"LxcConf":{"lxc.utsname":"docker"}
"LxcConf":{"lxc.utsname":"docker"},
"ContainerIDFile": "",
"Privileged": false,
"PortBindings": {"22/tcp": [{HostIp:"", HostPort:""}]},
"Links": [],
"PublishAllPorts": false
}
**Example response**:
@ -795,7 +849,7 @@ Inspect an image
"AttachStdin":false,
"AttachStdout":false,
"AttachStderr":false,
"PortSpecs":null,
"ExposedPorts":{},
"Tty":true,
"OpenStdin":true,
"StdinOnce":false,
@ -1141,7 +1195,7 @@ Create a new image from a container's changes
{
"Cmd": ["cat", "/world"],
"PortSpecs":["22"]
"ExposedPorts":{"22/tcp":{}}
}
**Example response**:

(The diff for this file is not shown because of its large size.)


@ -1,4 +1,4 @@
:title: Registry API
:title: Remote API Client Libraries
:description: Various client libraries available to use with the Docker remote API
:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go
@ -12,26 +12,28 @@ compatibility. Please file issues with the library owners. If you
find more library implementations, please list them in Docker doc bugs
and we will add the libraries here.
+----------------------+----------------+--------------------------------------------+
| Language/Framework | Name | Repository |
+======================+================+============================================+
| Python | docker-py | https://github.com/dotcloud/docker-py |
+----------------------+----------------+--------------------------------------------+
| Ruby | docker-client | https://github.com/geku/docker-client |
+----------------------+----------------+--------------------------------------------+
| Ruby | docker-api | https://github.com/swipely/docker-api |
+----------------------+----------------+--------------------------------------------+
| Javascript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io |
| | | Install via NPM: `npm install docker.io` |
+----------------------+----------------+--------------------------------------------+
| Javascript | docker-js | https://github.com/dgoujard/docker-js |
+----------------------+----------------+--------------------------------------------+
| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui |
| **WebUI** | | |
+----------------------+----------------+--------------------------------------------+
| Java | docker-java | https://github.com/kpelykh/docker-java |
+----------------------+----------------+--------------------------------------------+
| Erlang | erldocker | https://github.com/proger/erldocker |
+----------------------+----------------+--------------------------------------------+
| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient |
+----------------------+----------------+--------------------------------------------+
+----------------------+----------------+--------------------------------------------+----------+
| Language/Framework | Name | Repository | Status |
+======================+================+============================================+==========+
| Python | docker-py | https://github.com/dotcloud/docker-py | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby | docker-client | https://github.com/geku/docker-client | Outdated |
+----------------------+----------------+--------------------------------------------+----------+
| Ruby | docker-api | https://github.com/swipely/docker-api | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active |
| | | Install via NPM: `npm install docker.io` | |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript | docker-js | https://github.com/dgoujard/docker-js | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active |
| **WebUI** | | | |
+----------------------+----------------+--------------------------------------------+----------+
| Java | docker-java | https://github.com/kpelykh/docker-java | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Erlang | erldocker | https://github.com/proger/erldocker | Active |
+----------------------+----------------+--------------------------------------------+----------+
| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient | Active |
+----------------------+----------------+--------------------------------------------+----------+
| PHP | Alvine | http://pear.alvine.io/ (alpha) | Active |
+----------------------+----------------+--------------------------------------------+----------+


@ -88,31 +88,65 @@ Examples:
Usage: docker build [OPTIONS] PATH | URL | -
Build a new container image from the source code at PATH
-t="": Repository name (and optionally a tag) to be applied to the resulting image in case of success.
-t="": Repository name (and optionally a tag) to be applied
to the resulting image in case of success.
-q=false: Suppress verbose build output.
-no-cache: Do not use the cache when building the image.
-rm: Remove intermediate containers after a successful build
When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context
The files at PATH or URL are called the "context" of the build. The
build process may refer to any of the files in the context, for
example when using an :ref:`ADD <dockerfile_add>` instruction. When a
single ``Dockerfile`` is given as URL, then no context is set. When a
git repository is set as URL, then the repository is used as the
context.
.. _cli_build_examples:
.. seealso:: :ref:`dockerbuilder`.
Examples:
~~~~~~~~~
.. code-block:: bash
sudo docker build .
Uploading context 10240 bytes
Step 1 : FROM busybox
Pulling repository busybox
---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/
Step 2 : RUN ls -lh /
---> Running in 9c9e81692ae9
total 24
drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin
drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev
drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib
lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib
dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc
lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin
dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys
drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr
---> b35f4035db3f
Step 3 : CMD echo Hello World
---> Running in 02071fceb21b
---> f52f38b7823e
Successfully built f52f38b7823e
This will read the ``Dockerfile`` from the current directory. It will
also send any other files and directories found in the current
directory to the ``docker`` daemon.
This example specifies that the PATH is ``.``, and so all the files in
the local directory get tar'd and sent to the Docker daemon. The PATH
specifies where to find the files for the "context" of the build on
the Docker daemon. Remember that the daemon could be running on a
remote machine and that no parsing of the Dockerfile happens at the
client side (where you're running ``docker build``). That means that
*all* the files at PATH get sent, not just the ones listed to
:ref:`ADD <dockerfile_add>` in the ``Dockerfile``.
The transfer of context from the local machine to the Docker daemon is
what the ``docker`` client means when you see the "Uploading context"
message.
The contents of this directory would be used by ``ADD`` commands found
within the ``Dockerfile``. This will send a lot of data to the
``docker`` daemon if the current directory contains a lot of data. If
the absolute path is provided instead of ``.`` then only the files and
directories required by the ADD commands from the ``Dockerfile`` will be
added to the context and transferred to the ``docker`` daemon.
.. code-block:: bash
@ -129,16 +163,15 @@ tag will be ``2.0``
This will read a ``Dockerfile`` from *stdin* without context. Due to
the lack of a context, no contents of any local directory will be sent
to the ``docker`` daemon. ``ADD`` doesn't work when running in this
mode because the absence of the context provides no source files to
copy to the container.
to the ``docker`` daemon. Since there is no context, a Dockerfile
``ADD`` only works if it refers to a remote URL.
.. code-block:: bash
sudo docker build github.com/creack/docker-firefox
This will clone the Github repository and use it as context. The
``Dockerfile`` at the root of the repository is used as
This will clone the Github repository and use the cloned repository as
context. The ``Dockerfile`` at the root of the repository is used as
``Dockerfile``. Note that you can specify an arbitrary git repository
by using the ``git://`` schema.
@ -157,7 +190,7 @@ by using the ``git://`` schema.
-m="": Commit message
-author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-run="": Configuration to be applied when the image is launched with `docker run`.
(ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
Simple commit of an existing container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -173,7 +206,7 @@ Simple commit of an existing container
$ docker images | head
REPOSITORY TAG ID CREATED SIZE
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 204.2 MB (virtual 335.7 MB)
Full -run example
.................
@ -219,10 +252,15 @@ Full -run example
::
Usage: docker cp CONTAINER:RESOURCE HOSTPATH
Usage: docker cp CONTAINER:PATH HOSTPATH
Copy files/folders from the containers filesystem to the host
path. Paths are relative to the root of the filesystem.
.. code-block:: bash
$ sudo docker cp 7bb0e258aefe:/etc/debian_version .
$ sudo docker cp blue_frog:/etc/hosts .
.. _cli_diff:
@ -231,9 +269,33 @@ Full -run example
::
Usage: docker diff CONTAINER [OPTIONS]
Usage: docker diff CONTAINER
List the changed files and directories in a container's filesystem
Inspect changes on a container's filesystem
There are 3 events that are listed in the 'diff':
1. ```A``` - Add
2. ```D``` - Delete
3. ```C``` - Change
for example:
.. code-block:: bash
$ sudo docker diff 7bb0e258aefe
C /dev
A /dev/kmsg
C /etc
A /etc/mtab
A /go
A /go/src
A /go/src/github.com
A /go/src/github.com/dotcloud
A /go/src/github.com/dotcloud/docker
A /go/src/github.com/dotcloud/docker/.git
....
.. _cli_events:
@ -245,6 +307,9 @@ Full -run example
Usage: docker events
Get real time events from the server
-since="": Show previously created events and then stream.
(either seconds since epoch, or date string as below)
.. _cli_events_example:
@ -277,6 +342,23 @@ Shell 1: (Again .. now showing events)
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
Show events in the past from a specified time
.............................................
.. code-block:: bash
$ sudo docker events -since 1378216169
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03'
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
.. _cli_export:
@ -303,6 +385,40 @@ Shell 1: (Again .. now showing events)
-notrunc=false: Don't truncate output
-q=false: only show numeric IDs
To see how the docker:latest image was built:
.. code-block:: bash
$ docker history docker
ID CREATED CREATED BY
docker:latest 19 hours ago /bin/sh -c #(nop) ADD . in /go/src/github.com/dotcloud/docker
cf5f2467662d 2 weeks ago /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"]
3538fbe372bf 2 weeks ago /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker
7450f65072e5 2 weeks ago /bin/sh -c #(nop) VOLUME /var/lib/docker
b79d62b97328 2 weeks ago /bin/sh -c apt-get install -y -q lxc
36714852a550 2 weeks ago /bin/sh -c apt-get install -y -q iptables
8c4c706df1d6 2 weeks ago /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
b89989433c48 2 weeks ago /bin/sh -c pip install python-magic
a23e640d85b5 2 weeks ago /bin/sh -c pip install s3cmd
41f54fec7e79 2 weeks ago /bin/sh -c apt-get install -y -q python-pip
d9bc04add907 2 weeks ago /bin/sh -c apt-get install -y -q reprepro dpkg-sig
e74f4760fa70 2 weeks ago /bin/sh -c gem install --no-rdoc --no-ri fpm
1e43224726eb 2 weeks ago /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev
460953ae9d7f 2 weeks ago /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor
8b63eb1d666b 2 weeks ago /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin
3087f3bcedf2 2 weeks ago /bin/sh -c #(nop) ENV GOROOT=/goroot
635840d198e5 2 weeks ago /bin/sh -c cd /goroot/src && ./make.bash
439f4a0592ba 2 weeks ago /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot
13967ed36e93 2 weeks ago /bin/sh -c #(nop) ENV CGO_ENABLED=0
bf7424458437 2 weeks ago /bin/sh -c apt-get install -y -q build-essential
a89ec997c3bf 2 weeks ago /bin/sh -c apt-get install -y -q mercurial
b9f165c6e749 2 weeks ago /bin/sh -c apt-get install -y -q git
17a64374afa7 2 weeks ago /bin/sh -c apt-get install -y -q curl
d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update
13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
ubuntu:12.04 6 months ago
.. _cli_images:
``images``
@ -314,20 +430,86 @@ Shell 1: (Again .. now showing events)
List images
-a=false: show all images
-a=false: show all images (by default filter out the intermediate images used to build)
-notrunc=false: Don't truncate output
-q=false: only show numeric IDs
-viz=false: output in graphviz format
-tree=false: output graph in tree format
-viz=false: output graph in graphviz format
Listing the most recently created images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ sudo docker images | head
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> 77af4d6b9913 19 hours ago 30.53 MB (virtual 1.089 GB)
committest latest b6fa739cedf5 19 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 78a85c484f71 19 hours ago 30.53 MB (virtual 1.089 GB)
docker latest 30557a29d5ab 20 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 0124422dd9f9 20 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 18ad6fad3402 22 hours ago 23.68 MB (virtual 1.082 GB)
<none> <none> f9f1e26352f0 23 hours ago 30.46 MB (virtual 1.089 GB)
tryout latest 2629d1fa0b81 23 hours ago 16.4 kB (virtual 131.5 MB)
<none> <none> 5ed6274db6ce 24 hours ago 30.44 MB (virtual 1.089 GB)
Listing the full length image IDs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ sudo docker images -notrunc | head
REPOSITORY TAG IMAGE ID CREATED SIZE
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 30.53 MB (virtual 1.089 GB)
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 30.53 MB (virtual 1.089 GB)
docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 30.53 MB (virtual 1.089 GB)
<none> <none> 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 23.68 MB (virtual 1.082 GB)
<none> <none> f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 30.46 MB (virtual 1.089 GB)
tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 16.4 kB (virtual 131.5 MB)
<none> <none> 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 30.44 MB (virtual 1.089 GB)
Displaying images visually
~~~~~~~~~~~~~~~~~~~~~~~~~~
::
.. code-block:: bash
sudo docker images -viz | dot -Tpng -o docker.png
$ sudo docker images -viz | dot -Tpng -o docker.png
.. image:: docker_images.gif
:alt: Example inheritance graph of Docker images.
Displaying image hierarchy
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ sudo docker images -tree
|─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
└─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
└─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal
|─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
| └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
| └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
| └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
| └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
| └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
└─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB)
└─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB)
└─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB)
└─b1b0235328dd Size: 363.3 MB (virtual 640.2 MB)
└─7cb05d1acb3b Size: 20.48 kB (virtual 640.2 MB)
└─47bf6f34832d Size: 20.48 kB (virtual 640.2 MB)
└─f165104e82ed Size: 12.29 kB (virtual 640.2 MB)
└─d9cf85a47b7e Size: 1.911 MB (virtual 642.2 MB)
└─3ee562df86ca Size: 17.07 kB (virtual 642.2 MB)
└─b05fc2d00e4a Size: 24.96 kB (virtual 642.2 MB)
└─c96a99614930 Size: 12.29 kB (virtual 642.2 MB)
└─a6a357a48c49 Size: 12.29 kB (virtual 642.2 MB) Tags: ndj/mongodb:latest
.. _cli_import:
``import``
@ -383,6 +565,21 @@ might not get preserved.
Display system-wide information.
.. code-block:: bash
$ sudo docker info
Containers: 292
Images: 194
Debug mode (server): false
Debug mode (client): false
Fds: 22
Goroutines: 67
LXC Version: 0.9.0
EventsListeners: 115
Kernel Version: 3.8.0-33-generic
WARNING: No swap limit support
.. _cli_insert:
``insert``
@ -428,6 +625,24 @@ Insert file from github
The main process inside the container will be sent SIGKILL.
Known Issues (kill)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
.. _cli_load:
``load``
--------
::
Usage: docker load < repository.tar
Loads a tarred repository from the standard input stream.
Restores both images and tags.
.. _cli_login:
``login``
@ -536,6 +751,12 @@ The main process inside the container will be sent SIGKILL.
Remove one or more containers
-link="": Remove the link instead of the actual container
Known Issues (rm)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
Examples:
~~~~~~~~~
@ -558,6 +779,15 @@ This will remove the container referenced under the link ``/redis``.
This will remove the underlying link between ``/webapp`` and the ``/redis`` containers removing all
network communication.
.. code-block:: bash
$ docker rm `docker ps -a -q`
This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
existing container IDs and pass them to the ``rm`` command which will delete them. Any running
containers will not be deleted.
.. _cli_rmi:
``rmi``
@ -588,7 +818,7 @@ network communication.
-h="": Container host name
-i=false: Keep stdin open even if not attached
-privileged=false: Give extended privileges to this container
-m=0: Memory limit (in bytes)
-m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
-n=true: Enable networking for this container
-p=[]: Map a network port to the container
-rm=false: Automatically remove the container when it exits (incompatible with -d)
@ -596,7 +826,7 @@ network communication.
-u="": Username or UID
-dns=[]: Set custom dns servers for the container
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container
-volumes-from="": Mount all volumes from the given container(s)
-entrypoint="": Overwrite the default entrypoint set by the image
-w="": Working directory inside the container
-lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
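For instance, the new string form of ``-m`` takes a unit suffix, per the format above. A quick sketch (image and command are placeholders):

.. code-block:: bash

    sudo docker run -m 512m -i -t ubuntu /bin/bash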
@ -688,6 +918,35 @@ can access the network and environment of the redis container via
environment variables. The ``-name`` flag will assign the name ``console``
to the newly created container.
.. code-block:: bash
docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma-separated
list or by repetitions of the ``-volumes-from`` argument. The container
ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
.. _cli_save:
``save``
::
Usage: docker save image > repository.tar
Streams a tarred repository to the standard output stream.
Contains all parent layers, and all tags + versions.
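Combined with ``load`` (documented above), this enables the offline image transfer introduced in this release. A sketch of a round trip (the repository name is a placeholder):

.. code-block:: bash

    sudo docker save ubuntu > ubuntu.tar
    # copy ubuntu.tar to the target host, then:
    sudo docker load < ubuntu.tar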
.. _cli_search:
``search``


@ -40,7 +40,11 @@ html_additional_pages = {
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain']
extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']
# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
'Issue ') }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@ -231,7 +235,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('toctree', 'docker', u'Docker Documentation',
('commandline/cli', 'docker', u'Docker Documentation',
[u'Team Docker'], 1)
]


@ -10,13 +10,16 @@ Want to hack on Docker? Awesome!
The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
The developer environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker.
If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.
The documentation environment `Dockerfile <https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation.
Further interesting details can be found in the `Packaging hints <https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.
Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.


@ -20,7 +20,7 @@ Note that we're marking ``/var/lib/couchdb`` as a data volume.
.. code-block:: bash
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
Add data to the first database
------------------------------
@ -31,7 +31,7 @@ replace ``localhost`` with the public IP of your Docker host.
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH1 5984)/_utils/"
URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser, and use the couch interface to add data"
Create second database
@ -41,7 +41,7 @@ This time, we're requesting shared access to ``$COUCH1``'s volumes.
.. code-block:: bash
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
COUCH2=$(sudo docker run -d -p 5984 -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Browse data on the second database
----------------------------------
@ -49,7 +49,7 @@ Browse data on the second database
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH2 5984)/_utils/"
URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
Congratulations, you are now running two Couchdb containers, completely


@ -1,4 +1,7 @@
.. note::
This example assumes you have Docker running in daemon mode. For more information please see :ref:`running_examples`
* This example assumes you have Docker running in daemon mode. For
more information please see :ref:`running_examples`.
* **If you don't like sudo** then see :ref:`dockergroup`


@ -127,10 +127,12 @@ Check the logs make sure it is working correctly.
sudo docker attach $CONTAINER_ID
Attach to the container to see the results in realtime.
Attach to the container to see the results in real-time.
- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=true"** Proxify all received signal to the process
(even in non-tty mode)
- **$CONTAINER_ID** The Id of the container we want to attach too.
Exit from the container attachment by pressing Control-C.


@ -24,4 +24,3 @@ to more substantial services like those which you might find in production.
postgresql_service
mongodb
running_riak_service
linking_into_redis


@ -1,137 +0,0 @@
:title: Linking to an Redis container
:description: Running redis linked into your web app
:keywords: docker, example, networking, redis, link
.. _linking_redis:
Linking Redis
=============
.. include:: example_header.inc
Building a Redis container to link as a child of our web application.
Building the Redis container
----------------------------
Lets build a Redis image with the following Dockerfile.
First checkout the Redis source code.
.. code-block:: bash
git clone https://github.com/antirez/redis.git
cd redis
git checkout 2.6
Now let's create a Dockerfile in the root of the Redis repository.
.. code-block:: bash
# Build redis from source
# Make sure you have the redis source code checked out in
# the same directory as this Dockerfile
FROM ubuntu
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
RUN apt-get install -y gcc make g++ build-essential libc6-dev tcl
ADD . /redis
RUN (cd /redis && make)
RUN mkdir -p /redis-data
VOLUME ["/redis-data"]
EXPOSE 6379
ENTRYPOINT ["/redis/src/redis-server"]
CMD ["--dir", "/redis-data"]
# docker build our new redis image from source
docker build -t redis-2.6 .
We need to ``EXPOSE`` the default port of 6379 so that our link knows what ports
to connect to our Redis container on. If you do not expose any ports for the
image then docker will not be able to establish the link between containers.
Run the Redis container
-----------------------
.. code-block:: bash
sudo docker run -d -e PASSWORD=docker -name redis redis-2.6 --requirepass docker
This will run our Redis container with the password docker
to secure our service. By specifying the ``-name`` flag on run
we will assign the name ``redis`` to this container. If we do not specify a name for
our container via the ``-name`` flag docker will automatically generate a name for us.
We can issue all the commands that you would expect; start, stop, attach, using the name for our container.
The name also allows us to link other containers into this one.
Linking Redis as a child
------------------------
Next we can start a new web application that has a dependency on Redis and apply a link
to connect both containers. If you noticed when running our Redis server we did not use
the ``-p`` flag to publish the Redis port to the host system. Redis exposed port 6379 via the Dockerfile
and this is all we need to establish a link.
Now let's start our web application with a link into Redis.
.. code-block:: bash
sudo docker run -t -i -link redis:db -name webapp ubuntu bash
root@4c01db0b339c:/# env
HOSTNAME=4c01db0b339c
DB_NAME=/webapp/db
TERM=xterm
DB_PORT=tcp://172.17.0.8:6379
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
DB_ENV_PASSWORD=docker
SHLVL=1
HOME=/
container=lxc
_=/usr/bin/env
root@4c01db0b339c:/#
When we inspect the environment of the linked container we can see a few extra environment
variables have been added. When you specified ``-link redis:db`` you are telling docker
to link the container named ``redis`` into this new container with the alias ``db``.
Environment variables are prefixed with the alias so that the parent container can access
network and environment information from the containers that are linked into it.
.. code-block:: bash
# The name of the child container
DB_NAME=/webapp/db
# The default protocol, ip, and port of the service running in the container
DB_PORT=tcp://172.17.0.8:6379
# A specific protocol, ip, and port of various services
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
# Get environment variables of the container
DB_ENV_PASSWORD=docker
Accessing the network information along with the environment of the child container allows
us to easily connect to the Redis service on the specific IP and port and use the password
specified in the environment.


@ -176,11 +176,11 @@ Run the image
+++++++++++++
Running your image with ``-d`` runs the container in detached mode, leaving the
container running in the background. Run the image you previously built:
container running in the background. The ``-p`` flag redirects a public port to a private port in the container. Run the image you previously built:
.. code-block:: bash
sudo docker run -d <your username>/centos-node-hello
sudo docker run -p 49160:8080 -d <your username>/centos-node-hello
Print the output of your app:


@ -39,11 +39,12 @@ container. The ``BUILD_JOB`` environment variable will be set with the new conta
.. code-block:: bash
sudo docker attach $BUILD_JOB
sudo docker attach -sig-proxy=false $BUILD_JOB
[...]
While this container is running, we can attach to the new container to
see what is going on. You can use Ctrl-C to disconnect.
see what is going on. Setting the ``-sig-proxy`` flag to ``false`` allows you to
attach to and detach from (Ctrl-C) the container without stopping it.
.. code-block:: bash
@ -86,7 +87,7 @@ http://0.0.0.0:5000/`` in the log output.
.. code-block:: bash
WEB_PORT=$(sudo docker port $WEB_WORKER 5000)
WEB_PORT=$(sudo docker port $WEB_WORKER 5000 | awk -F: '{ print $2 }')
Look up the public-facing port which is NAT-ed. Find the private port
used by the container and store it inside of the ``WEB_PORT`` variable.


@ -9,74 +9,93 @@ Redis Service
.. include:: example_header.inc
Very simple, no frills, Redis service.
Very simple, no frills, Redis service attached to a web application using a link.
Open a docker container
-----------------------
Create a docker container for Redis
-----------------------------------
Firstly, we create a ``Dockerfile`` for our new Redis image.
.. code-block:: bash
sudo docker run -i -t ubuntu /bin/bash
FROM ubuntu:12.10
RUN apt-get update
RUN apt-get -y install redis-server
EXPOSE 6379
ENTRYPOINT ["/usr/bin/redis-server"]
Building your image
-------------------
Update your Docker container, install the Redis server. Once
installed, exit out of the Docker container.
Next we build an image from our ``Dockerfile``. Replace ``<your username>``
with your own user name.
.. code-block:: bash
apt-get update
apt-get install redis-server
exit
Snapshot the installation
-------------------------
.. code-block:: bash
sudo docker ps -a # grab the container id (this will be the first one in the list)
sudo docker commit <container_id> <your username>/redis
sudo docker build -t <your username>/redis .
Run the service
---------------
Use the image we've just created and name your container ``redis``.
Running the service with ``-d`` runs the container in detached mode, leaving the
container running in the background. Use your snapshot.
container running in the background.
Importantly, we're not exposing any ports on our container. Instead we're going to
use a container link to provide access to our Redis database.
.. code-block:: bash
sudo docker run -d -p 6379 <your username>/redis /usr/bin/redis-server
sudo docker run -name redis -d <your username>/redis
Test 1
++++++
Create your web application container
-------------------------------------
Connect to the container with the ``redis-cli`` binary.
Next we can create a container for our application. We're going to use the ``-link``
flag to create a link to the ``redis`` container we've just created with an alias of
``db``. This will create a secure tunnel to the ``redis`` container and expose the
Redis instance running inside that container to only this container.
.. code-block:: bash
sudo docker ps # grab the new container id
sudo docker inspect <container_id> # grab the ipaddress of the container
redis-cli -h <ipaddress> -p 6379
redis 10.0.3.32:6379> set docker awesome
sudo docker run -link redis:db -i -t ubuntu:12.10 /bin/bash
Once inside our freshly created container we need to install Redis to get the
``redis-cli`` binary to test our connection.
.. code-block:: bash
apt-get update
apt-get -y install redis-server
service redis-server stop
Now we can test the connection. First, let's look at the available environment
variables in our web application container. We can use these to get the IP and port
of our ``redis`` container.
.. code-block:: bash
env
. . .
DB_NAME=/violet_wolf/db
DB_PORT_6379_TCP_PORT=6379
DB_PORT=tcp://172.17.0.33:6379
DB_PORT_6379_TCP=tcp://172.17.0.33:6379
DB_PORT_6379_TCP_ADDR=172.17.0.33
DB_PORT_6379_TCP_PROTO=tcp
We can see that we've got a small list of environment variables prefixed with ``DB``.
The ``DB`` comes from the link alias specified when we launched the container. Let's use
the ``DB_PORT_6379_TCP_ADDR`` variable to connect to our Redis container.
.. code-block:: bash
redis-cli -h $DB_PORT_6379_TCP_ADDR
redis 172.17.0.33:6379>
redis 172.17.0.33:6379> set docker awesome
OK
redis 10.0.3.32:6379> get docker
redis 172.17.0.33:6379> get docker
"awesome"
redis 10.0.3.32:6379> exit
redis 172.17.0.33:6379> exit
Test 2
++++++
We could easily use this or other environment variables in our web application to make a
connection to our ``redis`` container.
Connect to the host os with the ``redis-cli`` binary.
.. code-block:: bash
sudo docker ps # grab the new container id
sudo docker port <container_id> 6379 # grab the external port
ip addr show # grab the host ip address
redis-cli -h <host ipaddress> -p <external port>
redis 192.168.0.1:49153> set docker awesome
OK
redis 192.168.0.1:49153> get docker
"awesome"
redis 192.168.0.1:49153> exit


@ -12,9 +12,9 @@ SSH Daemon Service
**Video:**
I've create a little screencast to show how to create a SSHd service
I've created a little screencast to show how to create an SSHd service
and connect to it. It is something like 11 minutes and not entirely
smooth, but gives you a good idea.
smooth, but it gives you a good idea.
.. note::
This screencast was created before Docker version 0.5.2, so the


@ -22,12 +22,10 @@ Amazon QuickStart
1. **Choose an image:**
* Open http://cloud-images.ubuntu.com/locator/ec2/
* Enter ``amd64 precise`` in the search field (it will search as you
type)
* Pick an image by clicking on the image name. *An EBS-enabled
image will let you use a t1.micro instance.* Clicking on the image
name will take you to your AWS Console.
* Launch the `Create Instance Wizard <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu on your AWS Console
* Select the "Community AMIs" option and search for ``amd64 precise`` (press enter to search)
* If you choose an EBS-enabled AMI you will be able to launch a ``t1.micro`` instance (more info on `pricing <http://aws.amazon.com/en/ec2/pricing/>`_)
* When you click select you'll be taken to the instance setup, and you're one click away from having your Ubuntu VM up and running.
2. **Tell CloudInit to install Docker:**
@ -102,26 +100,45 @@ Docker that way too. Vagrant 1.1 or higher is required.
we need to set them there first. Make sure you have everything on
amazon aws setup so you can (manually) deploy a new image to EC2.
Note that where possible these variables are the same as those honored by
the ec2 api tools.
::
export AWS_ACCESS_KEY_ID=xxx
export AWS_SECRET_ACCESS_KEY=xxx
export AWS_ACCESS_KEY=xxx
export AWS_SECRET_KEY=xxx
export AWS_KEYPAIR_NAME=xxx
export AWS_SSH_PRIVKEY=xxx
export SSH_PRIVKEY_PATH=xxx
The environment variables are:
export BOX_NAME=xxx
export AWS_REGION=xxx
export AWS_AMI=xxx
export AWS_INSTANCE_TYPE=xxx
* ``AWS_ACCESS_KEY_ID`` - The API key used to make requests to AWS
* ``AWS_SECRET_ACCESS_KEY`` - The secret key to make AWS API requests
The required environment variables are:
* ``AWS_ACCESS_KEY`` - The API key used to make requests to AWS
* ``AWS_SECRET_KEY`` - The secret key to make AWS API requests
* ``AWS_KEYPAIR_NAME`` - The name of the keypair used for this EC2 instance
* ``AWS_SSH_PRIVKEY`` - The path to the private key for the named
* ``SSH_PRIVKEY_PATH`` - The path to the private key for the named
keypair, for example ``~/.ssh/docker.pem``
There are a number of optional environment variables:
* ``BOX_NAME`` - The name of the vagrant box to use. Defaults to
``ubuntu``.
* ``AWS_REGION`` - The aws region to spawn the vm in. Defaults to
``us-east-1``.
* ``AWS_AMI`` - The aws AMI to start with as a base. This must
be an Ubuntu 12.04 precise image. You must change this value if
``AWS_REGION`` is set to a value other than ``us-east-1``.
This is because AMIs are region specific. Defaults to ``ami-69f5a900``.
* ``AWS_INSTANCE_TYPE`` - The aws instance type. Defaults to ``t1.micro``.
You can check if they are set correctly by doing something like
::
echo $AWS_ACCESS_KEY_ID
echo $AWS_ACCESS_KEY
6. Do the magic!


@ -19,11 +19,12 @@ Contents:
ubuntulinux
binaries
security
upgrading
kernel
vagrant
windows
amazon
rackspace
archlinux
gentoolinux
upgrading
kernel


@ -25,6 +25,7 @@ If you cannot or do not want to use the "official" kernels,
here is some technical background about the features (both optional and
mandatory) that docker needs to run successfully.
Linux version 3.8 or above
--------------------------
@ -39,6 +40,15 @@ The symptoms include:
- kernel crash causing the machine to freeze for a few minutes, or even
completely.
Additionally, kernels prior to 3.4 did not implement ``reboot_pid_ns``,
which means that the ``reboot()`` syscall could reboot the host machine,
instead of terminating the container. To work around that problem,
LXC userland tools (since version 0.8) automatically drop the ``SYS_BOOT``
capability when necessary. Still, if you run a pre-3.4 kernel with pre-0.8
LXC tools, be aware that containers can reboot the whole host! This is
not something that Docker wants to address in the short term, since you
shouldn't use kernels prior to 3.8 with Docker anyway.
While it is still possible to use older kernels for development, it is
really not advised to do so.


@ -0,0 +1,267 @@
:title: Docker Security
:description: Review of the Docker Daemon attack surface
:keywords: Docker, Docker documentation, security
.. _dockersecurity:
Docker Security
===============
*Adapted from* `Containers & Docker: How Secure are They? <blogsecurity>`_
There are three major areas to consider when reviewing Docker security:
* the intrinsic security of containers, as implemented by kernel
namespaces and cgroups;
* the attack surface of the Docker daemon itself;
* the "hardening" security features of the kernel and how they
interact with containers.
Kernel Namespaces
-----------------
Docker containers are essentially LXC containers, and they come with
the same security features. When you start a container with ``docker
run``, behind the scenes Docker uses ``lxc-start`` to execute the
Docker container. This creates a set of namespaces and control groups
for the container. Those namespaces and control groups are not created
by Docker itself, but by ``lxc-start``. This means that as the LXC
userland tools evolve (and provide additional namespaces and isolation
features), Docker will automatically make use of them.
**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
less affect, processes running in another container, or in the host
system.
**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container. Of course, if the host system is setup
accordingly, containers can interact with each other through their
respective network interfaces — just like they can interact with
external hosts. When you specify public ports for your containers or
use :ref:`links <working_with_links_names>` then IP traffic is allowed
between containers. They can ping each other, send/receive UDP
packets, and establish TCP connections, but that can be restricted if
necessary. From a network architecture point of view, all containers
on a given Docker host are sitting on bridge interfaces. This means
that they are just like physical machines connected through a common
Ethernet switch; no more, no less.
How mature is the code providing kernel namespaces and private
networking? Kernel namespaces were introduced `between kernel version
2.6.15 and 2.6.26
<http://lxc.sourceforge.net/index.php/about/kernel-namespaces/>`_. This
means that since July 2008 (date of the 2.6.26 release, now 5 years
ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of `OpenVZ
<http://en.wikipedia.org/wiki/OpenVZ>`_ in such a way that they could
be merged within the mainstream kernel. And OpenVZ was initially
released in 2005, so both the design and the implementation are
pretty mature.
Control Groups
--------------
Control Groups are the other key component of Linux Containers. They
implement resource accounting and limiting. They provide a lot of very
useful metrics, but they also help to ensure that each container gets
its fair share of memory, CPU, disk I/O; and, more importantly, that a
single container cannot bring the system down by exhausting one of
those resources.
So while they do not play a role in preventing one container from
accessing or affecting the data and processes of another container,
they are essential to fend off some denial-of-service attacks. They
are particularly important on multi-tenant platforms, like public and
private PaaS, to guarantee a consistent uptime (and performance) even
when some applications start to misbehave.
Control Groups have been around for a while as well: the code was
started in 2006, and initially merged in kernel 2.6.24.
Docker Daemon Attack Surface
----------------------------
Running containers (and applications) with Docker implies running the
Docker daemon. This daemon currently requires root privileges, and you
should therefore be aware of some important details.
First of all, **only trusted users should be allowed to control your
Docker daemon**. This is a direct consequence of some powerful Docker
features. Specifically, Docker allows you to share a directory between
the Docker host and a guest container; and it allows you to do so
without limiting the access rights of the container. This means that
you can start a container where the ``/host`` directory will be the
``/`` directory on your host; and the container will be able to alter
your host filesystem without any restriction. Sounds crazy? Well,
you have to know that **all virtualization systems allowing filesystem
resource sharing behave the same way**. Nothing prevents you from
sharing your root filesystem (or even your root block device) with a
virtual machine.
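To make the risk concrete, here is an illustrative sketch of such a
container; the ``/host`` mount point is an arbitrary name chosen for
this example.
.. code-block:: bash
# Illustrative only: bind-mount the host's root filesystem into a container.
# Anyone who can run this effectively owns the host, which is why only
# trusted users may talk to the Docker daemon.
sudo docker run -v /:/host -t -i ubuntu /bin/bash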
This has a strong security implication: if you instrument Docker from
e.g. a web server to provision containers through an API, you should
be even more careful than usual with parameter checking, to make sure
that a malicious user cannot pass crafted parameters causing Docker to
create arbitrary containers.
For this reason, the REST API endpoint (used by the Docker CLI to
communicate with the Docker daemon) changed in Docker 0.5.2, and now
uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
latter being prone to cross-site-scripting attacks if you happen to
run Docker directly on your local machine, outside of a VM). You can
then use traditional UNIX permission checks to limit access to the
control socket.
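For example, a minimal sketch of such a check, assuming the default
socket path and the *docker* group described later in this
documentation:
.. code-block:: bash
# Restrict the control socket to root and members of the docker group
sudo chown root:docker /var/run/docker.sock
sudo chmod 660 /var/run/docker.sock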
You can also expose the REST API over HTTP if you explicitly decide
to do so. However, if you do, be aware of the above-mentioned
security implications and ensure that the API is reachable
only from a trusted network or VPN, or is protected with e.g. ``stunnel``
and client SSL certificates.
Recent improvements in Linux namespaces will soon allow running
full-featured containers without root privileges, thanks to the new
user namespace. This is covered in detail `here
<http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/>`_. Moreover,
this will solve the problem caused by sharing filesystems between host
and guest, since the user namespace allows users within containers
(including the root user) to be mapped to other users in the host
system.
The end goal for Docker is therefore to implement two additional
security improvements:
* map the root user of a container to a non-root user of the Docker
host, to mitigate the effects of a container-to-host privilege
escalation;
* allow the Docker daemon to run without root privileges, and delegate
operations requiring those privileges to well-audited sub-processes,
each with its own (very limited) scope: virtual network setup,
filesystem management, etc.
Finally, if you run Docker on a server, it is recommended to run
exclusively Docker on the server, and to move all other services into
containers controlled by Docker. Of course, it is fine to keep your
favorite admin tools (probably at least an SSH server), as well as
existing monitoring/supervision processes (e.g. NRPE, collectd, etc).
Linux Kernel Capabilities
-------------------------
By default, Docker starts containers with a very restricted set of
capabilities. What does that mean?
Capabilities turn the binary "root/non-root" dichotomy into a
fine-grained access control system. Processes (like web servers) that
just need to bind on a port below 1024 do not have to run as root:
they can just be granted the ``net_bind_service`` capability
instead. And there are many other capabilities, for almost all the
specific areas where root privileges are usually needed.
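Outside of Docker, you can exercise the same mechanism with the
``setcap`` and ``getcap`` tools from the libcap package; the server
binary path below is a made-up example.
.. code-block:: bash
# Grant only the net_bind_service capability to a (hypothetical) server
# binary, so it can bind ports below 1024 without running as root
sudo setcap 'cap_net_bind_service=+ep' /usr/local/bin/myserver
# Inspect the capabilities attached to the binary
getcap /usr/local/bin/myserver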
This means a lot for container security; let's see why!
Your average server (bare metal or virtual machine) needs to run a
bunch of processes as root. Those typically include SSH, cron,
syslogd; hardware management tools (to e.g. load modules), network
configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much
more. A container is very different, because almost all of those tasks
are handled by the infrastructure around the container:
* SSH access will typically be managed by a single server running in
the Docker host;
* ``cron``, when necessary, should run as a user process, dedicated
and tailored for the app that needs its scheduling service, rather
than as a platform-wide facility;
* log management will also typically be handled by Docker, or by
third-party services like Loggly or Splunk;
* hardware management is irrelevant, meaning that you never need to
run ``udevd`` or equivalent daemons within containers;
* network management happens outside of the containers, enforcing
separation of concerns as much as possible, meaning that a container
should never need to perform ``ifconfig``, ``route``, or ``ip`` commands
(except when a container is specifically engineered to behave like a
router or firewall, of course).
This means that in most cases, containers will not need "real" root
privileges *at all*. And therefore, containers can run with a reduced
capability set; meaning that "root" within a container has far fewer
privileges than the real "root". For instance, it is possible to:
* deny all "mount" operations;
* deny access to raw sockets (to prevent packet spoofing);
* deny access to some filesystem operations, like creating new device
nodes, changing the owner of files, or altering attributes
(including the immutable flag);
* deny module loading;
* and many others.
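You can observe the reduced set from inside a container; a quick
sketch (the exact bounding set depends on your Docker version):
.. code-block:: bash
# From inside a container: the capability bounding set of PID 1.
# A full-privilege root shell would show many more bits set.
grep CapBnd /proc/1/status
# A denied operation fails even as "root", for example:
mount -t tmpfs none /mnt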
This means that even if an intruder manages to escalate to root within
a container, it will be much harder to do serious damage, or to
escalate to the host.
This won't affect regular web apps; but malicious users will find that
the arsenal at their disposal has shrunk considerably! You can see
`the list of dropped capabilities in the Docker code
<https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97>`_,
and a full list of available capabilities in `Linux manpages
<http://man7.org/linux/man-pages/man7/capabilities.7.html>`_.
Of course, you can always enable extra capabilities if you really need
them (for instance, if you want to use a FUSE-based filesystem), but
by default, Docker containers will be locked down to ensure maximum
safety.
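In current versions, the coarse-grained way to lift these
restrictions is the ``-privileged`` flag of ``docker run``, which
disables most of this containment; treat it as a last resort rather
than a default.
.. code-block:: bash
# Privileged mode removes the capability restrictions (and more),
# e.g. for a container that needs to mount a FUSE filesystem
sudo docker run -privileged -t -i ubuntu /bin/bash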
Other Kernel Security Features
------------------------------
Capabilities are just one of the many security features provided by
modern Linux kernels. It is also possible to leverage existing,
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
Docker.
While Docker currently only enables capabilities, it doesn't interfere
with the other systems. This means that there are many different ways
to harden a Docker host. Here are a few examples.
* You can run a kernel with GRSEC and PAX. This will add many safety
checks, both at compile-time and run-time; it will also defeat many
exploits, thanks to techniques like address randomization. It
doesn't require Docker-specific configuration, since those security
features apply system-wide, independently of containers.
* If your distribution comes with security model templates for LXC
containers, you can use them out of the box. For instance, Ubuntu
comes with AppArmor templates for LXC, and those templates provide
an extra safety net (even though it overlaps greatly with
capabilities).
* You can define your own policies using your favorite access control
mechanism. Since Docker containers are standard LXC containers,
there is nothing “magic” or specific to Docker.
Just like there are many third-party tools to augment Docker
containers with e.g. special network topologies or shared filesystems,
you can expect to see tools to harden existing Docker containers
without affecting Docker's core.
Conclusions
-----------
Docker containers are, by default, quite secure; especially if you
take care of running your processes inside the containers as
non-privileged users (i.e. non-root).
You can add an extra layer of safety by enabling AppArmor, SELinux,
GRSEC, or your favorite hardening solution.
Last but not least, if you see interesting security features in other
containerization systems, you will be able to implement them as well
with Docker, since everything is provided by the kernel anyway.
For more context and especially for comparisons with VMs and other
container systems, please also see the `original blog post
<blogsecurity>`_.
.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/
@ -38,3 +38,10 @@ was when the container was stopped.
You can promote a container to an :ref:`image_def` with ``docker
commit``. Once a container is an image, you can use it as a parent for
new containers.
Container IDs
.............
All containers are identified by a 64 hexadecimal digit string (internally a 256-bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the command line. There is a small possibility of short ID collisions, so the
docker server will always return the long ID.
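For example (the IDs printed will vary, and this assumes ``docker
ps`` accepts the same ``-notrunc`` flag as ``docker images``):
.. code-block:: bash
# Short (12 character) IDs, as printed by default
sudo docker ps
# Full 64 character IDs
sudo docker ps -notrunc=true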
@ -36,3 +36,11 @@ Base Image
..........
An image that has no parent is a **base image**.
Image IDs
.........
All images are identified by a 64 hexadecimal digit string (internally a 256-bit
value). To simplify their use, a short ID of the first 12 characters can be used
on the command line. There is a small possibility of short ID collisions, so the
docker server will always return the long ID.
@ -22,15 +22,29 @@ specify the path to it and manually start it.
# Run docker in daemon mode
sudo <path to>/docker -d &
Running an interactive shell
----------------------------
Download a pre-built image
--------------------------
.. code-block:: bash
# Download an ubuntu image
sudo docker pull ubuntu
This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.
.. NOTE:: When the image has successfully downloaded, you will see a
12-character ID ``539c0211cd76: Download complete`` which is the
short form of the image ID. These short image IDs are the first 12
characters of the full image ID - which can be found using ``docker
inspect`` or ``docker images -notrunc=true``
Running an interactive shell
----------------------------
.. code-block:: bash
# Run an interactive shell in the ubuntu image,
# allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell,
@ -39,32 +53,36 @@ Running an interactive shell
.. _dockergroup:
Why ``sudo``?
-------------
sudo and the docker Group
-------------------------
The ``docker`` daemon always runs as root, and since ``docker``
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
port. By default that Unix socket is owned by the user *root*, and so,
by default, you can access it with ``sudo``.
Starting in version 0.5.3, if you create a Unix group called *docker*
and add users to it, then the ``docker`` daemon will make the
ownership of the Unix socket read/writable by the *docker* group when
the daemon starts. The ``docker`` daemon must always run as root, but
if you run the ``docker`` client as a user in the *docker* group then
you don't need to add ``sudo`` to all the client commands.
Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as root, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.
**Example:**
.. code-block:: bash
# Add the docker group
# Add the docker group if it doesn't already exist.
sudo groupadd docker
# Add the ubuntu user to the docker group
# Add the user "ubuntu" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect
# this to take effect.
sudo gpasswd -a ubuntu docker
# Restart the docker daemon
# Restart the docker daemon.
sudo service docker restart
.. _bind_docker:
@ -72,7 +90,7 @@ you don't need to add ``sudo`` to all the client commands.
Bind Docker to another host/port or a Unix socket
-------------------------------------------------
.. DANGER:: Changing the default ``docker`` daemon binding to a TCP
.. warning:: Changing the default ``docker`` daemon binding to a TCP
port or Unix *docker* user group will increase your security risks
by allowing non-root users to potentially gain *root* access on the
host (`e.g. #1369
@ -15,27 +15,39 @@ commit them along the way, giving you a final image.
.. contents:: Table of Contents
.. _dockerfile_usage:
1. Usage
========
To build an image from a source repository, create a description file
called ``Dockerfile`` at the root of your repository. This file will
describe the steps to assemble the image.
To :ref:`build <cli_build>` an image from a source repository, create
a description file called ``Dockerfile`` at the root of your
repository. This file will describe the steps to assemble the image.
Then call ``docker build`` with the path of your source repository as
argument:
argument (for example, ``.``):
``sudo docker build .``
The path to the source repository defines where to find the *context*
of the build. The build is run by the Docker daemon, not by the CLI,
so the whole context must be transferred to the daemon. The Docker CLI
reports "Uploading context" when the context is sent to the daemon.
You can specify a repository and tag at which to save the new image if the
build succeeds:
``sudo docker build -t shykes/myapp .``
Docker will run your steps one-by-one, committing the result if necessary,
before finally outputting the ID of your new image.
The Docker daemon will run your steps one-by-one, committing the
result if necessary, before finally outputting the ID of your new
image. The Docker daemon will automatically clean up the context you
sent.
When you're done with your build, you're ready to look into :ref:`image_push`.
When you're done with your build, you're ready to look into
:ref:`image_push`.
.. _dockerfile_format:
2. Format
=========
@ -63,12 +75,16 @@ allows statements like:
# Comment
RUN echo 'we are running some # of cool things'
.. _dockerfile_instructions:
3. Instructions
===============
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
.. _dockerfile_from:
3.1 FROM
--------
@ -94,6 +110,8 @@ output by the commit before each new ``FROM`` command.
If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is
assumed. If the used tag does not exist, an error will be returned.
.. _dockerfile_maintainer:
3.2 MAINTAINER
--------------
@ -102,6 +120,8 @@ assumed. If the used tag does not exist, an error will be returned.
The ``MAINTAINER`` instruction allows you to set the *Author* field of
the generated images.
.. _dockerfile_run:
3.3 RUN
-------
@ -116,6 +136,16 @@ core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.
Known Issues (RUN)
..................
* :issue:`783` is about file permissions problems that can occur when
using the AUFS file system. You might notice it during an attempt to
``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.
.. _dockerfile_cmd:
3.4 CMD
-------
@ -159,7 +189,7 @@ array:
If you would like your container to run the same executable every
time, then you should consider using ``ENTRYPOINT`` in combination
with ``CMD``. See :ref:`entrypoint_def`.
with ``CMD``. See :ref:`dockerfile_entrypoint`.
If the user specifies arguments to ``docker run`` then they will
override the default specified in CMD.
@ -169,6 +199,8 @@ override the default specified in CMD.
command and commits the result; ``CMD`` does not execute anything at
build time, but specifies the intended command for the image.
.. _dockerfile_expose:
3.5 EXPOSE
----------
@ -179,6 +211,8 @@ functionally equivalent to running ``docker commit -run '{"PortSpecs":
["<port>", "<port2>"]}'`` outside the builder. Refer to
:ref:`port_redirection` for detailed information.
.. _dockerfile_env:
3.6 ENV
-------
@ -193,6 +227,8 @@ with ``<key>=<value>``
The environment variables will persist when a container is run
from the resulting image.
.. _dockerfile_add:
3.7 ADD
-------
@ -211,8 +247,16 @@ destination container.
All new files and directories are created with mode 0755, uid and gid
0.
.. note::
if you build using STDIN (``docker build - < somefile``), there is no build
context, so the Dockerfile can only contain a URL-based ADD statement.
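A quick sketch of that mode; the URL in the comment is a hypothetical
example.
.. code-block:: bash
# Build with no context: the Dockerfile is piped in on STDIN, so ADD
# can only fetch from URLs, e.g.:
#   FROM ubuntu
#   ADD http://example.com/app.tar.gz /srv/
sudo docker build - < Dockerfile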
The copy obeys the following rules:
* The ``<src>`` path must be inside the *context* of the build; you cannot
``ADD ../something /something``, because the first step of a
``docker build`` is to send the context directory (and subdirectories) to
the docker daemon.
* If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
then a file is downloaded from the URL and copied to ``<dest>``.
* If ``<src>`` is a URL and ``<dest>`` does end with a trailing slash,
@ -245,7 +289,7 @@ The copy obeys the following rules:
* If ``<dest>`` doesn't exist, it is created along with all missing
directories in its path.
.. _entrypoint_def:
.. _dockerfile_entrypoint:
3.8 ENTRYPOINT
--------------
@ -294,14 +338,18 @@ this optional but default, you could use a CMD:
CMD ["-l", "-"]
ENTRYPOINT ["/usr/bin/wc"]
.. _dockerfile_volume:
3.9 VOLUME
----------
``VOLUME ["/data"]``
The ``VOLUME`` instruction will add one or more new volumes to any
container created from the image.
The ``VOLUME`` instruction will create a mount point with the specified name and mark it
as holding externally mounted volumes from the native host or from other containers. For more
information/examples and mounting instructions via the docker client, refer to the
:ref:`volume_def` documentation.
.. _dockerfile_user:
3.10 USER
---------
@ -311,6 +359,8 @@ container created from the image.
The ``USER`` instruction sets the username or UID to use when running
the image.
.. _dockerfile_workdir:
3.11 WORKDIR
------------
@ -319,6 +369,7 @@ the image.
The ``WORKDIR`` instruction sets the working directory in which
the command given by ``CMD`` is executed.
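A minimal sketch combining the two instructions (image, tag, and path
are arbitrary):
.. code-block:: bash
# Dockerfile contents:
#   FROM ubuntu
#   WORKDIR /data
#   CMD ["/bin/pwd"]
# Building and running it prints /data, the working directory set above
sudo docker build -t workdir-demo . && sudo docker run workdir-demo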
.. _dockerfile_examples:
4. Dockerfile Examples
======================
@ -29,14 +29,32 @@ Here are a few sample scripts for systemd and upstart to integrate with docker.
Sample Upstart Script
---------------------
In this example we've already created a container to run Redis with an id of
0a7e070b698b. To create an upstart script for our container, we create a file
named ``/etc/init/redis.conf`` and place the following into it:
.. code-block:: bash
description "Redis container"
author "Me"
start on filesystem and started lxc-net and started docker
start on filesystem and started docker
stop on runlevel [!2345]
respawn
exec docker start -a 0a7e070b698b
script
# Wait for docker to finish starting up first.
FILE=/var/run/docker.sock
while [ ! -e $FILE ] ; do
inotifywait -t 2 -e create $(dirname $FILE)
done
/usr/bin/docker start -a 0a7e070b698b
end script
Next, we have to configure docker so that it's run with the option ``-r=false``.
Run the following command:
.. code-block:: bash
$ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker"
Sample systemd Script
@ -20,3 +20,4 @@ Contents:
puppet
host_integration
working_with_volumes
working_with_links_names
@ -0,0 +1,104 @@
:title: Working with Links and Names
:description: How to create and use links and names
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
.. _working_with_links_names:
Working with Links and Names
============================
From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
container by referring to its name. This will create a parent -> child relationship
where the parent container can see selected information about its child.
.. _run_name:
Container Naming
----------------
.. versionadded:: v0.6.5
You can now name your container by using the ``-name`` flag. If no name is provided, Docker
will automatically generate a name. You can see this name using the ``docker ps`` command.
.. code-block:: bash
# format is "sudo docker run -name <container_name> <image_name> <command>"
$ sudo docker run -name test ubuntu /bin/bash
# the flag "-a" shows all containers; only running containers are shown by default.
$ sudo docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2522602a0d99 ubuntu:12.04 /bin/bash 14 seconds ago Exit 0 test
.. _run_link:
Links: service discovery for docker
-----------------------------------
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each other by using the
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
unless explicitly allowed via a link. This is a huge win for securing your containers.
When two containers are linked together Docker creates a parent-child relationship
between the containers. The parent container will be able to access information via
environment variables of the child such as name, exposed ports, IP and other selected
environment variables.
When linking two containers, Docker will use the exposed ports of the child container to
create a secure tunnel for the parent to access. If a database container only exposes port
8080, then the linked container will only be allowed to access port 8080 and nothing else
when inter-container communication is set to false.
.. code-block:: bash
# Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
# Let's name the container "redis" based on that image and run it as a daemon.
$ sudo docker run -d -name redis redis-2.6
We can issue all the commands that you would expect against the name "redis": start, stop,
attach, and so on. The name also allows us to link other containers
into this one.
Next, we can start a new web application that has a dependency on Redis and apply a link
to connect both containers. Note that when running our Redis server we did not use
the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
this is all we need to establish a link.
.. code-block:: bash
# Linking the redis container as a child
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
When you specify -link redis:db you are telling Docker to link the container named redis
into this new container with the alias db. Environment variables are prefixed with the alias
so that the parent container can access network and environment information from the containers
that are linked into it.
If we inspect the environment variables of the second container, we can see all the information
about the child container.
.. code-block:: bash
root@4c01db0b339c:/# env
HOSTNAME=4c01db0b339c
DB_NAME=/webapp/db
TERM=xterm
DB_PORT=tcp://172.17.0.8:6379
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
SHLVL=1
HOME=/
container=lxc
_=/usr/bin/env
root@4c01db0b339c:/#
Accessing the network information along with the environment of the child container allows
us to easily connect to the Redis service on the specific IP and port in the environment.
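For instance, a short sketch from inside the ``webapp`` container,
assuming a Redis client has been installed in that image (it is not
there by default):
.. code-block:: bash
# Use the environment injected by the link to reach the child container
redis-cli -h $DB_PORT_6379_TCP_ADDR -p $DB_PORT_6379_TCP_PORT ping
# Expected reply: PONG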
@ -30,44 +30,60 @@ Each container can have zero or more data volumes.
Getting Started
...............
Using data volumes is as simple as adding a new flag: ``-v``. The parameter ``-v`` can be used more than once in order to create more volumes within the new container. The example below shows the instruction to create a container with two new volumes::
Using data volumes is as simple as adding a new flag: ``-v``. The
parameter ``-v`` can be used more than once in order to create more
volumes within the new container. The example below shows the
instruction to create a container with two new volumes::
docker run -v /var/volume1 -v /var/volume2 shykes/couchdb
For a Dockerfile, the VOLUME instruction will add one or more new volumes to any container created from the image::
For a Dockerfile, the VOLUME instruction will add one or more new
volumes to any container created from the image::
VOLUME ["/var/volume1", "/var/volume2"]
Create a new container using existing volumes from an existing container:
---------------------------------------------------------------------------
Mount Volumes from an Existing Container:
-----------------------------------------
The command below creates a new container which is running as a daemon ``-d`` and with one volume ``/var/lib/couchdb``::
The command below creates a new container which is running as a daemon
``-d`` and with one volume ``/var/lib/couchdb``::
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
From the container id of that previous container ``$COUCH1`` it's possible to create a new container sharing the same volume using the parameter ``-volumes-from container_id``::
From the container id of that previous container ``$COUCH1`` it's
possible to create a new container sharing the same volume using the
parameter ``-volumes-from container_id``::
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Now, the second container has all the information from the first volume.
Create a new container which mounts a host directory into it:
-------------------------------------------------------------
Mount a Host Directory as a Container Volume:
---------------------------------------------
::
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
If "host-dir" is missing, then docker creates a new volume.
This is not available for a Dockerfile due to the portability and sharing purpose of it. The [host-dir] volume is 100% host-dependent and will break on any other machine.
This is not available for a Dockerfile due to the portability and sharing
purpose of it. The [host-dir] volume is 100% host-dependent
and will break on any other machine.
For example::
sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03
The command above mounts the host directory ``/var/logs`` into the container with read-only permissions as ``/var/host_logs``.
The command above mounts the host directory ``/var/logs`` into the
container with read-only permissions as ``/var/host_logs``.
.. versionadded:: v0.5.0
Known Issues
............
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
@ -177,6 +177,15 @@ you can push and pull it like any other repository, but it will
there will be no user name checking performed. Your registry will
function completely independently from the Central Index.
.. raw:: html
<iframe width="640" height="360"
src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0"
allowfullscreen></iframe>
.. seealso:: `Docker Blog: How to use your own registry
<http://blog.docker.io/2013/07/how-to-use-your-own-registry/>`_
Authentication file
-------------------
3
docs/theme/docker/layout.html vendored
@ -129,7 +129,8 @@
<div class="row footer">
<div class="span12 tbox">
<div class="tbox">
<p>Docker is an open source project, sponsored by <a href="https://dotcloud.com">dotCloud</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
<p>Docker is an open source project, sponsored by <a href="https://www.docker.com">Docker Inc.</a>, under the <a href="https://github.com/dotcloud/docker/blob/master/LICENSE" title="Docker licence, hosted in the Github repository">apache 2.0 licence</a></p>
<p>Documentation proudly hosted by <a href="http://www.readthedocs.org">Read the Docs</a></p>
</div>
<div class="social links">
@ -2,20 +2,25 @@ package engine
import (
"fmt"
"os"
"log"
"runtime"
"github.com/dotcloud/docker/utils"
"log"
"os"
"runtime"
"strings"
)
type Handler func(*Job) string
var globalHandlers map[string]Handler
func init() {
globalHandlers = make(map[string]Handler)
}
func Register(name string, handler Handler) error {
if globalHandlers == nil {
globalHandlers = make(map[string]Handler)
_, exists := globalHandlers[name]
if exists {
return fmt.Errorf("Can't overwrite global handler for command %s", name)
}
globalHandlers[name] = handler
return nil
@ -25,8 +30,24 @@ func Register(name string, handler Handler) error {
// It acts as a store for *containers*, and allows manipulation of these
// containers by executing *jobs*.
type Engine struct {
root string
handlers map[string]Handler
root string
handlers map[string]Handler
hack Hack // data for temporary hackery (see hack.go)
id string
}
func (eng *Engine) Root() string {
return eng.root
}
func (eng *Engine) Register(name string, handler Handler) error {
eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers)
_, exists := eng.handlers[name]
if exists {
return fmt.Errorf("Can't overwrite handler for command %s", name)
}
eng.handlers[name] = handler
return nil
}
// New initializes a new engine managing the directory specified at `root`.
@ -56,22 +77,31 @@ func New(root string) (*Engine, error) {
return nil, err
}
eng := &Engine{
root: root,
handlers: globalHandlers,
root: root,
handlers: make(map[string]Handler),
id: utils.RandomString(),
}
// Copy existing global handlers
for k, v := range globalHandlers {
eng.handlers[k] = v
}
return eng, nil
}
func (eng *Engine) String() string {
return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8])
}
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
job := &Job{
eng: eng,
Name: name,
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Eng: eng,
Name: name,
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
handler, exists := eng.handlers[name]
if exists {
@ -80,3 +110,7 @@ func (eng *Engine) Job(name string, args ...string) *Job {
return job
}
func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
}
55
engine/engine_test.go Normal file
@ -0,0 +1,55 @@
package engine
import (
"testing"
)
func TestRegister(t *testing.T) {
if err := Register("dummy1", nil); err != nil {
t.Fatal(err)
}
if err := Register("dummy1", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
eng := newTestEngine(t)
// Should fail because global handlers are copied
// at engine creation
if err := eng.Register("dummy1", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
if err := eng.Register("dummy2", nil); err != nil {
t.Fatal(err)
}
if err := eng.Register("dummy2", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
}
func TestJob(t *testing.T) {
eng := newTestEngine(t)
job1 := eng.Job("dummy1", "--level=awesome")
if job1.handler != nil {
t.Fatalf("job1.handler should be empty")
}
h := func(j *Job) string {
return j.Name
}
eng.Register("dummy2", h)
job2 := eng.Job("dummy2", "--level=awesome")
if job2.handler == nil {
t.Fatalf("job2.handler shouldn't be nil")
}
if job2.handler(job2) != job2.Name {
t.Fatalf("handler dummy2 was not found in job2")
}
}
@ -23,7 +23,101 @@ func TestSetenv(t *testing.T) {
if val := job.Getenv("foo"); val != "bar" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
job.Setenv("bar", "")
if val := job.Getenv("bar"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
if val := job.Getenv("nonexistent"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
}
func TestSetenvBool(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvBool("foo", true)
if val := job.GetenvBool("foo"); !val {
t.Fatalf("GetenvBool returns incorrect value: %b", val)
}
job.SetenvBool("bar", false)
if val := job.GetenvBool("bar"); val {
t.Fatalf("GetenvBool returns incorrect value: %b", val)
}
if val := job.GetenvBool("nonexistent"); val {
t.Fatalf("GetenvBool returns incorrect value: %b", val)
}
}
func TestSetenvInt(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvInt("foo", -42)
if val := job.GetenvInt("foo"); val != -42 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
job.SetenvInt("bar", 42)
if val := job.GetenvInt("bar"); val != 42 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
if val := job.GetenvInt("nonexistent"); val != -1 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
}
func TestSetenvList(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvList("foo", []string{"bar"})
if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
job.SetenvList("bar", nil)
if val := job.GetenvList("bar"); val != nil {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
if val := job.GetenvList("nonexistent"); val != nil {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
}
func TestImportEnv(t *testing.T) {
type dummy struct {
DummyInt int
DummyStringArray []string
}
job := mkJob(t, "dummy")
if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil {
t.Fatal(err)
}
dmy := dummy{}
if err := job.ExportEnv(&dmy); err != nil {
t.Fatal(err)
}
if dmy.DummyInt != 42 {
t.Fatalf("Expected 42, got %d", dmy.DummyInt)
}
if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" {
t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray)
}
}
func TestEnviron(t *testing.T) {
job := mkJob(t, "dummy")
job.Setenv("foo", "bar")
val, exists := job.Environ()["foo"]
if !exists {
t.Fatalf("foo not found in the environ")
}
if val != "bar" {
t.Fatalf("bar not found in the environ")
}
}
21
engine/hack.go Normal file
@ -0,0 +1,21 @@
package engine
type Hack map[string]interface{}
func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
if eng.hack == nil {
return nil
}
val, exists := eng.hack[key]
if !exists {
return nil
}
return val
}
func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
if eng.hack == nil {
eng.hack = make(Hack)
}
eng.hack[key] = val
}
@ -1,21 +1,17 @@
package engine
import (
"testing"
"fmt"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"runtime"
"strings"
"fmt"
"io/ioutil"
"github.com/dotcloud/docker/utils"
"testing"
)
var globalTestID string
func init() {
Register("dummy", func(job *Job) string { return ""; })
}
func mkEngine(t *testing.T) *Engine {
func newTestEngine(t *testing.T) *Engine {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(1)
@ -38,5 +34,5 @@ func mkEngine(t *testing.T) *Engine {
}
func mkJob(t *testing.T, name string, args ...string) *Job {
return mkEngine(t).Job(name, args...)
return newTestEngine(t).Job(name, args...)
}
@ -1,11 +1,16 @@
package engine
import (
"io"
"strings"
"fmt"
"bufio"
"bytes"
"encoding/json"
"github.com/dotcloud/docker/utils"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"sync"
)
// A job is the fundamental unit of work in the docker engine.
@ -20,26 +25,45 @@ import (
// One slight variation is that jobs report their status as a string. The
// string "0" indicates success, and any other strings indicates an error.
// This allows for richer error reporting.
//
//
type Job struct {
eng *Engine
Name string
Args []string
env []string
Stdin io.ReadCloser
Stdout io.WriteCloser
Stderr io.WriteCloser
handler func(*Job) string
status string
Eng *Engine
Name string
Args []string
env []string
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
handler func(*Job) string
status string
onExit []func()
}
// Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned
// which includes the status.
func (job *Job) Run() error {
randId := utils.RandomString()[:4]
fmt.Printf("Job #%s: %s\n", randId, job)
defer fmt.Printf("Job #%s: %s = '%s'", randId, job, job.status)
defer func() {
var wg sync.WaitGroup
for _, f := range job.onExit {
wg.Add(1)
go func(f func()) {
f()
wg.Done()
}(f)
}
wg.Wait()
}()
if job.Stdout != nil && job.Stdout != os.Stdout {
job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
}
if job.Stderr != nil && job.Stderr != os.Stderr {
job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
}
job.Eng.Logf("+job %s", job.CallString())
defer func() {
job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
}()
if job.handler == nil {
job.status = "command not found"
} else {
@ -51,27 +75,105 @@ func (job *Job) Run() error {
return nil
}
func (job *Job) StdoutParseLines(dst *[]string, limit int) {
job.parseLines(job.StdoutPipe(), dst, limit)
}
func (job *Job) StderrParseLines(dst *[]string, limit int) {
job.parseLines(job.StderrPipe(), dst, limit)
}
func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(src)
for scanner.Scan() {
// If the limit is reached, flush the rest of the source and return
if limit > 0 && len(*dst) >= limit {
io.Copy(ioutil.Discard, src)
return
}
line := scanner.Text()
// Append the line (with delimiter removed)
*dst = append(*dst, line)
}
}()
job.onExit = append(job.onExit, wg.Wait)
}
func (job *Job) StdoutParseString(dst *string) {
lines := make([]string, 0, 1)
job.StdoutParseLines(&lines, 1)
job.onExit = append(job.onExit, func() {
if len(lines) >= 1 {
*dst = lines[0]
}
})
}
func (job *Job) StderrParseString(dst *string) {
lines := make([]string, 0, 1)
job.StderrParseLines(&lines, 1)
job.onExit = append(job.onExit, func() { *dst = lines[0] })
}
func (job *Job) StdoutPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stdout = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) StderrPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stderr = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) CallString() string {
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
}
func (job *Job) StatusString() string {
// FIXME: if a job returns the empty string, it will be printed
// as not having returned.
// (this only affects String which is a convenience function).
if job.status != "" {
var okerr string
if job.status == "0" {
okerr = "OK"
} else {
okerr = "ERR"
}
return fmt.Sprintf(" = %s (%s)", okerr, job.status)
}
return ""
}
// String returns a human-readable description of `job`
func (job *Job) String() string {
return strings.Join(append([]string{job.Name}, job.Args...), " ")
return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
}
func (job *Job) Getenv(key string) (value string) {
for _, kv := range job.env {
if strings.Index(kv, "=") == -1 {
continue
}
parts := strings.SplitN(kv, "=", 2)
if parts[0] != key {
continue
}
if len(parts) < 2 {
value = ""
} else {
value = parts[1]
}
}
return
for _, kv := range job.env {
if strings.Index(kv, "=") == -1 {
continue
}
parts := strings.SplitN(kv, "=", 2)
if parts[0] != key {
continue
}
if len(parts) < 2 {
value = ""
} else {
value = parts[1]
}
}
return
}
func (job *Job) GetenvBool(key string) (value bool) {
@ -90,8 +192,25 @@ func (job *Job) SetenvBool(key string, value bool) {
}
}
func (job *Job) GetenvInt(key string) int64 {
s := strings.Trim(job.Getenv(key), " \t")
val, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1
}
return val
}
func (job *Job) SetenvInt(key string, value int64) {
job.Setenv(key, fmt.Sprintf("%d", value))
}
// Returns nil if key not found
func (job *Job) GetenvList(key string) []string {
sval := job.Getenv(key)
if sval == "" {
return nil
}
l := make([]string, 0, 1)
if err := json.Unmarshal([]byte(sval), &l); err != nil {
l = append(l, sval)
@ -99,7 +218,7 @@ func (job *Job) GetenvList(key string) []string {
return l
}
func (job *Job) SetenvList(key string, value []string) error {
func (job *Job) SetenvJson(key string, value interface{}) error {
sval, err := json.Marshal(value)
if err != nil {
return err
@ -108,6 +227,116 @@ func (job *Job) SetenvList(key string, value []string) error {
return nil
}
func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key + "=" + value)
func (job *Job) SetenvList(key string, value []string) error {
return job.SetenvJson(key, value)
}
func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key+"="+value)
}
// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `src` cannot be decoded as a json dictionary, an error
// is returned.
func (job *Job) DecodeEnv(src io.Reader) error {
m := make(map[string]interface{})
if err := json.NewDecoder(src).Decode(&m); err != nil {
return err
}
for k, v := range m {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, ok := v.(float64); ok {
job.SetenvInt(k, int64(fval))
} else if sval, ok := v.(string); ok {
job.Setenv(k, sval)
} else if val, err := json.Marshal(v); err == nil {
job.Setenv(k, string(val))
} else {
job.Setenv(k, fmt.Sprintf("%v", v))
}
}
return nil
}
func (job *Job) EncodeEnv(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range job.Environ() {
var val interface{}
if err := json.Unmarshal([]byte(v), &val); err == nil {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, isFloat := val.(float64); isFloat {
val = int(fval)
}
m[k] = val
} else {
m[k] = v
}
}
if err := json.NewEncoder(dst).Encode(&m); err != nil {
return err
}
return nil
}
func (job *Job) ExportEnv(dst interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ExportEnv %s", err)
}
}()
var buf bytes.Buffer
// step 1: encode/marshal the env to an intermediary json representation
if err := job.EncodeEnv(&buf); err != nil {
return err
}
// step 2: decode/unmarshal the intermediary json into the destination object
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
return err
}
return nil
}
func (job *Job) ImportEnv(src interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ImportEnv: %s", err)
}
}()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
if err := job.DecodeEnv(&buf); err != nil {
return err
}
return nil
}
func (job *Job) Environ() map[string]string {
m := make(map[string]string)
for _, kv := range job.env {
parts := strings.SplitN(kv, "=", 2)
m[parts[0]] = parts[1]
}
return m
}
func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
}
func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stdout, format, args...)
}
func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stderr, format, args...)
}
@ -48,7 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error
// Graph database for storing entities and their relationships
type Database struct {
conn *sql.DB
mux sync.Mutex
mux sync.RWMutex
}
// Create a new graph database initialized with a root entity
@ -138,7 +138,14 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
// Return true if a name already exists in the database
func (db *Database) Exists(name string) bool {
return db.Get(name) != nil
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return false
}
return e != nil
}
func (db *Database) setEdge(parentPath, name string, e *Entity) error {
@ -165,6 +172,9 @@ func (db *Database) RootEntity() *Entity {
// Return the entity for a given path
func (db *Database) Get(name string) *Entity {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return nil
@ -200,23 +210,36 @@ func (db *Database) get(name string) (*Entity, error) {
// List all entities starting from the name
// The key will be the full path of the entity
func (db *Database) List(name string, depth int) Entities {
db.mux.RLock()
defer db.mux.RUnlock()
out := Entities{}
e, err := db.get(name)
if err != nil {
return out
}
for c := range db.children(e, name, depth) {
children, err := db.children(e, name, depth, nil)
if err != nil {
return out
}
for _, c := range children {
out[c.FullPath] = c.Entity
}
return out
}
// Walk through the child graph of an entity, calling walkFunc for each child entity.
// It is safe for walkFunc to call graph functions.
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
e, err := db.get(name)
children, err := db.Children(name, depth)
if err != nil {
return err
}
for c := range db.children(e, name, depth) {
// Note: the database lock must not be held while calling walkFunc
for _, c := range children {
if err := walkFunc(c.FullPath, c.Entity); err != nil {
return err
}
@ -224,8 +247,24 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
return nil
}
// Return the children of the specified entity
func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return nil, err
}
return db.children(e, name, depth, nil)
}
// Return the reference count for a specified id
func (db *Database) Refs(id string) int {
db.mux.RLock()
defer db.mux.RUnlock()
var count int
if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
return 0
@ -235,6 +274,9 @@ func (db *Database) Refs(id string) int {
// Return all the id's path references
func (db *Database) RefPaths(id string) Edges {
db.mux.RLock()
defer db.mux.RUnlock()
refs := Edges{}
rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
@ -356,56 +398,51 @@ type WalkMeta struct {
Edge *Edge
}
func (db *Database) children(e *Entity, name string, depth int) <-chan WalkMeta {
out := make(chan WalkMeta)
func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
if e == nil {
close(out)
return out
return entities, nil
}
go func() {
rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
if err != nil {
close(out)
rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var entityId, entityName string
if err := rows.Scan(&entityId, &entityName); err != nil {
return nil, err
}
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
}
defer rows.Close()
for rows.Next() {
var entityId, entityName string
if err := rows.Scan(&entityId, &entityName); err != nil {
// Log error
continue
}
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
}
meta := WalkMeta{
Parent: e,
Entity: child,
FullPath: path.Join(name, edge.Name),
Edge: edge,
}
meta := WalkMeta{
Parent: e,
Entity: child,
FullPath: path.Join(name, edge.Name),
Edge: edge,
}
entities = append(entities, meta)
out <- meta
if depth == 0 {
continue
}
if depth != 0 {
nDepth := depth
if depth != -1 {
nDepth -= 1
}
sc := db.children(child, meta.FullPath, nDepth)
for c := range sc {
out <- c
entities, err = db.children(child, meta.FullPath, nDepth, entities)
if err != nil {
return nil, err
}
}
close(out)
}()
return out
}
return entities, nil
}
// Return the entity based on the parent path and name
@ -220,12 +220,11 @@ func (graph *Graph) getDockerInitLayer() (string, error) {
if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil {
return "", err
}
if f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755); err != nil {
f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return "", err
} else {
f.Close()
}
f.Close()
}
} else {
return "", err
@ -9,7 +9,6 @@ import (
"io"
"io/ioutil"
"os"
"path"
"testing"
"time"
)
@ -121,41 +120,6 @@ func TestRegister(t *testing.T) {
}
}
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
// Test that an image can be deleted by its shorthand prefix
func TestDeletePrefix(t *testing.T) {
graph := tempGraph(t)
@ -5,7 +5,7 @@ It is a curated selection of planned improvements which are either important, di
For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues).
Tu suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
## Container wiring and service discovery
@ -1,14 +1,16 @@
# VERSION: 0.22
# DOCKER-VERSION 0.6.3
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION: Deploy docker-ci on Amazon EC2
# VERSION: 0.25
# DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Deploy docker-ci on Digital Ocean
# COMMENTS:
# CONFIG_JSON is an environment variable json string loaded as:
#
# export CONFIG_JSON='
# { "AWS_TAG": "EC2_instance_name",
# "AWS_ACCESS_KEY": "EC2_access_key",
# "AWS_SECRET_KEY": "EC2_secret_key",
# { "DROPLET_NAME": "docker-ci",
# "DO_CLIENT_ID": "Digital_Ocean_client_id",
# "DO_API_KEY": "Digital_Ocean_api_key",
# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
# "BUILDBOT_PWD": "Buildbot_server_password",
@ -33,9 +35,11 @@
from ubuntu:12.04
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
run apt-get update; apt-get install -y python2.7 python-dev python-pip ssh rsync less vim
run pip install boto fabric
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
> /etc/apt/sources.list
run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
python-pip ssh rsync less vim
run pip install requests fabric
# Add deployment code and set default container command
add . /docker-ci
@ -0,0 +1 @@
0.4.5
@ -43,7 +43,7 @@ c['slavePortnum'] = PORT_MASTER
# Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
'index','registry','coverage','nightlyrelease'])]
'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
change_filter=filter.ChangeFilter(branch='master',
repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
@ -51,7 +51,7 @@ c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
builderNames=['pullrequest'])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
'coverage'], hour=7, minute=00)]
'docker-coverage','registry-coverage'], hour=7, minute=00)]
c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
hour=range(0,24,4), minute=15)]
@ -76,17 +76,25 @@ c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
# Docker coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage', logEnviron=False,
factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'],
c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
factory=factory)]
# Docker registry coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
usePTY=True, command='docker run registry_coverage'.format(
DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
factory=factory)]
# Registry functional test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry', logEnviron=False,
command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_registry.sh'.format(BUILDBOT_PATH),
'{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True))
c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory=factory)]
@ -95,16 +103,17 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory = BuildFactory()
factory.addStep(ShellCommand(description='index', logEnviron=False,
command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH),
'{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True))
c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
factory=factory)]
# Docker nightly release
nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
factory = BuildFactory()
factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False,
usePTY=True, command='docker run -privileged'
' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
usePTY=True, command=nightlyrelease_cmd))
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
factory=factory)]
@ -1,11 +1,11 @@
#!/usr/bin/env python
import os, sys, re, json, base64
from boto.ec2.connection import EC2Connection
import os, sys, re, json, requests, base64
from subprocess import call
from fabric import api
from fabric.api import cd, run, put, sudo
from os import environ as env
from datetime import datetime
from time import sleep
# Remove SSH private key as it needs more processing
@ -20,42 +20,41 @@ for key in CONFIG:
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
env['CONFIG_JSON'],flags=re.DOTALL)
AWS_TAG = env.get('AWS_TAG','docker-ci')
AWS_KEY_NAME = 'dotcloud-dev' # Same as CONFIG_JSON['DOCKER_CI_PUB']
AWS_AMI = 'ami-d582d6bc' # Ubuntu 13.04
AWS_REGION = 'us-east-1'
AWS_TYPE = 'm1.small'
AWS_SEC_GROUPS = 'gateway'
AWS_IMAGE_USER = 'ubuntu'
DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
TIMEOUT = 120 # Seconds before droplet creation times out
IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
REGION_ID = 4 # New York 2
SIZE_ID = 62 # memory 2GB
DO_IMAGE_USER = 'root' # Image user on Digital Ocean
API_URL = 'https://api.digitalocean.com/'
DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
DOCKER_CI_PATH = '/docker-ci'
CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
class AWS_EC2:
'''Amazon EC2'''
def __init__(self, access_key, secret_key):
class DigitalOcean():
def __init__(self, key, client):
'''Set default API parameters'''
self.handler = EC2Connection(access_key, secret_key)
def create_instance(self, tag, instance_type):
reservation = self.handler.run_instances(**instance_type)
instance = reservation.instances[0]
sleep(10)
while instance.state != 'running':
sleep(5)
instance.update()
print "Instance state: %s" % (instance.state)
instance.add_tag("Name",tag)
print "instance %s done!" % (instance.id)
return instance.ip_address
def get_instances(self):
return self.handler.get_all_instances()
def get_tags(self):
return dict([(i.instances[0].id, i.instances[0].tags['Name'])
for i in self.handler.get_all_instances() if i.instances[0].tags])
def del_instance(self, instance_id):
self.handler.terminate_instances(instance_ids=[instance_id])
self.key = key
self.client = client
self.api_url = API_URL
def api(self, cmd_path, api_arg={}):
'''Make api call'''
api_arg.update({'api_key':self.key, 'client_id':self.client})
resp = requests.get(self.api_url + cmd_path, params=api_arg).text
resp = json.loads(resp)
if resp['status'] != 'OK':
raise Exception(resp['error_message'])
return resp
def droplet_data(self, name):
'''Get droplet data'''
data = self.api('droplets')
data = [droplet for droplet in data['droplets']
if droplet['name'] == name]
return data[0] if data else {}
def json_fmt(data):
@ -63,20 +62,36 @@ def json_fmt(data):
return json.dumps(data, sort_keys = True, indent = 2)
# Create EC2 API handler
ec2 = AWS_EC2(env['AWS_ACCESS_KEY'], env['AWS_SECRET_KEY'])
do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
# Stop processing if AWS_TAG exists on EC2
if AWS_TAG in ec2.get_tags().values():
print ('Instance: {} already deployed. No further processing.'
.format(AWS_TAG))
# Get DROPLET_NAME data
data = do.droplet_data(DROPLET_NAME)
# Stop processing if DROPLET_NAME exists on Digital Ocean
if data:
print ('Droplet: {} already deployed. No further processing.'
.format(DROPLET_NAME))
exit(1)
ip = ec2.create_instance(AWS_TAG, {'image_id':AWS_AMI, 'instance_type':AWS_TYPE,
'security_groups':[AWS_SEC_GROUPS], 'key_name':AWS_KEY_NAME})
# Create droplet
do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
'image_id':IMAGE_ID, 'size_id':SIZE_ID,
'ssh_key_ids':[env['DOCKER_KEY_ID']]})
# Wait 30 seconds for the machine to boot
sleep(30)
# Wait for droplet to be created.
start_time = datetime.now()
while (data.get('status','') != 'active' and (
datetime.now()-start_time).seconds < TIMEOUT):
data = do.droplet_data(DROPLET_NAME)
print data['status']
sleep(3)
# Wait for the machine to boot
sleep(15)
# Get droplet IP
ip = str(data['ip_address'])
print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
# Create docker-ci ssh private key so docker-ci docker container can communicate
# with its EC2 instance
@ -86,7 +101,7 @@ os.chmod('/root/.ssh/id_rsa',0600)
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
api.env.host_string = ip
api.env.user = AWS_IMAGE_USER
api.env.user = DO_IMAGE_USER
api.env.key_filename = '/root/.ssh/id_rsa'
# Correct timezone
@ -100,20 +115,17 @@ sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
credentials = {
'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE'],
'INDEX_AUTH': env['INDEX_AUTH']}
'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
base64.b64encode(json.dumps(credentials)))
# Transfer docker
sudo('mkdir -p ' + DOCKER_CI_PATH)
sudo('chown {}.{} {}'.format(AWS_IMAGE_USER, AWS_IMAGE_USER, DOCKER_CI_PATH))
call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, AWS_IMAGE_USER, ip,
sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
os.path.dirname(DOCKER_CI_PATH)), shell=True)
# Install Docker and Buildbot dependencies
sudo('addgroup docker')
sudo('usermod -a -G docker ubuntu')
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('echo deb https://get.docker.io/ubuntu docker main >'
@ -123,7 +135,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
' > /etc/apt/sources.list; apt-get update')
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
' aufs-tools make libfontconfig libevent-dev')
' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
@ -135,13 +147,13 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')
# Build docker-ci containers
sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
DOCKER_CI_PATH))
sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
DOCKER_CI_PATH))
# Download docker-ci testing container
sudo('docker pull mzdaniel/test_docker')
@ -154,3 +166,6 @@ sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
env['REGISTRY_SECRET_KEY']))
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')


@ -1,6 +1,6 @@
# VERSION: 0.3
# DOCKER-VERSION 0.6.3
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# VERSION: 0.4
# DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Testing docker PRs and commits on top of master using
# REFERENCES: This code reuses the excellent implementation of
# Docker in Docker made by Jerome Petazzoni.
@ -15,15 +15,10 @@
# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
maintainer Daniel Mizyrycki <daniel@docker.com>
# Setup go environment. Extracted from /Dockerfile
env CGO_ENABLED 0
env GOROOT /goroot
env PATH $PATH:/goroot/bin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
volume /var/lib/docker
workdir /go/src/github.com/dotcloud/docker
# Setup go in PATH. Extracted from /Dockerfile
env PATH /usr/local/go/bin:$PATH
# Add test_docker.sh
add test_docker.sh /usr/bin/test_docker.sh


@ -8,31 +8,26 @@ BRANCH=${3-master}
# Compute test paths
DOCKER_PATH=/go/src/github.com/dotcloud/docker
# Timestamp
echo
date; echo
# Fetch latest master
cd /
rm -rf /go
mkdir -p $DOCKER_PATH
git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
cd $DOCKER_PATH
git init .
git fetch -q http://github.com/dotcloud/docker master
git reset --hard FETCH_HEAD
# Merge commit
#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliably on AWS
git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
# Merge commit in top of master
git fetch -q "$REPO" "$BRANCH"
git merge --no-edit $COMMIT || exit 1
git merge --no-edit $COMMIT || exit 255
# Test commit
go test -v; exit_status=$?
./hack/make.sh test; exit_status=$?
# Display load if test fails
if [ $exit_status -eq 1 ] ; then
if [ $exit_status -ne 0 ] ; then
uptime; echo; free
fi
# Cleanup testing directory
rm -rf $BASE_PATH
exit $exit_status


@ -8,10 +8,12 @@ rm -rf docker-registry
# Setup the environment
export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$(pwd)/docker-registry/test
# Get latest docker registry
git clone -q https://github.com/dotcloud/docker-registry.git
cd docker-registry
sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
# Get dependencies
pip install -q -r requirements.txt
@ -20,7 +22,6 @@ pip install -q tox
# Run registry tests
tox || exit 1
export PYTHONPATH=$(pwd)/docker-registry
python -m unittest discover -p s3.py -s test || exit 1
python -m unittest discover -p workflow.py -s test


@ -1,20 +1,19 @@
# VERSION: 1.2
# DOCKER-VERSION 0.6.3
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# VERSION: 1.6
# DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Build docker nightly release using Docker in Docker.
# REFERENCES: This code reuses the excellent implementation of docker in docker
# made by Jerome Petazzoni. https://github.com/jpetazzo/dind
# COMMENTS:
# release_credentials.json is a base64 json encoded file containing:
# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
# "AWS_SECRET_KEY='Test_docker_AWS_S3_bucket_key'
# "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature'
# "INDEX_AUTH='Encrypted_index_authentication' }
# "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
# "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
# TO_BUILD: docker build -t dockerbuilder .
# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh
from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
maintainer Daniel Mizyrycki <daniel@docker.com>
# Add docker dependencies and downloading packages
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
@ -24,11 +23,8 @@ run apt-get update; apt-get install -y -q wget python2.7
run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
# Add proto docker builder
add ./dockerbuild /usr/bin/dockerbuild
run chmod +x /usr/bin/dockerbuild
add ./dockerbuild.sh /usr/bin/dockerbuild.sh
run chmod +x /usr/bin/dockerbuild.sh
# Add release credentials
add ./release_credentials.json /root/release_credentials.json
# Launch build process in a container
cmd dockerbuild
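
The comment block above describes release_credentials.json as a base64-encoded JSON file. A minimal Python sketch of producing such a file, mirroring what the deployment script does (the credential values here are the placeholder strings from the comments, not real secrets):

    import base64
    import json

    # Placeholder values taken from the Dockerfile comments above; the
    # real values come from the deployment environment.
    credentials = {
        'AWS_ACCESS_KEY': 'Test_docker_AWS_S3_bucket_id',
        'AWS_SECRET_KEY': 'Test_docker_AWS_S3_bucket_key',
        'GPG_PASSPHRASE': 'Test_docker_GPG_passphrase_signature',
    }

    with open('release_credentials.json', 'wb') as f:
        f.write(base64.b64encode(json.dumps(credentials).encode()))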


@ -1,50 +0,0 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, GPG_PASSPHRASE and INDEX_AUTH
# are decoded from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Enable debugging
set -x
# Fetch docker master branch
rm -rf /go/src/github.com/dotcloud/docker
cd /
git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
./hack/dind /usr/bin/docker -d &
sleep 5
# Add an uncommitted change to generate a timestamped release
date > timestamp
# Build the docker package using /Dockerfile
docker build -t docker .
# Run Docker unittests binary and Ubuntu package
docker run -privileged docker hack/make.sh
exit_status=$?
# Display load if test fails
if [ $exit_status -eq 1 ] ; then
uptime; echo; free
exit 1
fi
# Commit binary and ubuntu bundles for release
docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
set -x
# Push docker nightly
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX release hack/release.sh
set +x
docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE release hack/release.sh


@ -0,0 +1,40 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and GPG_PASSPHRASE are decoded
# from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
# Fetch docker master branch
set -x
cd /
rm -rf /go
git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
/usr/bin/docker version
/usr/bin/docker -d &
sleep 5
# Build Docker release container
docker build -t docker .
# Test docker and if everything works well, release
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
set +x
docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
exit_status=$?
# Display load if test fails
set -x
if [ $exit_status -ne 0 ] ; then
uptime; echo; free
exit 1
fi
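
The eval plus embedded Python one-liner above turns the base64-encoded JSON credentials back into shell export statements. The same decoding step, written out as a standalone sketch for readability:

    import base64
    import json
    import sys

    # Read the base64-encoded JSON credentials from stdin and print one
    # shell export line per key, as the one-liner in dockerbuild.sh does.
    creds = json.loads(base64.b64decode(sys.stdin.read()))
    for key, value in creds.items():
        print('export %s="%s"' % (key, value))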


@ -1 +0,0 @@
eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=


@ -0,0 +1,18 @@
# VERSION: 0.1
# DOCKER-VERSION 0.6.4
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION: Docker registry coverage
# COMMENTS: Add registry coverage into the docker-ci image
# TO_BUILD: docker build -t registry_coverage .
# TO_RUN: docker run registry_coverage
from docker-ci
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
# Add registry_coverage.sh and dependencies
run pip install coverage flask pyyaml requests simplejson python-glanceclient \
blinker redis boto gevent rsa mock
add registry_coverage.sh /usr/bin/registry_coverage.sh
run chmod +x /usr/bin/registry_coverage.sh
cmd "/usr/bin/registry_coverage.sh"


@ -0,0 +1,18 @@
#!/bin/bash
set -x
# Setup the environment
REGISTRY_PATH=/data/docker-registry
export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$REGISTRY_PATH/test
# Fetch latest docker-registry master
rm -rf $REGISTRY_PATH
git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
cd $REGISTRY_PATH
# Generate coverage
coverage run -m unittest discover test || exit 1
coverage report --include='./*' --omit='./test/*'


@ -34,7 +34,7 @@ env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
DROPLET_NAME = env.get('DROPLET_NAME','report')
TIMEOUT = 120 # Seconds before droplet creation times out
IMAGE_ID = 894856 # Docker on Ubuntu 13.04
IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
REGION_ID = 4 # New York 2
SIZE_ID = 66 # memory 512MB
DO_IMAGE_USER = 'root' # Image user on Digital Ocean


@ -22,7 +22,12 @@ bundle_test() {
for test_dir in $(find_test_dirs); do (
set -x
cd $test_dir
# Install packages that are dependencies of the tests.
# Note: Does not run the tests.
go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
# Run the tests with the optional $TESTFLAGS.
export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
go test -v -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
) done


@ -16,7 +16,12 @@ bundle_test() {
for test_dir in $(find_test_dirs); do (
set -x
cd $test_dir
# Install packages that are dependencies of the tests.
# Note: Does not run the tests.
go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
# Run the tests with the optional $TESTFLAGS.
go test -v -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
) done
} 2>&1 | tee $DEST/test.log


@ -10,7 +10,7 @@ fi
PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
PACKAGE_URL="http://www.docker.io/"
PACKAGE_MAINTAINER="docker@dotcloud.com"
PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime
PACKAGE_DESCRIPTION="Linux container runtime
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
repeatability across servers.
@ -37,27 +37,51 @@ bundle_ubuntu() {
# This will fail if the binary bundle hasn't been built
cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker
# Generate postinst/prerm scripts
cat >/tmp/postinst <<'EOF'
# Generate postinst/prerm/postrm scripts
cat > /tmp/postinst <<'EOF'
#!/bin/sh
service docker stop || true
grep -q '^docker:' /etc/group || groupadd --system docker || true
service docker start
EOF
cat >/tmp/prerm <<'EOF'
#!/bin/sh
service docker stop || true
set -e
set -u
case "$1" in
purge|remove|abort-install)
groupdel docker || true
;;
upgrade|failed-upgrade|abort-upgrade)
# don't touch docker group
;;
esac
getent group docker > /dev/null || groupadd --system docker || true
update-rc.d docker defaults > /dev/null || true
if [ -n "$2" ]; then
_dh_action=restart
else
_dh_action=start
fi
service docker $_dh_action 2>/dev/null || true
#DEBHELPER#
EOF
cat > /tmp/prerm <<'EOF'
#!/bin/sh
set -e
set -u
service docker stop 2>/dev/null || true
#DEBHELPER#
EOF
cat > /tmp/postrm <<'EOF'
#!/bin/sh
set -e
set -u
if [ "$1" = "purge" ] ; then
update-rc.d docker remove > /dev/null || true
fi
# In case this system is running systemd, we make systemd reload the unit files
# to pick up changes.
if [ -d /run/systemd/system ] ; then
systemctl --system daemon-reload > /dev/null || true
fi
#DEBHELPER#
EOF
# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
chmod +x /tmp/postinst /tmp/prerm
(
@ -66,6 +90,7 @@ EOF
--name lxc-docker-$VERSION --version $PKGVERSION \
--after-install /tmp/postinst \
--before-remove /tmp/prerm \
--after-remove /tmp/postrm \
--architecture "$PACKAGE_ARCHITECTURE" \
--prefix / \
--depends lxc \
@ -82,6 +107,8 @@ EOF
--vendor "$PACKAGE_VENDOR" \
--config-files /etc/init/docker.conf \
--config-files /etc/init.d/docker \
--config-files /etc/default/docker \
--deb-compression xz \
-t deb .
mkdir empty
fpm -s dir -C empty \
@ -92,7 +119,12 @@ EOF
--maintainer "$PACKAGE_MAINTAINER" \
--url "$PACKAGE_URL" \
--vendor "$PACKAGE_VENDOR" \
--config-files /etc/init/docker.conf \
--config-files /etc/init.d/docker \
--config-files /etc/default/docker \
--deb-compression xz \
-t deb .
# note: the --config-files lines have to be duplicated to stop overwrite on package upgrade (since we have to use this funky virtual package)
)
}


@ -97,7 +97,7 @@ write_to_s3() {
DEST=$1
F=`mktemp`
cat > $F
s3cmd --acl-public put $F $DEST
s3cmd --acl-public --mime-type='text/plain' put $F $DEST
rm -f $F
}
@ -107,14 +107,14 @@ s3_url() {
echo "https://$BUCKET"
;;
*)
echo "http://$BUCKET.s3.amazonaws.com"
s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
;;
esac
}
# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() {
[ -e bundles/$VERSION/ubuntu ] || {
echo >&2 './hack/make.sh must be run before release_ubuntu'
@ -168,7 +168,7 @@ EOF
# Upload repo
s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info
cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
@ -180,7 +180,12 @@ apt-get update ; apt-get install -y lxc-docker
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
# Add redirect at /ubuntu/info for URL-backwards-compatibility
rm -rf /tmp/emptyfile && touch /tmp/emptyfile
s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info
echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}
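
The redirect step above works by uploading an empty object whose x-amz-website-redirect-location header makes the S3 website endpoint send /ubuntu/info to /ubuntu/. A sketch of the same upload using boto instead of s3cmd (this assumes boto is installed and AWS credentials are available in the environment; the bucket name is illustrative):

    import boto

    conn = boto.connect_s3()
    bucket = conn.get_bucket('test.docker.io')  # illustrative bucket name
    key = bucket.new_key('ubuntu/info')
    # An empty object whose only job is to carry the redirect header.
    key.set_contents_from_string(
        '',
        headers={'x-amz-website-redirect-location': '/ubuntu/'},
        policy='public-read',
    )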
# Upload a static binary to S3
@ -189,14 +194,20 @@ release_binary() {
echo >&2 './hack/make.sh must be run before release_binary'
exit 1
}
S3DIR=s3://$BUCKET/builds/Linux/x86_64
s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
cat <<EOF | write_to_s3 s3://$BUCKET/builds/info
cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker -d
EOF
# Add redirect at /builds/info for URL-backwards-compatibility
rm -rf /tmp/emptyfile && touch /tmp/emptyfile
s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
if [ -z "$NOLATEST" ]; then
echo "Copying docker-$VERSION to docker-latest"
s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest

http_test.go (new file, 51 lines added)

@ -0,0 +1,51 @@
package docker
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TestHttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This account hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}
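
getBoolParam itself is not part of this diff; the contract that TestGetBoolParam pins down is sketched here in Python purely for reference (inferred from the assertions above, not from the Go implementation):

    def get_bool_param(value):
        # Behavior inferred from TestGetBoolParam; the real helper is a
        # Go function that is not shown in this diff.
        if value == '':
            return False
        if value in ('1', 'true', 'True'):
            return True
        if value in ('0', 'false', 'False'):
            return False
        raise ValueError('bad parameter: %s' % value)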


@ -16,6 +16,7 @@ import (
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
)
@ -54,11 +55,11 @@ func LoadImage(root string) (*Image, error) {
return nil, err
}
} else {
if size, err := strconv.Atoi(string(buf)); err != nil {
size, err := strconv.Atoi(string(buf))
if err != nil {
return nil, err
} else {
img.Size = int64(size)
}
img.Size = int64(size)
}
// Check that the filesystem layer exists
@ -99,14 +100,14 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root str
// If raw json is provided, then use it
if jsonData != nil {
return ioutil.WriteFile(jsonPath(root), jsonData, 0600)
} else { // Otherwise, unmarshal the image
jsonData, err := json.Marshal(img)
if err != nil {
return err
}
if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
return err
}
}
// Otherwise, unmarshal the image
jsonData, err := json.Marshal(img)
if err != nil {
return err
}
if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
return err
}
return StoreSize(img, root)
@ -114,10 +115,22 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root str
func StoreSize(img *Image, root string) error {
layer := layerPath(root)
data := make(map[uint64]bool)
var totalSize int64 = 0
var totalSize int64
filepath.Walk(layer, func(path string, fileInfo os.FileInfo, err error) error {
totalSize += fileInfo.Size()
size := fileInfo.Size()
if size == 0 {
return nil
}
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
if _, entryExists := data[inode]; entryExists {
return nil
}
data[inode] = false
totalSize += size
return nil
})
img.Size = totalSize
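
The StoreSize change above fixes hard-link double counting: zero-size entries are skipped and each inode is counted only once. The same idea in a short Python sketch (illustrative, not part of the codebase):

    import os

    def layer_size(layer):
        # Sum file sizes under layer, counting each hard-linked inode
        # once, the same deduplication StoreSize performs via its map.
        seen = set()
        total = 0
        for dirpath, _, filenames in os.walk(layer):
            for name in filenames:
                st = os.lstat(os.path.join(dirpath, name))
                if st.st_size == 0 or st.st_ino in seen:
                    continue
                seen.add(st.st_ino)
                total += st.st_size
        return total
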
@ -163,21 +176,21 @@ func MountAUFS(ro []string, rw string, target string) error {
}
// TarLayer returns a tar archive of the image's filesystem layer.
func (image *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
layerPath, err := image.layer()
func (img *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
layerPath, err := img.layer()
if err != nil {
return nil, err
}
return archive.Tar(layerPath, compression)
}
func (image *Image) Mount(root, rw string) error {
func (img *Image) Mount(root, rw string) error {
if mounted, err := Mounted(root); err != nil {
return err
} else if mounted {
return fmt.Errorf("%s is already mounted", root)
}
layers, err := image.layers()
layers, err := img.layers()
if err != nil {
return err
}
@ -194,18 +207,14 @@ func (image *Image) Mount(root, rw string) error {
return nil
}
func (image *Image) Changes(rw string) ([]Change, error) {
layers, err := image.layers()
func (img *Image) Changes(rw string) ([]Change, error) {
layers, err := img.layers()
if err != nil {
return nil, err
}
return Changes(layers, rw)
}
func (image *Image) ShortID() string {
return utils.TruncateID(image.ID)
}
func ValidateID(id string) error {
if id == "" {
return fmt.Errorf("Image id can't be empty")
@ -245,8 +254,10 @@ func (img *Image) History() ([]*Image, error) {
// FIXME: @shykes refactor this function with the new error handling
// (I'll do it if I have time tonight, I focus on the rest)
func (img *Image) layers() ([]string, error) {
var list []string
var e error
var (
list []string
e error
)
if err := img.WalkHistory(
func(img *Image) (err error) {
if layer, err := img.layer(); err != nil {
@ -266,12 +277,11 @@ func (img *Image) layers() ([]string, error) {
}
// Inject the dockerinit layer (empty place-holder for mount-binding dockerinit)
if dockerinitLayer, err := img.getDockerInitLayer(); err != nil {
dockerinitLayer, err := img.getDockerInitLayer()
if err != nil {
return nil, err
} else {
list = append([]string{dockerinitLayer}, list...)
}
return list, nil
return append([]string{dockerinitLayer}, list...), nil
}
func (img *Image) WalkHistory(handler func(*Image) error) (err error) {

Diff not shown because of its large size.

integration/auth_test.go (new file, 61 lines added)

@ -0,0 +1,61 @@
package docker
import (
"crypto/rand"
"encoding/hex"
"github.com/dotcloud/docker/auth"
"os"
"strings"
"testing"
)
// FIXME: these tests have an external dependency on a staging index hosted
// on the docker.io infrastructure. That dependency should be removed.
// - Unit tests should have no side-effect dependencies.
// - Integration tests should have side-effects limited to the host environment being tested.
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = auth.Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}


@ -2,7 +2,9 @@ package docker
import (
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
"io/ioutil"
"net"
"net/http"
@ -14,7 +16,7 @@ import (
// mkTestContext generates a build context from the contents of the provided dockerfile.
// This context is suitable for use as an argument to BuildFile.Build()
func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive {
context, err := mkBuildContext(dockerfile, files)
context, err := docker.MkBuildContext(dockerfile, files)
if err != nil {
t.Fatal(err)
}
@ -228,17 +230,15 @@ func TestBuild(t *testing.T) {
}
}
func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
if srv == nil {
runtime := mkRuntime(t)
func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image {
if eng == nil {
eng = NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// the engine?
defer nuke(runtime)
srv = &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
}
srv := mkServerFromEngine(eng, t)
httpServer, err := mkTestingFileServer(context.remoteFiles)
if err != nil {
@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false)
id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err != nil {
t.Fatal(err)
@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) {
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func TestBuildEntrypointRunCleanup(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
run echo "hello"
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img = buildImage(testContextTemplate{`
from {IMAGE}
@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
add foo /foo
entrypoint ["/bin/echo"]
`,
[][2]string{{"foo", "HEYO"}}, nil}, t, srv, true)
[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
if len(img.Config.Cmd) != 0 {
t.Fail()
@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
}
func TestBuildImageWithCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, true)
img = buildImage(template, t, eng, true)
if imageId != img.ID {
t.Logf("Image ids should match: %s != %s", imageId, img.ID)
@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) {
}
func TestBuildImageWithoutCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, false)
img = buildImage(template, t, eng, false)
if imageId == img.ID {
t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) {
}
func TestForbiddenContextPath(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)
context := testContextTemplate{`
from {IMAGE}
@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) {
}
func TestBuildADDFileNotFound(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
context := testContextTemplate{`
from {IMAGE}
@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -544,29 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
func TestBuildInheritance(t *testing.T) {
runtime, err := newTestRuntime("")
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
expose 4243
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img2 := buildImage(testContextTemplate{fmt.Sprintf(`
from %s
entrypoint ["/bin/echo"]
`, img.ID),
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
// from child
if img2.Config.Entrypoint[0] != "/bin/echo" {


@ -3,9 +3,14 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"strings"
"testing"
"time"
@ -63,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -108,8 +113,8 @@ func TestRunHostname(t *testing.T) {
func TestRunWorkdir(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -153,8 +158,8 @@ func TestRunWorkdir(t *testing.T) {
func TestRunWorkdirExists(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -198,8 +203,8 @@ func TestRunExit(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -251,8 +256,8 @@ func TestRunDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -284,7 +289,7 @@ func TestRunDisconnect(t *testing.T) {
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := globalRuntime.List()[0]
container.Wait()
if container.State.Running {
if container.State.IsRunning() {
t.Fatalf("/bin/cat is still running after closing stdin")
}
})
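
The recurring change in these tests, from reading container.State.Running to calling container.State.IsRunning(), suggests the running flag is now read under a lock rather than as a bare struct field. A minimal Python sketch of that pattern (assumed rationale; the Go State type is not shown in this excerpt):

    import threading

    class State(object):
        def __init__(self):
            self._lock = threading.Lock()
            self._running = False

        def is_running(self):
            # Read the flag under the lock so readers cannot race with
            # concurrent writers (the assumed motivation for IsRunning()).
            with self._lock:
                return self._running

        def set_running(self, value):
            with self._lock:
                self._running = value
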
@ -296,8 +301,8 @@ func TestRunDisconnectTty(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -314,7 +319,7 @@ func TestRunDisconnectTty(t *testing.T) {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := globalRuntime.List()
if len(l) == 1 && l[0].State.Running {
if len(l) == 1 && l[0].State.IsRunning() {
break
}
time.Sleep(10 * time.Millisecond)
@ -324,7 +329,7 @@ func TestRunDisconnectTty(t *testing.T) {
// Client disconnect after run -i should keep stdin out in TTY mode
container := globalRuntime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2000*time.Second, func() {
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
@ -340,7 +345,7 @@ func TestRunDisconnectTty(t *testing.T) {
// Give some time to monitor to do his thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
if !container.State.IsRunning() {
t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
}
}
@ -353,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -380,8 +385,8 @@ func TestRunAttachStdin(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if cmdOutput != container.ShortID()+"\n" {
t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortID()+"\n", cmdOutput)
if cmdOutput != container.ID+"\n" {
t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput)
}
})
@ -417,8 +422,8 @@ func TestRunDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -449,7 +454,7 @@ func TestRunDetach(t *testing.T) {
})
time.Sleep(500 * time.Millisecond)
if !container.State.Running {
if !container.State.IsRunning() {
t.Fatal("The detached container should be still running")
}
@ -458,13 +463,13 @@ func TestRunDetach(t *testing.T) {
})
}
// TestAttachDetach checks that attach in tty mode can be detached
// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
func TestAttachDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -474,7 +479,7 @@ func TestAttachDetach(t *testing.T) {
}
}()
var container *Container
var container *docker.Container
setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
buf := make([]byte, 1024)
@ -485,8 +490,8 @@ func TestAttachDetach(t *testing.T) {
container = globalRuntime.List()[0]
if strings.Trim(string(buf[:n]), " \r\n") != container.ShortID() {
t.Fatalf("Wrong ID received. Expect %s, received %s", container.ShortID(), buf[:n])
if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
}
})
setTimeout(t, "Starting container timed out", 10*time.Second, func() {
@ -495,12 +500,12 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch = make(chan struct{})
go func() {
defer close(ch)
if err := cli.CmdAttach(container.ShortID()); err != nil {
if err := cli.CmdAttach(container.ID); err != nil {
if err != io.ErrClosedPipe {
t.Fatal(err)
}
@ -529,7 +534,69 @@ func TestAttachDetach(t *testing.T) {
})
time.Sleep(500 * time.Millisecond)
if !container.State.Running {
if !container.State.IsRunning() {
t.Fatal("The detached container should be still running")
}
setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() {
container.Kill()
})
}
// TestAttachDetachTruncatedID checks that attach in tty mode can be detached
func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go stdout.Read(make([]byte, 1024))
setTimeout(t, "Starting container timed out", 2*time.Second, func() {
if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
t.Fatal(err)
}
})
container := globalRuntime.List()[0]
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch := make(chan struct{})
go func() {
defer close(ch)
if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil {
if err != io.ErrClosedPipe {
t.Fatal(err)
}
}
}()
setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
if err != io.ErrClosedPipe {
t.Fatal(err)
}
}
})
setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
stdinPipe.Write([]byte{16, 17})
if err := stdinPipe.Close(); err != nil {
t.Fatal(err)
}
})
closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
// wait for CmdRun to return
setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
<-ch
})
time.Sleep(500 * time.Millisecond)
if !container.State.IsRunning() {
t.Fatal("The detached container should be still running")
}
@ -543,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go func() {
// Start a process in daemon mode
@ -562,7 +629,7 @@ func TestAttachDisconnect(t *testing.T) {
setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
for {
l := globalRuntime.List()
if len(l) == 1 && l[0].State.Running {
if len(l) == 1 && l[0].State.IsRunning() {
break
}
time.Sleep(10 * time.Millisecond)
@ -598,7 +665,7 @@ func TestAttachDisconnect(t *testing.T) {
// We closed stdin, expect /bin/cat to still be running
// Wait a little bit to make sure container.monitor() did his thing
err := container.WaitTimeout(500 * time.Millisecond)
if err == nil || !container.State.Running {
if err == nil || !container.State.IsRunning() {
t.Fatalf("/bin/cat is not running after closing stdin")
}
@ -612,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) {
func TestRunAutoRemove(t *testing.T) {
t.Skip("Fixme. Skipping test for now, race condition")
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -647,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) {
}
func TestCmdLogs(t *testing.T) {
cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
t.Fatal(err)
@ -665,8 +732,8 @@ func TestCmdLogs(t *testing.T) {
// Expected behaviour: using / as a bind mount source should throw an error
func TestRunErrorBindMountRootSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -684,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -699,3 +766,178 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
<-c
})
}
func TestImagesViz(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
defer close(c)
if err := cli.CmdImages("-viz"); err != nil {
t.Fatal(err)
}
stdoutPipe.Close()
}()
setTimeout(t, "Reading command output time out", 2*time.Second, func() {
cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout))
if err != nil {
t.Fatal(err)
}
cmdOutput := string(cmdOutputBytes)
regexpStrings := []string{
"digraph docker {",
fmt.Sprintf("base -> \"%s\" \\[style=invis]", unitTestImageIDShort),
fmt.Sprintf("label=\"%s\\\\n%s:latest\"", unitTestImageIDShort, unitTestImageName),
fmt.Sprintf("label=\"%s\\\\n%s:%s\"", utils.TruncateID(image.ID), "test", "latest"),
"base \\[style=invisible]",
}
compiledRegexps := []*regexp.Regexp{}
for _, regexpString := range regexpStrings {
regexp, err := regexp.Compile(regexpString)
if err != nil {
fmt.Println("Error in regex string: ", err)
return
}
compiledRegexps = append(compiledRegexps, regexp)
}
for _, regexp := range compiledRegexps {
if !regexp.MatchString(cmdOutput) {
t.Fatalf("images -viz content '%s' did not match regexp '%s'", cmdOutput, regexp)
}
}
})
}
func TestImagesTree(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
defer close(c)
if err := cli.CmdImages("-tree"); err != nil {
t.Fatal(err)
}
stdoutPipe.Close()
}()
setTimeout(t, "Reading command output time out", 2*time.Second, func() {
cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout))
if err != nil {
t.Fatal(err)
}
cmdOutput := string(cmdOutputBytes)
regexpStrings := []string{
fmt.Sprintf("└─%s Size: (\\d+.\\d+ MB) \\(virtual \\d+.\\d+ MB\\) Tags: %s:latest", unitTestImageIDShort, unitTestImageName),
"(?m)^ └─[0-9a-f]+",
"(?m)^ └─[0-9a-f]+",
"(?m)^ └─[0-9a-f]+",
fmt.Sprintf(" └─%s Size: \\d+ B \\(virtual \\d+.\\d+ MB\\) Tags: test:latest", utils.TruncateID(image.ID)),
}
compiledRegexps := []*regexp.Regexp{}
for _, regexpString := range regexpStrings {
regexp, err := regexp.Compile(regexpString)
if err != nil {
fmt.Println("Error in regex string: ", err)
return
}
compiledRegexps = append(compiledRegexps, regexp)
}
for _, regexp := range compiledRegexps {
if !regexp.MatchString(cmdOutput) {
t.Fatalf("images -tree content '%s' did not match regexp '%s'", cmdOutput, regexp)
}
}
})
}
func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image {
var testBuilder = testContextTemplate{
`
from {IMAGE}
run sh -c 'echo root:testpass > /tmp/passwd'
run mkdir -p /var/run/sshd
run [ "$(cat /tmp/passwd)" = "root:testpass" ]
run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
`,
nil,
nil,
}
image := buildImage(testBuilder, t, eng, true)
err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
if err != nil {
t.Fatal(err)
}
return image
}
// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
// TestRunCidFile tests that run -cidfile returns the longid
func TestRunCidFile(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
if err != nil {
t.Fatal(err)
}
tmpCidFile := path.Join(tmpDir, "cid")
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
defer close(c)
if err := cli.CmdRun("-cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil {
t.Fatal(err)
}
}()
defer os.RemoveAll(tmpDir)
setTimeout(t, "Reading command output time out", 2*time.Second, func() {
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if len(cmdOutput) < 1 {
t.Fatalf("'ls' should return something, not '%s'", cmdOutput)
}
//read the tmpCidFile
buffer, err := ioutil.ReadFile(tmpCidFile)
if err != nil {
t.Fatal(err)
}
id := string(buffer)
if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") {
t.Fatalf("-cidfile should be a long id, not '%s'", id)
}
//test that it's a valid cid? (though the container is gone..)
//remove the file and dir.
})
setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
<-c
})
}

Diff not shown because of its large size.

integration/graph_test.go (new file, 57 lines added)

@ -0,0 +1,57 @@
package docker
import (
"github.com/dotcloud/docker"
"io/ioutil"
"os"
"path"
"testing"
)
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := docker.Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
//FIXME: duplicate
func tempGraph(t *testing.T) *docker.Graph {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
}
graph, err := docker.NewGraph(tmp)
if err != nil {
t.Fatal(err)
}
return graph
}


@ -0,0 +1,22 @@
package docker
import (
"github.com/dotcloud/docker/iptables"
"os"
"testing"
)
// FIXME: this test should be a unit test.
// For example by mocking os/exec to make sure iptables is not actually called.
func TestIptables(t *testing.T) {
if _, err := iptables.Raw("-L"); err != nil {
t.Fatal(err)
}
path := os.Getenv("PATH")
os.Setenv("PATH", "")
defer os.Setenv("PATH", path)
if _, err := iptables.Raw("-L"); err == nil {
t.Fatal("Not finding iptables in the PATH should cause an error")
}
}


@ -3,17 +3,19 @@ package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"io"
"log"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
@ -22,6 +24,7 @@ import (
const (
unitTestImageName = "docker-test-image"
unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
unitTestImageIDShort = "83599e29c455"
unitTestNetworkBridge = "testdockbr0"
unitTestStoreBase = "/var/lib/docker/unit-tests"
testDaemonAddr = "127.0.0.1:4270"
@ -29,39 +32,33 @@ const (
)
var (
globalRuntime *Runtime
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *docker.Runtime
globalEngine *engine.Engine
startFds int
startGoroutines int
)
func nuke(runtime *Runtime) error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
return os.RemoveAll(runtime.config.Root)
// FIXME: nuke() is deprecated by Runtime.Nuke()
func nuke(runtime *docker.Runtime) error {
return runtime.Nuke()
}
func cleanup(runtime *Runtime) error {
// FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() {
container.Kill()
runtime.Destroy(container)
}
images, err := runtime.graph.Map()
srv := mkServerFromEngine(eng, t)
images, err := srv.Images(true, "")
if err != nil {
return err
}
for _, image := range images {
if image.ID != unitTestImageID {
runtime.graph.Delete(image.ID)
srv.ImageDelete(image.ID, false)
}
}
return nil
@ -118,28 +115,24 @@ func init() {
}
func setupBaseImage() {
config := &DaemonConfig{
Root: unitTestStoreBase,
AutoRestart: false,
BridgeIface: unitTestNetworkBridge,
}
runtime, err := NewRuntimeFromDirectory(config)
eng, err := engine.New(unitTestStoreBase)
if err != nil {
log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
}
job := eng.Job("initapi")
job.Setenv("Root", unitTestStoreBase)
job.SetenvBool("Autorestart", false)
job.Setenv("BridgeIface", unitTestNetworkBridge)
if err := job.Run(); err != nil {
log.Fatalf("Unable to create a runtime for tests: %s", err)
}
// Create the "Server"
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
// If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
// Retrieve the Image
if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
log.Fatalf("Unable to pull the test image:", err)
log.Fatalf("Unable to pull the test image: %s", err)
}
}
}
@@ -149,18 +142,22 @@ func spawnGlobalDaemon() {
utils.Debugf("Global runtime already exists. Skipping.")
return
}
globalRuntime = mkRuntime(log.New(os.Stderr, "", 0))
srv := &Server{
runtime: globalRuntime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
globalEngine = eng
globalRuntime = mkRuntimeFromEngine(eng, t)
// Spawn a Daemon
go func() {
utils.Debugf("Spawning global daemon for integration tests")
if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
log.Fatalf("Unable to spawn the test daemon:", err)
listenURL := &url.URL{
Scheme: testDaemonProto,
Host: testDaemonAddr,
}
job := eng.Job("serveapi", listenURL.String())
job.SetenvBool("Logging", os.Getenv("DEBUG") != "")
if err := job.Run(); err != nil {
log.Fatalf("Unable to spawn the test daemon: %s", err)
}
}()
// Give some time to ListenAndServe to actually start
@@ -170,8 +167,8 @@ func spawnGlobalDaemon() {
// FIXME: test that ImagePull(json=true) send correct json output
func GetTestImage(runtime *Runtime) *Image {
imgs, err := runtime.graph.Map()
func GetTestImage(runtime *docker.Runtime) *docker.Image {
imgs, err := runtime.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image:", err)
}
@@ -180,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image
}
}
log.Fatalf("Test image %v not found", unitTestImageID)
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
return nil
}
@@ -193,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
}
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
@@ -234,13 +231,25 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Exists() returned false for a newly created container")
}
// Test that conflict error displays correct details
testContainer, _, _ := runtime.Create(
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
"conflictname",
)
if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
}
// Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing")
}
if _, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{},
},
@@ -249,7 +258,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Fatal("Builder.Create should throw an error when Cmd is empty")
}
config := &Config{
config := &docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"},
@@ -262,7 +271,7 @@ func TestRuntimeCreate(t *testing.T) {
}
// test expose 80:8000
container, warnings, err := runtime.Create(&Config{
container, warnings, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
@@ -281,7 +290,7 @@ func TestDestroy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
}, "")
@@ -308,12 +317,6 @@ func TestDestroy(t *testing.T) {
t.Errorf("Unable to get newly created container")
}
// Make sure the container root directory does not exist anymore
_, err = os.Stat(container.root)
if err == nil || !os.IsNotExist(err) {
t.Errorf("Container root directory still exists after destroy")
}
// Test double destroy
if err := runtime.Destroy(container); err == nil {
// It should have failed
@@ -325,13 +328,13 @@ func TestGet(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1)
container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2)
container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3)
if runtime.Get(container1.ID) != container1 {
@@ -348,15 +351,21 @@ func TestGet(t *testing.T) {
}
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
var (
err error
container *Container
strPort string
runtime = mkRuntime(t)
port = 5554
p Port
err error
id string
strPort string
eng = NewTestEngine(t)
runtime = mkRuntimeFromEngine(eng, t)
port = 5554
p docker.Port
)
defer func() {
if err != nil {
runtime.Nuke()
}
}()
for {
port += 1
@@ -369,40 +378,48 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
} else {
t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
}
ep := make(map[Port]struct{}, 1)
p = Port(fmt.Sprintf("%s/%s", strPort, proto))
ep := make(map[docker.Port]struct{}, 1)
p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
ep[p] = struct{}{}
container, _, err = runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", cmd},
PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
ExposedPorts: ep,
}, "")
if err != nil {
nuke(runtime)
jobCreate := eng.Job("create")
jobCreate.Setenv("Image", unitTestImageID)
jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
jobCreate.SetenvJson("ExposedPorts", ep)
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
if container != nil {
// FIXME: this relies on the undocumented behavior of runtime.Create
// which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
break
}
t.Logf("Port %v already in use, trying another one", strPort)
}
container.hostConfig = &HostConfig{
PortBindings: make(map[Port][]PortBinding),
}
container.hostConfig.PortBindings[p] = []PortBinding{
jobStart := eng.Job("start", id)
portBindings := make(map[docker.Port][]docker.PortBinding)
portBindings[p] = []docker.PortBinding{
{},
}
if err := container.Start(); err != nil {
nuke(runtime)
if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
container := runtime.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for !container.State.Running {
for !container.State.IsRunning() {
time.Sleep(10 * time.Millisecond)
}
})
@@ -500,14 +517,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
}
func TestRestore(t *testing.T) {
runtime1 := mkRuntime(t)
defer nuke(runtime1)
eng := NewTestEngine(t)
runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
@@ -515,7 +533,7 @@ func TestRestore(t *testing.T) {
t.Fatal(err)
}
if !container2.State.Running {
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
@@ -525,7 +543,7 @@ func TestRestore(t *testing.T) {
if err := container2.WaitTimeout(2 * time.Second); err != nil {
t.Fatal(err)
}
container2.State.Running = true
container2.State.SetRunning(42)
container2.ToDisk()
if len(runtime1.List()) != 2 {
@@ -535,24 +553,31 @@ func TestRestore(t *testing.T) {
t.Fatal(err)
}
if !container2.State.Running {
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = false
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
root := eng.Root()
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
runningCount := 0
for _, c := range runtime2.List() {
if c.State.Running {
if c.State.IsRunning() {
t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
runningCount++
}
@@ -567,18 +592,35 @@ func TestRestore(t *testing.T) {
if err := container3.Run(); err != nil {
t.Fatal(err)
}
container2.State.Running = false
container2.State.SetStopped(0)
}
func TestReloadContainerLinks(t *testing.T) {
runtime1 := mkRuntime(t)
// FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false,
// and we want to set it to true.
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime1 := mkRuntimeFromEngine(eng, t)
defer nuke(runtime1)
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
@@ -586,7 +628,9 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatal(err)
}
// Add a link to container 2
container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
// Why do we need it @crosbymichael?
// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
t.Fatal(err)
}
@@ -594,11 +638,11 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatal(err)
}
if !container2.State.Running {
if !container2.State.IsRunning() {
t.Fatalf("Container %v should appear as running but isn't", container2.ID)
}
if !container1.State.Running {
if !container1.State.IsRunning() {
t.Fatalf("Container %s should appear as running but isn't", container1.ID)
}
@@ -608,18 +652,24 @@ func TestReloadContainerLinks(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = true
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
eng, err = engine.New(root)
if err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
job = eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
runningCount := 0
for _, c := range runtime2.List() {
if c.State.Running {
if c.State.IsRunning() {
runningCount++
}
}
@@ -627,109 +677,85 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatalf("Expected 2 container alive, %d found", runningCount)
}
// FIXME: we no longer test if containers were registered in the right order,
// because there is no public API to inspect the registration order.
// Make sure container 2 (the child of container 1) was registered and started first
// with the runtime
first := runtime2.containers.Front()
if first.Value.(*Container).ID != container2.ID {
//
containers := runtime2.List()
if len(containers) == 0 {
t.Fatalf("Runtime has no containers")
}
first := containers[0]
if first.ID != container2.ID {
t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
}
// Verify that the link is still registered in the runtime
entity := runtime2.containerGraph.Get(container1.Name)
if entity == nil {
t.Fatal("Entity should not be nil")
if c := runtime2.Get(container1.Name); c == nil {
t.Fatal("Named container is no longer registered after restart")
}
}
func TestDefaultContainerName(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err := srv.ContainerCreate(config, "some_name")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
containerID := container.ID
if container.Name != "/some_name" {
t.Fatalf("Expect /some_name got %s", container.Name)
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name != "some_name" {
t.Fatalf("Expected some_name got %s", edge.Name)
if c := runtime.Get("/some_name"); c == nil {
t.Fatalf("Couldn't retrieve test container as /some_name")
} else if c.ID != containerID {
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
}
}
func TestRandomContainerName(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createTestContainer(eng, config, t))
containerID := container.ID
if container.Name == "" {
t.Fatalf("Expected not empty container name")
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name == "" {
t.Fatalf("Expected not empty container name")
if c := runtime.Get(container.Name); c == nil {
log.Fatalf("Could not lookup container %s by its name", container.Name)
} else if c.ID != containerID {
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
}
}
func TestLinkChildContainer(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err := srv.ContainerCreate(config, "/webapp")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
if err != nil {
@@ -740,17 +766,12 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err = srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(shortId)
childContainer := runtime.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
@@ -767,20 +788,16 @@ func TestLinkChildContainer(t *testing.T) {
}
func TestGetAllChildren(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err := srv.ContainerCreate(config, "/webapp")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))
webapp, err := runtime.GetByName("/webapp")
if err != nil {
@@ -791,17 +808,12 @@ func TestGetAllChildren(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
shortId, _, err = srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
childContainer := runtime.Get(shortId)
childContainer := runtime.Get(createTestContainer(eng, config, t))
if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
@@ -828,19 +840,3 @@ func TestGetAllChildren(t *testing.T) {
}
}
}
func TestGetFullName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
name, err := runtime.getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := runtime.getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}

integration/server_test.go

@@ -0,0 +1,400 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"strings"
"testing"
)
func TestContainerTagImageDelete(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
images, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+3 {
t.Errorf("Expected %d images, %d found", len(initialImages)+3, len(images))
}
if _, err := srv.ImageDelete("utest/docker:tag2", true); err != nil {
t.Fatal(err)
}
images, err = srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+2 {
t.Errorf("Expected %d images, %d found", len(initialImages)+2, len(images))
}
if _, err := srv.ImageDelete("utest:5000/docker:tag3", true); err != nil {
t.Fatal(err)
}
images, err = srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+1 {
t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images))
}
if _, err := srv.ImageDelete("utest:tag1", true); err != nil {
t.Fatal(err)
}
images, err = srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images) != len(initialImages) {
t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
}
}
func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
if err = srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err)
}
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestCreateRmVolumes(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
err = srv.ContainerStop(id, 1)
if err != nil {
t.Fatal(err)
}
if err = srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err)
}
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestCommit(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if _, err := srv.ContainerCommit(id, "testrepo", "testtag", "", "", config); err != nil {
t.Fatal(err)
}
}
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if err := srv.ContainerRestart(id, 15); err != nil {
t.Fatal(err)
}
if err := srv.ContainerStop(id, 15); err != nil {
t.Fatal(err)
}
job = eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if err := srv.ContainerKill(id, 0); err != nil {
t.Fatal(err)
}
// FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty")
if err := srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err)
}
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestRunWithTooLowMemoryLimit(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit (524288 bytes = 512 kB).
job := eng.Job("create")
job.Setenv("Image", unitTestImageID)
job.Setenv("Memory", "524287")
job.Setenv("CpuShares", "1000")
job.SetenvList("Cmd", []string{"/bin/cat"})
var id string
job.StdoutParseString(&id)
if err := job.Run(); err == nil {
t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
}
}
func TestRmi(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
defer mkRuntimeFromEngine(eng, t).Nuke()
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo", "test"}, nil)
if err != nil {
t.Fatal(err)
}
containerID := createTestContainer(eng, config, t)
//To remove
job := eng.Job("start", containerID)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if _, err := srv.ContainerWait(containerID); err != nil {
t.Fatal(err)
}
imageID, err := srv.ContainerCommit(containerID, "test", "", "", "", nil)
if err != nil {
t.Fatal(err)
}
err = srv.ContainerTag(imageID, "test", "0.1", false)
if err != nil {
t.Fatal(err)
}
containerID = createTestContainer(eng, config, t)
//To remove
job = eng.Job("start", containerID)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if _, err := srv.ContainerWait(containerID); err != nil {
t.Fatal(err)
}
_, err = srv.ContainerCommit(containerID, "test", "", "", "", nil)
if err != nil {
t.Fatal(err)
}
images, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images)-len(initialImages) != 2 {
t.Fatalf("Expected 2 new images, found %d.", len(images)-len(initialImages))
}
_, err = srv.ImageDelete(imageID, true)
if err != nil {
t.Fatal(err)
}
images, err = srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if len(images)-len(initialImages) != 1 {
t.Fatalf("Expected 1 new image, found %d.", len(images)-len(initialImages))
}
for _, image := range images {
if strings.Contains(unitTestImageID, image.ID) {
continue
}
if image.RepoTags[0] == "<none>:<none>" {
t.Fatalf("Expected tagged image, got untagged one.")
}
}
}
func TestImagesFilter(t *testing.T) {
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
images, err := srv.Images(false, "utest*/*")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != 2 {
t.Fatal("incorrect number of matches returned")
}
images, err = srv.Images(false, "utest")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != 1 {
t.Fatal("incorrect number of matches returned")
}
images, err = srv.Images(false, "utest*")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != 1 {
t.Fatal("incorrect number of matches returned")
}
images, err = srv.Images(false, "*5000*/*")
if err != nil {
t.Fatal(err)
}
if len(images[0].RepoTags) != 1 {
t.Fatal("incorrect number of matches returned")
}
}
func TestImageInsert(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
sf := utils.NewStreamFormatter(true)
// bad image name fails
if err := srv.ImageInsert("foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err == nil {
t.Fatal("expected an error and got none")
}
// bad url fails
if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
t.Fatal("expected an error and got none")
}
// success returns nil
if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
t.Fatalf("expected no error, but got %v", err)
}
}

@@ -0,0 +1,63 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
if err := generateImage("", srv); err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].Created < images[1].Created {
t.Error("Expected []APIImges to be ordered by most recent creation date.")
}
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
err := generateImage("bar", srv)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", srv)
if err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" {
t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images)
}
}
func generateImage(name string, srv *docker.Server) error {
archive, err := fakeTar()
if err != nil {
return err
}
return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true))
}

integration/utils_test.go

@@ -0,0 +1,328 @@
package docker
import (
"archive/tar"
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"strings"
"testing"
"time"
)
// This file contains utility functions for docker's unit test suite.
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
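For context, this is standard Go test visibility rather than anything docker-specific: a _test.go file whose package clause matches the package under test can reach unexported identifiers, while an external foo_test package sees only the exported API. A minimal hypothetical illustration (the file name and test are illustrative):
// Hypothetical file example_internal_test.go: because its package clause
// is "docker" (not "docker_test"), unexported helpers such as mkRuntime
// below are directly visible to it.
package docker
import "testing"
func TestSeesUnexported(t *testing.T) {
	_ = mkRuntime // would be a compile error from an external test package
}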
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *docker.Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &docker.DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := docker.NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}
func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}
func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func startContainer(eng *engine.Engine, id string, t utils.Fataler) {
job := eng.Job("start", id)
if err := job.Run(); err != nil {
t.Fatal(err)
}
}
func containerRun(eng *engine.Engine, id string, t utils.Fataler) {
startContainer(eng, id, t)
containerWait(eng, id, t)
}
func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool {
c := getContainer(eng, id, t)
if err := c.EnsureMounted(); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
if os.IsNotExist(err) {
return false
}
t.Fatal(err)
}
return true
}
func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) {
c := getContainer(eng, id, t)
i, err := c.StdinPipe()
if err != nil {
t.Fatal(err)
}
o, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
return i, o
}
func containerWait(eng *engine.Engine, id string, t utils.Fataler) int {
return getContainer(eng, id, t).Wait()
}
func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error {
return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond)
}
func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
if err := getContainer(eng, id, t).Kill(); err != nil {
t.Fatal(err)
}
}
func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool {
return getContainer(eng, id, t).State.IsRunning()
}
func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
getContainer(eng, id, t)
}
func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
runtime := mkRuntimeFromEngine(eng, t)
if c := runtime.Get(id); c != nil {
t.Fatal(fmt.Errorf("Container %s should not exist", id))
}
}
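Taken together, the helpers above give integration tests a compact create/start/wait flow. A hypothetical end-to-end example, assuming only names already defined in this test package (unitTestImageID, NewTestEngine, and the helpers above):
func TestEchoExitsCleanly(t *testing.T) {
	eng := NewTestEngine(t)
	defer mkRuntimeFromEngine(eng, t).Nuke()
	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo", "hi"}, nil)
	if err != nil {
		t.Fatal(err)
	}
	id := createTestContainer(eng, config, t)
	// Start the container and block until it exits.
	containerRun(eng, id, t)
	if containerRunning(eng, id, t) {
		t.Fatalf("Container %s should have exited", id)
	}
}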
// assertHttpNotError expects the given response to not have an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
}
}
// assertHttpError expects the given response to have an error.
// Otherwise it causes the test to fail.
func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
}
}
func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container {
runtime := mkRuntimeFromEngine(eng, t)
c := runtime.Get(id)
if c == nil {
t.Fatal(fmt.Errorf("No such container: %s", id))
}
return c
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*docker.Server)
if !ok {
panic("Legacy server field in engine does not cast to *docker.Server")
}
return srv
}
func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
}
runtime, ok := iRuntime.(*docker.Runtime)
if !ok {
panic("Legacy runtime field in engine does not cast to *docker.Runtime")
}
return runtime
}
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
// TestGetEnabledCors and TestOptionsRoute require EnableCors=true
job.SetenvBool("EnableCors", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}
func newTestDirectory(templateDir string) (dir string, err error) {
return utils.TestDirectory(templateDir)
}
func getCallerName(depth int) string {
return utils.GetCallerName(depth)
}
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
// Call t.Fatal() at the first error.
func writeFile(dst, content string, t *testing.T) {
// Create subdirectories if necessary
if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
if err != nil {
t.Fatal(err)
}
// Write content (truncate if it exists)
if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
t.Fatal(err)
}
}
// Return the contents of file at path `src`.
// Call t.Fatal() at the first error (including if the file doesn't exist)
func readFile(src string, t *testing.T) (content string) {
f, err := os.Open(src)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
return string(data)
}
// Create a test container from the given runtime `r` and run arguments `args`.
// If the image name is "_" (e.g. []string{"-i", "-t", "_", "bash"}), it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) {
config, hc, _, err := docker.ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
return nil, nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
c, _, err := r.Create(config, "")
if err != nil {
return nil, nil, err
}
// NOTE: hostConfig is ignored.
// If `args` specify privileged mode, custom lxc conf, external mount binds,
// port redirects etc. they will be ignored.
// This is because the correct way to set these things is to pass environment
// to the `start` job.
// FIXME: this helper function should be deprecated in favor of calling
// `create` and `start` jobs directly.
return c, hc, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
container, hc, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
return "", err
}
defer stdout.Close()
job := eng.Job("start", container.ID)
if err := job.ImportEnv(hc); err != nil {
return "", err
}
if err := job.Run(); err != nil {
return "", err
}
container.Wait()
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
output = string(data)
return
}
// FIXME: this is duplicated from graph_test.go in the docker package.
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
@@ -27,8 +28,10 @@ type Chain struct {
}
func NewChain(name, bridge string) (*Chain, error) {
if err := Raw("-t", "nat", "-N", name); err != nil {
if output, err := Raw("-t", "nat", "-N", name); err != nil {
return nil, err
} else if len(output) != 0 {
return nil, fmt.Errorf("Error creating new iptables chain: %s", output)
}
chain := &Chain{
Name: name,
@@ -52,13 +55,25 @@ func RemoveExistingChain(name string) error {
}
func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error {
return Raw("-t", "nat", fmt.Sprint(action), c.Name,
daddr := ip.String()
if ip.IsUnspecified() {
// iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we
// want "0.0.0.0/0". "0/0" is correctly interpreted as "any
// value" by both iptables and ip6tables.
daddr = "0/0"
}
if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name,
"-p", proto,
"-d", ip.String(),
"-d", daddr,
"--dport", strconv.Itoa(port),
"!", "-i", c.Bridge,
"-j", "DNAT",
"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port)))
"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil {
return err
} else if len(output) != 0 {
return fmt.Errorf("Error iptables forward: %s", output)
}
return nil
}
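The address rewrite above is subtle enough to deserve a standalone illustration. A minimal sketch, assuming only the standard library (destArg is a hypothetical name, not part of this package):
package main
import (
	"fmt"
	"net"
)
// destArg mirrors the substitution in Chain.Forward: iptables parses
// "0.0.0.0" as "0.0.0.0/32" (a single host), so the unspecified address
// must be spelled "0/0" to mean "any address" for both IPv4 and IPv6.
func destArg(ip net.IP) string {
	if ip.IsUnspecified() {
		return "0/0"
	}
	return ip.String()
}
func main() {
	fmt.Println(destArg(net.ParseIP("0.0.0.0")))  // prints: 0/0
	fmt.Println(destArg(net.ParseIP("10.1.2.3"))) // prints: 10.1.2.3
}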
func (c *Chain) Prerouting(action Action, args ...string) error {
@@ -66,7 +81,12 @@ func (c *Chain) Prerouting(action Action, args ...string) error {
if len(args) > 0 {
a = append(a, args...)
}
return Raw(append(a, "-j", c.Name)...)
if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
return err
} else if len(output) != 0 {
return fmt.Errorf("Error iptables prerouting: %s", output)
}
return nil
}
func (c *Chain) Output(action Action, args ...string) error {
@@ -74,7 +94,12 @@ func (c *Chain) Output(action Action, args ...string) error {
if len(args) > 0 {
a = append(a, args...)
}
return Raw(append(a, "-j", c.Name)...)
if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
return err
} else if len(output) != 0 {
return fmt.Errorf("Error iptables output: %s", output)
}
return nil
}
func (c *Chain) Remove() error {
@@ -94,17 +119,23 @@ func (c *Chain) Remove() error {
// Check if an existing rule exists
func Exists(args ...string) bool {
return Raw(append([]string{"-C"}, args...)...) == nil
if _, err := Raw(append([]string{"-C"}, args...)...); err != nil {
return false
}
return true
}
func Raw(args ...string) error {
func Raw(args ...string) ([]byte, error) {
path, err := exec.LookPath("iptables")
if err != nil {
return ErrIptablesNotFound
return nil, ErrIptablesNotFound
}
if err := exec.Command(path, args...).Run(); err != nil {
return fmt.Errorf("iptables failed: iptables %v", strings.Join(args, " "))
if os.Getenv("DEBUG") != "" {
fmt.Printf("[DEBUG] [iptables]: %s, %v\n", path, args)
}
return nil
output, err := exec.Command(path, args...).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err)
}
return output, err
}
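Since Raw now returns iptables' combined output instead of discarding it, callers have to apply a two-step check, as the rewritten helpers above do. A short usage sketch of that convention (the chain name is hypothetical):
// err covers exec failures and non-zero exit codes; any surviving
// output is treated as a diagnostic and therefore also as a failure.
if output, err := Raw("-t", "nat", "-N", "EXAMPLE-CHAIN"); err != nil {
	return err
} else if len(output) != 0 {
	return fmt.Errorf("iptables: %s", output)
}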

Some files were not shown because too many files changed in this diff.