Merge branch 'master' into creack-merge-master

Conflicts:
	api_params.go
	graph.go
	graph_test.go
	image.go
	integration/api_test.go
	integration/container_test.go
	integration/runtime_test.go
	runtime.go
This commit is contained in:
Guillaume J. Charmes 2013-11-19 14:57:53 -08:00
Parents: 5a4113140e 9d867a389b
Commit: 28d4cbbc59
No key found matching this signature
GPG key ID: B33E4642CB6E3FF3
62 changed files: 2852 additions and 2209 deletions


@ -44,6 +44,7 @@ Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Darren Coxall <darren@darrencoxall.com>
David Calavera <david.calavera@gmail.com>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
@ -120,6 +121,7 @@ Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martin Redmond <martin@tinychat.com>
Matt Apperson <me@mattapperson.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Bachmann <bachmann.matt@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
Maxim Treskin <zerthurd@gmail.com>


@ -36,7 +36,7 @@ run apt-get install -y -q mercurial
run apt-get install -y -q build-essential libsqlite3-dev
# Install Go
run curl -s https://go.googlecode.com/files/go1.2rc4.src.tar.gz | tar -v -C /usr/local -xz
run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std

Vagrantfile (vendored)

@ -70,7 +70,7 @@ SCRIPT
# trigger dkms to build the virtualbox guest module install.
$vbox_script = <<VBOX_SCRIPT + $script
# Install the VirtualBox guest additions if they aren't already installed.
if [ ! -d /opt/VBoxGuestAdditions-4.2.12/ ]; then
if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q
@ -79,9 +79,9 @@ if [ ! -d /opt/VBoxGuestAdditions-4.2.12/ ]; then
apt-get install -q -y linux-headers-generic-lts-raring dkms
echo 'Downloading VBox Guest Additions...'
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
fi

api.go

@ -149,13 +149,12 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
signal := 0
if r != nil {
s := r.Form.Get("signal")
if s != "" {
if s, err := strconv.Atoi(s); err != nil {
if s := r.Form.Get("signal"); s != "" {
s, err := strconv.Atoi(s)
if err != nil {
return err
} else {
signal = s
}
signal = s
}
}
if err := srv.ContainerKill(name, signal); err != nil {
@ -201,9 +200,8 @@ func getImagesJSON(srv *Server, version float64, w http.ResponseWriter, r *http.
}
return writeJSON(w, http.StatusOK, outs2)
} else {
return writeJSON(w, http.StatusOK, outs)
}
return writeJSON(w, http.StatusOK, outs)
}
func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -316,13 +314,10 @@ func getContainersTop(srv *Server, version float64, w http.ResponseWriter, r *ht
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
ps_args := r.Form.Get("ps_args")
procsStr, err := srv.ContainerTop(name, ps_args)
procsStr, err := srv.ContainerTop(vars["name"], r.Form.Get("ps_args"))
if err != nil {
return err
}
return writeJSON(w, http.StatusOK, procsStr)
}
@ -350,13 +345,12 @@ func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *h
if version < 1.5 {
outs2 := []APIContainersOld{}
for _, ctnr := range outs {
outs2 = append(outs2, ctnr.ToLegacy())
outs2 = append(outs2, *ctnr.ToLegacy())
}
return writeJSON(w, http.StatusOK, outs2)
} else {
return writeJSON(w, http.StatusOK, outs)
}
return writeJSON(w, http.StatusOK, outs)
}
func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@ -640,12 +634,10 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.R
if imgs != nil {
if len(imgs) != 0 {
return writeJSON(w, http.StatusOK, imgs)
} else {
return fmt.Errorf("Conflict, %s wasn't deleted", name)
}
} else {
w.WriteHeader(http.StatusNoContent)
return fmt.Errorf("Conflict, %s wasn't deleted", name)
}
w.WriteHeader(http.StatusNoContent)
return nil
}
@ -930,7 +922,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
if err != nil {
return err
}
c, err := mkBuildContext(string(dockerFile), nil)
c, err := MkBuildContext(string(dockerFile), nil)
if err != nil {
return err
}
@ -1108,6 +1100,20 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
return r, nil
}
// ServeRequest processes a single http request to the docker remote api.
// FIXME: refactor this to be part of Server and not require re-creating a new
// router each time. This requires first moving ListenAndServe into Server.
func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *http.Request) error {
router, err := createRouter(srv, false)
if err != nil {
return err
}
// Insert APIVERSION into the request as a convenience
req.URL.Path = fmt.Sprintf("/v%g%s", apiversion, req.URL.Path)
router.ServeHTTP(w, req)
return nil
}
func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
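A minimal sketch of how the new ServeRequest entry point could be exercised from a test, assuming it sits in the docker package next to an existing *Server value; the helper name and the 1.7 version number are illustrative, not part of this commit:

package docker

import (
	"net/http"
	"net/http/httptest"
)

// callVersion is a hypothetical helper: build a plain request, let
// ServeRequest prepend the API version to the path, and capture the
// response with an httptest recorder.
func callVersion(srv *Server) (string, error) {
	req, err := http.NewRequest("GET", "/version", nil)
	if err != nil {
		return "", err
	}
	rec := httptest.NewRecorder()
	if err := ServeRequest(srv, 1.7, rec, req); err != nil {
		return "", err
	}
	return rec.Body.String(), nil
}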


@ -2,151 +2,149 @@ package docker
import "strings"
type APIHistory struct {
ID string `json:"Id"`
Tags []string `json:",omitempty"`
Created int64
CreatedBy string `json:",omitempty"`
Size int64
}
type APIImages struct {
ID string `json:"Id"`
RepoTags []string `json:",omitempty"`
Created int64
Size int64
VirtualSize int64
ParentId string `json:",omitempty"`
}
type APIImagesOld struct {
Repository string `json:",omitempty"`
Tag string `json:",omitempty"`
ID string `json:"Id"`
Created int64
Size int64
VirtualSize int64
}
func (self *APIImages) ToLegacy() []APIImagesOld {
outs := []APIImagesOld{}
for _, repotag := range self.RepoTags {
components := strings.SplitN(repotag, ":", 2)
outs = append(outs, APIImagesOld{
ID: self.ID,
Repository: components[0],
Tag: components[1],
Created: self.Created,
Size: self.Size,
VirtualSize: self.VirtualSize,
})
type (
APIHistory struct {
ID string `json:"Id"`
Tags []string `json:",omitempty"`
Created int64
CreatedBy string `json:",omitempty"`
Size int64
}
APIImages struct {
ID string `json:"Id"`
RepoTags []string `json:",omitempty"`
Created int64
Size int64
VirtualSize int64
ParentId string `json:",omitempty"`
}
APIImagesOld struct {
Repository string `json:",omitempty"`
Tag string `json:",omitempty"`
ID string `json:"Id"`
Created int64
Size int64
VirtualSize int64
}
APIInfo struct {
Debug bool
Containers int
Images int
Driver string `json:",omitempty"`
DriverStatus [][2]string `json:",omitempty"`
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
IPv4Forwarding bool `json:",omitempty"`
LXCVersion string `json:",omitempty"`
NEventsListener int `json:",omitempty"`
KernelVersion string `json:",omitempty"`
IndexServerAddress string `json:",omitempty"`
}
APITop struct {
Titles []string
Processes [][]string
}
APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`
}
APIContainers struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports []APIPort
SizeRw int64
SizeRootFs int64
Names []string
}
APIContainersOld struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports string
SizeRw int64
SizeRootFs int64
}
APIID struct {
ID string `json:"Id"`
}
APIRun struct {
ID string `json:"Id"`
Warnings []string `json:",omitempty"`
}
APIPort struct {
PrivatePort int64
PublicPort int64
Type string
IP string
}
APIVersion struct {
Version string
GitCommit string `json:",omitempty"`
GoVersion string `json:",omitempty"`
}
APIWait struct {
StatusCode int
}
APIAuth struct {
Status string
}
APIImageConfig struct {
ID string `json:"Id"`
*Config
}
APICopy struct {
Resource string
HostPath string
}
)
func (api APIImages) ToLegacy() []APIImagesOld {
outs := []APIImagesOld{}
for _, repotag := range api.RepoTags {
components := strings.SplitN(repotag, ":", 2)
outs = append(outs, APIImagesOld{
ID: api.ID,
Repository: components[0],
Tag: components[1],
Created: api.Created,
Size: api.Size,
VirtualSize: api.VirtualSize,
})
}
return outs
}
type APIInfo struct {
Debug bool
Containers int
Images int
Driver string `json:",omitempty"`
DriverStatus [][2]string `json:",omitempty"`
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
IPv4Forwarding bool `json:",omitempty"`
LXCVersion string `json:",omitempty"`
NEventsListener int `json:",omitempty"`
KernelVersion string `json:",omitempty"`
IndexServerAddress string `json:",omitempty"`
}
type APITop struct {
Titles []string
Processes [][]string
}
type APIRmi struct {
Deleted string `json:",omitempty"`
Untagged string `json:",omitempty"`
}
type APIContainers struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports []APIPort
SizeRw int64
SizeRootFs int64
Names []string
}
func (self *APIContainers) ToLegacy() APIContainersOld {
return APIContainersOld{
ID: self.ID,
Image: self.Image,
Command: self.Command,
Created: self.Created,
Status: self.Status,
Ports: displayablePorts(self.Ports),
SizeRw: self.SizeRw,
SizeRootFs: self.SizeRootFs,
func (api APIContainers) ToLegacy() *APIContainersOld {
return &APIContainersOld{
ID: api.ID,
Image: api.Image,
Command: api.Command,
Created: api.Created,
Status: api.Status,
Ports: displayablePorts(api.Ports),
SizeRw: api.SizeRw,
SizeRootFs: api.SizeRootFs,
}
}
type APIContainersOld struct {
ID string `json:"Id"`
Image string
Command string
Created int64
Status string
Ports string
SizeRw int64
SizeRootFs int64
}
type APIID struct {
ID string `json:"Id"`
}
type APIRun struct {
ID string `json:"Id"`
Warnings []string `json:",omitempty"`
}
type APIPort struct {
PrivatePort int64
PublicPort int64
Type string
IP string
}
type APIVersion struct {
Version string
GitCommit string `json:",omitempty"`
GoVersion string `json:",omitempty"`
}
type APIWait struct {
StatusCode int
}
type APIAuth struct {
Status string
}
type APIImageConfig struct {
ID string `json:"Id"`
*Config
}
type APICopy struct {
Resource string
HostPath string
}
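A short sketch (assuming the docker package context) of how the value-receiver ToLegacy expands one post-1.7 image record into per-repo:tag legacy records, mirroring the version check in getImagesJSON; the image ID and timestamp below are made-up values for illustration:

package docker

import "fmt"

// ExampleAPIImages_ToLegacy shows one image entry expanding into a legacy
// record per repo:tag pair, as served to clients older than API v1.7.
func ExampleAPIImages_ToLegacy() {
	img := APIImages{
		ID:       "b750fe79269d",
		RepoTags: []string{"ubuntu:12.04", "ubuntu:latest"},
		Created:  1384900000,
	}
	for _, old := range img.ToLegacy() {
		fmt.Printf("%s %s:%s\n", old.ID, old.Repository, old.Tag)
	}
	// Output:
	// b750fe79269d ubuntu:12.04
	// b750fe79269d ubuntu:latest
}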

api_unit_tests.go (new file)

@ -0,0 +1,19 @@
package docker
import (
"testing"
)
func TestJsonContentType(t *testing.T) {
if !matchesContentType("application/json", "application/json") {
t.Fail()
}
if !matchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if matchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}
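The matcher under test is not shown in this hunk; a minimal implementation consistent with these three assertions might look like the sketch below (an assumption, not necessarily the repository's matchesContentType), using mime.ParseMediaType so parameters such as charset are ignored:

package docker

import "mime"

// matchesContentTypeSketch parses the media type and compares it to the
// expected value, dropping any parameters such as "charset=utf-8".
func matchesContentTypeSketch(contentType, expectType string) bool {
	mediatype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mediatype == expectType
}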


@ -175,16 +175,17 @@ func Untar(archive io.Reader, path string, options *TarOptions) error {
buf := make([]byte, 10)
totalN := 0
for totalN < 10 {
if n, err := archive.Read(buf[totalN:]); err != nil {
n, err := archive.Read(buf[totalN:])
if err != nil {
if err == io.EOF {
return fmt.Errorf("Tarball too short")
}
return err
} else {
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
}
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
}
compression := DetectCompression(buf)
utils.Debugf("Archive compression detected: %s", compression.Extension())


@ -196,10 +196,9 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
if loginAgainstOfficialIndex {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
} else {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
}
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
} else if reqStatusCode == 400 {
if string(reqBody) == "\"Username or email already exists\"" {
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)


@ -1,11 +1,8 @@
package auth
import (
"crypto/rand"
"encoding/hex"
"io/ioutil"
"os"
"strings"
"testing"
)
@ -29,52 +26,6 @@ func TestEncodeAuth(t *testing.T) {
}
}
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}
func setupTempConfigFile() (*ConfigFile, error) {
root, err := ioutil.TempDir("", "docker-test-auth")
if err != nil {


@ -135,7 +135,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
// mkBuildContext returns an archive of an empty context with the contents
// of `dockerfile` at the path ./Dockerfile
func mkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
files = append(files, [2]string{"Dockerfile", dockerfile})
@ -185,7 +185,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
if err != nil {
return err
}
context, err = mkBuildContext(string(dockerfile), nil)
context, err = MkBuildContext(string(dockerfile), nil)
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
@ -553,6 +553,9 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
utils.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
if _, _, err := cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
@ -2076,10 +2079,9 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out)
} else {
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}

config_test.go (new file)

@ -0,0 +1,149 @@
package docker
import (
"testing"
)
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}

container_unit_test.go (new file)

@ -0,0 +1,161 @@
package docker
import (
"testing"
)
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
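A parser consistent with TestParseLxcConfOpt above, shown only as an illustrative sketch rather than the repository's parseLxcOpt: split on the first '=' and trim whitespace around the key and value:

package docker

import (
	"fmt"
	"strings"
)

// parseLxcOptSketch splits "lxc.utsname = docker " into ("lxc.utsname", "docker").
func parseLxcOptSketch(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}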
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestGetFullName(t *testing.T) {
name, err := getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}
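A name normalizer consistent with TestGetFullName above, again only an illustrative sketch of the behaviour the test pins down (the real getFullName may handle more cases, such as link names):

package docker

import (
	"fmt"
	"strings"
)

// getFullNameSketch rejects empty names and enforces a leading slash.
func getFullNameSketch(name string) (string, error) {
	if name == "" {
		return "", fmt.Errorf("Container name cannot be empty")
	}
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	return name, nil
}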


@ -192,7 +192,7 @@ if [ "$justTar" ]; then
sudo tar --numeric-owner -caf "$repo" .
else
# create the image (and tag $repo:$suite)
sudo tar --numeric-owner -c . | $docker import - $repo $suite
sudo tar --numeric-owner -c . | $docker import - $repo:$suite
# test the image
$docker run -i -t $repo:$suite echo success
@ -202,25 +202,25 @@ else
Debian)
if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
# tag latest
$docker tag $repo:$suite $repo latest
$docker tag $repo:$suite $repo:latest
if [ -r etc/debian_version ]; then
# tag the specific debian release version (which is only reasonable to tag on debian stable)
ver=$(cat etc/debian_version)
$docker tag $repo:$suite $repo $ver
$docker tag $repo:$suite $repo:$ver
fi
fi
;;
Ubuntu)
if [ "$suite" = "$ubuntuLatestLTS" ]; then
# tag latest
$docker tag $repo:$suite $repo latest
$docker tag $repo:$suite $repo:latest
fi
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific Ubuntu version number, if available (12.04, etc.)
$docker tag $repo:$suite $repo $lsbRelease
$docker tag $repo:$suite $repo:$lsbRelease
fi
fi
;;


@ -31,7 +31,7 @@ help:
# @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
# @echo " latexpdf to make LaTeX files and run them through pdflatex"
# @echo " text to make text files"
# @echo " man to make manual pages"
@echo " man to make a manual page"
# @echo " texinfo to make Texinfo files"
# @echo " info to make Texinfo files and run them through makeinfo"
# @echo " gettext to make PO message catalogs"


@ -1,2 +1,2 @@
Sphinx==1.1.3
sphinxcontrib-httpdomain==1.1.8
sphinxcontrib-httpdomain==1.1.9


@ -13,9 +13,12 @@ Docker Remote API v1.7
1. Brief introduction
=====================
- The Remote API is replacing rcli
- Default port in the docker daemon is 4243
- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr
- The Remote API has replaced rcli
- The daemon listens on ``unix:///var/run/docker.sock``, but you can
:ref:`bind_docker`.
- The API tends to be REST, but for some complex commands, like
``attach`` or ``pull``, the HTTP connection is hijacked to transport
``stdout, stdin`` and ``stderr``
2. Endpoints
============
@ -690,10 +693,12 @@ Create an image
:query repo: repository
:query tag: tag
:query registry: the registry to pull from
:reqheader X-Registry-Auth: base64-encoded AuthConfig object
:statuscode 200: no error
:statuscode 500: server error
Insert a file in an image
*************************
@ -835,18 +840,16 @@ Push an image on the registry
HTTP/1.1 200 OK
Content-Type: application/json
{"status":"Pushing..."}
{"status":"Pushing", "progress":"1/? (n/a)"}
{"error":"Invalid..."}
...
The ``X-Registry-Auth`` header can be used to include a
base64-encoded AuthConfig object.
{"status":"Pushing..."}
{"status":"Pushing", "progress":"1/? (n/a)"}
{"error":"Invalid..."}
...
:query registry: the registry you want to push to, optional
:reqheader X-Registry-Auth: include a base64-encoded AuthConfig object.
:statuscode 200: no error
:statuscode 404: no such image
:statuscode 500: server error
:statuscode 404: no such image
:statuscode 500: server error
Tag an image into a repository
@ -959,9 +962,9 @@ Search images
...
]
:query term: term to search
:statuscode 200: no error
:statuscode 500: server error
:query term: term to search
:statuscode 200: no error
:statuscode 500: server error
2.3 Misc
@ -991,18 +994,22 @@ Build an image from Dockerfile via stdin
{{ STREAM }}
The stream must be a tar archive compressed with one of the following algorithms:
identity (no compression), gzip, bzip2, xz. The archive must include a file called
`Dockerfile` at its root. It may include any number of other files, which will be
accessible in the build context (See the ADD build command).
The stream must be a tar archive compressed with one of the
following algorithms: identity (no compression), gzip, bzip2,
xz.
The Content-type header should be set to "application/tar".
The archive must include a file called ``Dockerfile`` at its
root. It may include any number of other files, which will be
accessible in the build context (See the :ref:`ADD build command
<dockerbuilder>`).
:query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
:query q: suppress verbose build output
:query nocache: do not use the cache when building the image
:reqheader Content-type: should be set to ``"application/tar"``.
:statuscode 200: no error
:statuscode 500: server error
:query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
:query q: suppress verbose build output
:query nocache: do not use the cache when building the image
:statuscode 200: no error
:statuscode 500: server error
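As a hedged illustration of the build endpoint described above, the Go sketch below packages an in-memory Dockerfile into an uncompressed tar archive and streams it with Content-Type application/tar; the TCP daemon address (see :ref:`bind_docker`) and the /v1.7 path prefix are assumptions for the example:

package main

import (
	"archive/tar"
	"bytes"
	"io"
	"net/http"
	"net/url"
	"os"
)

// buildFromDockerfile posts a single-file build context to the remote API
// and streams the build output to stdout.
func buildFromDockerfile(dockerfile, tag string) error {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write([]byte(dockerfile)); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}
	resp, err := http.Post("http://localhost:4243/v1.7/build?t="+url.QueryEscape(tag),
		"application/tar", buf)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(os.Stdout, resp.Body) // stream the build output
	return err
}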
Check auth configuration


@ -231,9 +231,33 @@ Full -run example
::
Usage: docker diff CONTAINER [OPTIONS]
Usage: docker diff CONTAINER
List the changed files and directories in a container's filesystem
Inspect changes on a container's filesystem
There are 3 events that are listed in the 'diff':
1. ```A``` - Add
2. ```D``` - Delete
3. ```C``` - Change
for example:
.. code-block:: bash
$ sudo docker diff 7bb0e258aefe
C /dev
A /dev/kmsg
C /etc
A /etc/mtab
A /go
A /go/src
A /go/src/github.com
A /go/src/github.com/dotcloud
A /go/src/github.com/dotcloud/docker
A /go/src/github.com/dotcloud/docker/.git
....
.. _cli_events:
@ -323,6 +347,40 @@ Show events in the past from a specified time
-notrunc=false: Don't truncate output
-q=false: only show numeric IDs
To see how the docker:latest image was built:
.. code-block:: bash
$ docker history docker
ID CREATED CREATED BY
docker:latest 19 hours ago /bin/sh -c #(nop) ADD . in /go/src/github.com/dotcloud/docker
cf5f2467662d 2 weeks ago /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"]
3538fbe372bf 2 weeks ago /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker
7450f65072e5 2 weeks ago /bin/sh -c #(nop) VOLUME /var/lib/docker
b79d62b97328 2 weeks ago /bin/sh -c apt-get install -y -q lxc
36714852a550 2 weeks ago /bin/sh -c apt-get install -y -q iptables
8c4c706df1d6 2 weeks ago /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
b89989433c48 2 weeks ago /bin/sh -c pip install python-magic
a23e640d85b5 2 weeks ago /bin/sh -c pip install s3cmd
41f54fec7e79 2 weeks ago /bin/sh -c apt-get install -y -q python-pip
d9bc04add907 2 weeks ago /bin/sh -c apt-get install -y -q reprepro dpkg-sig
e74f4760fa70 2 weeks ago /bin/sh -c gem install --no-rdoc --no-ri fpm
1e43224726eb 2 weeks ago /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev
460953ae9d7f 2 weeks ago /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor
8b63eb1d666b 2 weeks ago /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin
3087f3bcedf2 2 weeks ago /bin/sh -c #(nop) ENV GOROOT=/goroot
635840d198e5 2 weeks ago /bin/sh -c cd /goroot/src && ./make.bash
439f4a0592ba 2 weeks ago /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot
13967ed36e93 2 weeks ago /bin/sh -c #(nop) ENV CGO_ENABLED=0
bf7424458437 2 weeks ago /bin/sh -c apt-get install -y -q build-essential
a89ec997c3bf 2 weeks ago /bin/sh -c apt-get install -y -q mercurial
b9f165c6e749 2 weeks ago /bin/sh -c apt-get install -y -q git
17a64374afa7 2 weeks ago /bin/sh -c apt-get install -y -q curl
d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update
13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
ubuntu:12.04 6 months ago
.. _cli_images:
``images``
@ -435,6 +493,21 @@ might not get preserved.
Display system-wide information.
.. code-block:: bash
$ sudo docker info
Containers: 292
Images: 194
Debug mode (server): false
Debug mode (client): false
Fds: 22
Goroutines: 67
LXC Version: 0.9.0
EventsListeners: 115
Kernel Version: 3.8.0-33-generic
WARNING: No swap limit support
.. _cli_insert:
``insert``
@ -772,6 +845,13 @@ id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
.. _cli_search:
``search``


@ -235,7 +235,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('toctree', 'docker', u'Docker Documentation',
('commandline/cli', 'docker', u'Docker Documentation',
[u'Team Docker'], 1)
]


@ -20,7 +20,7 @@ Note that we're marking ``/var/lib/couchdb`` as a data volume.
.. code-block:: bash
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
Add data to the first database
------------------------------
@ -31,7 +31,7 @@ replace ``localhost`` with the public IP of your Docker host.
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH1 5984)/_utils/"
URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser, and use the couch interface to add data"
Create second database
@ -41,7 +41,7 @@ This time, we're requesting shared access to ``$COUCH1``'s volumes.
.. code-block:: bash
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
COUCH2=$(sudo docker run -d -p 5984 -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Browse data on the second database
----------------------------------
@ -49,7 +49,7 @@ Browse data on the second database
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(sudo docker port $COUCH2 5984)/_utils/"
URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/"
echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
Congratulations, you are now running two Couchdb containers, completely


@ -1,4 +1,7 @@
.. note::
This example assumes you have Docker running in daemon mode. For more information please see :ref:`running_examples`
* This example assumes you have Docker running in daemon mode. For
more information please see :ref:`running_examples`.
* **If you don't like sudo** then see :ref:`dockergroup`


@ -127,10 +127,12 @@ Check the logs make sure it is working correctly.
sudo docker attach $CONTAINER_ID
Attach to the container to see the results in realtime.
Attach to the container to see the results in real-time.
- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=true"** Proxify all received signal to the process
(even in non-tty mode)
- **$CONTAINER_ID** The Id of the container we want to attach to.
Exit from the container attachment by pressing Control-C.


@ -39,11 +39,12 @@ container. The ``BUILD_JOB`` environment variable will be set with the new conta
.. code-block:: bash
sudo docker attach $BUILD_JOB
sudo docker attach -sig-proxy=false $BUILD_JOB
[...]
While this container is running, we can attach to the new container to
see what is going on. You can use Ctrl-C to disconnect.
see what is going on. Setting the flag ``-sig-proxy`` to ``false`` allows you to connect to
and disconnect from it (Ctrl-C) without stopping the container.
.. code-block:: bash


@ -29,7 +29,7 @@ with your own user name.
.. code-block:: bash
sudo docker build -t <your username>/redis
sudo docker build -t <your username>/redis .
Run the service
---------------
@ -82,7 +82,7 @@ of our ``redis`` container.
DB_PORT_6379_TCP_ADDR=172.17.0.33
DB_PORT_6379_TCP_PROTO=tcp
We can see that we've got a small list of environmental varaibles prefixed with ``DB``.
We can see that we've got a small list of environment variables prefixed with ``DB``.
The ``DB`` comes from the link alias specified when we launched the container. Let's use
the ``DB_PORT_6379_TCP_ADDR`` variable to connect to our Redis container.


@ -12,9 +12,9 @@ SSH Daemon Service
**Video:**
I've create a little screencast to show how to create a SSHd service
I've created a little screencast to show how to create an SSHd service
and connect to it. It is something like 11 minutes and not entirely
smooth, but gives you a good idea.
smooth, but it gives you a good idea.
.. note::
This screencast was created before Docker version 0.5.2, so the


@ -19,11 +19,12 @@ Contents:
ubuntulinux
binaries
security
upgrading
kernel
vagrant
windows
amazon
rackspace
archlinux
gentoolinux
upgrading
kernel


@ -25,6 +25,7 @@ If you cannot or do not want to use the "official" kernels,
here is some technical background about the features (both optional and
mandatory) that docker needs to run successfully.
Linux version 3.8 or above
--------------------------
@ -39,6 +40,15 @@ The symptoms include:
- kernel crash causing the machine to freeze for a few minutes, or even
completely.
Additionally, kernels prior to 3.4 did not implement ``reboot_pid_ns``,
which means that the ``reboot()`` syscall could reboot the host machine,
instead of terminating the container. To work around that problem,
LXC userland tools (since version 0.8) automatically drop the ``SYS_BOOT``
capability when necessary. Still, if you run a pre-3.4 kernel with pre-0.8
LXC tools, be aware that containers can reboot the whole host! This is
not something that Docker wants to address in the short term, since you
shouldn't use kernels prior to 3.8 with Docker anyway.
While it is still possible to use older kernels for development, it is
really not advised to do so.


@ -0,0 +1,267 @@
:title: Docker Security
:description: Review of the Docker Daemon attack surface
:keywords: Docker, Docker documentation, security
.. _dockersecurity:
Docker Security
===============
*Adapted from* `Containers & Docker: How Secure are They? <blogsecurity>`_
There are three major areas to consider when reviewing Docker security:
* the intrinsic security of containers, as implemented by kernel
namespaces and cgroups;
* the attack surface of the Docker daemon itself;
* the "hardening" security features of the kernel and how they
interact with containers.
Kernel Namespaces
-----------------
Docker containers are essentially LXC containers, and they come with
the same security features. When you start a container with ``docker
run``, behind the scenes Docker uses ``lxc-start`` to execute the
Docker container. This creates a set of namespaces and control groups
for the container. Those namespaces and control groups are not created
by Docker itself, but by ``lxc-start``. This means that as the LXC
userland tools evolve (and provide additional namespaces and isolation
features), Docker will automatically make use of them.
**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
less affect, processes running in another container, or in the host
system.
**Each container also gets its own network stack**, meaning that a
container doesn't get privileged access to the sockets or interfaces
of another container. Of course, if the host system is set up
accordingly, containers can interact with each other through their
respective network interfaces — just like they can interact with
external hosts. When you specify public ports for your containers or
use :ref:`links <working_with_links_names>` then IP traffic is allowed
between containers. They can ping each other, send/receive UDP
packets, and establish TCP connections, but that can be restricted if
necessary. From a network architecture point of view, all containers
on a given Docker host are sitting on bridge interfaces. This means
that they are just like physical machines connected through a common
Ethernet switch; no more, no less.
How mature is the code providing kernel namespaces and private
networking? Kernel namespaces were introduced `between kernel version
2.6.15 and 2.6.26
<http://lxc.sourceforge.net/index.php/about/kernel-namespaces/>`_. This
means that since July 2008 (date of the 2.6.26 release, now 5 years
ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of `OpenVZ
<http://en.wikipedia.org/wiki/OpenVZ>`_ in such a way that they could
be merged within the mainstream kernel. And OpenVZ was initially
released in 2005, so both the design and the implementation are
pretty mature.
Control Groups
--------------
Control Groups are the other key component of Linux Containers. They
implement resource accounting and limiting. They provide a lot of very
useful metrics, but they also help to ensure that each container gets
its fair share of memory, CPU, disk I/O; and, more importantly, that a
single container cannot bring the system down by exhausting one of
those resources.
So while they do not play a role in preventing one container from
accessing or affecting the data and processes of another container,
they are essential to fend off some denial-of-service attacks. They
are particularly important on multi-tenant platforms, like public and
private PaaS, to guarantee a consistent uptime (and performance) even
when some applications start to misbehave.
Control Groups have been around for a while as well: the code was
started in 2006, and initially merged in kernel 2.6.24.
Docker Daemon Attack Surface
----------------------------
Running containers (and applications) with Docker implies running the
Docker daemon. This daemon currently requires root privileges, and you
should therefore be aware of some important details.
First of all, **only trusted users should be allowed to control your
Docker daemon**. This is a direct consequence of some powerful Docker
features. Specifically, Docker allows you to share a directory between
the Docker host and a guest container; and it allows you to do so
without limiting the access rights of the container. This means that
you can start a container where the ``/host`` directory will be the
``/`` directory on your host; and the container will be able to alter
your host filesystem without any restriction. This sounds crazy? Well,
you have to know that **all virtualization systems allowing filesystem
resource sharing behave the same way**. Nothing prevents you from
sharing your root filesystem (or even your root block device) with a
virtual machine.
This has a strong security implication: if you instrument Docker from
e.g. a web server to provision containers through an API, you should
be even more careful than usual with parameter checking, to make sure
that a malicious user cannot pass crafted parameters causing Docker to
create arbitrary containers.
For this reason, the REST API endpoint (used by the Docker CLI to
communicate with the Docker daemon) changed in Docker 0.5.2, and now
uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the
latter being prone to cross-site-scripting attacks if you happen to
run Docker directly on your local machine, outside of a VM). You can
then use traditional UNIX permission checks to limit access to the
control socket.
You can also expose the REST API over HTTP if you explicitly decide
so. However, if you do that, being aware of the abovementioned
security implication, you should ensure that it will be reachable
only from a trusted network or VPN; or protected with e.g. ``stunnel``
and client SSL certificates.
Recent improvements in Linux namespaces will soon allow running
full-featured containers without root privileges, thanks to the new
user namespace. This is covered in detail `here
<http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/>`_. Moreover,
this will solve the problem caused by sharing filesystems between host
and guest, since the user namespace allows users within containers
(including the root user) to be mapped to other users in the host
system.
The end goal for Docker is therefore to implement two additional
security improvements:
* map the root user of a container to a non-root user of the Docker
host, to mitigate the effects of a container-to-host privilege
escalation;
* allow the Docker daemon to run without root privileges, and delegate
operations requiring those privileges to well-audited sub-processes,
each with its own (very limited) scope: virtual network setup,
filesystem management, etc.
Finally, if you run Docker on a server, it is recommended to run
exclusively Docker on that server, and to move all other services into
containers controlled by Docker. Of course, it is fine to keep your
favorite admin tools (probably at least an SSH server), as well as
existing monitoring/supervision processes (e.g. NRPE, collectd, etc).
Linux Kernel Capabilities
-------------------------
By default, Docker starts containers with a very restricted set of
capabilities. What does that mean?
Capabilities turn the binary "root/non-root" dichotomy into a
fine-grained access control system. Processes (like web servers) that
just need to bind on a port below 1024 do not have to run as root:
they can just be granted the ``net_bind_service`` capability
instead. And there are many other capabilities, for almost all the
specific areas where root privileges are usually needed.
This means a lot for container security; let's see why!
Your average server (bare metal or virtual machine) needs to run a
bunch of processes as root. Those typically include SSH, cron,
syslogd; hardware management tools (to e.g. load modules), network
configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much
more. A container is very different, because almost all of those tasks
are handled by the infrastructure around the container:
* SSH access will typically be managed by a single server running in
the Docker host;
* ``cron``, when necessary, should run as a user process, dedicated
and tailored for the app that needs its scheduling service, rather
than as a platform-wide facility;
* log management will also typically be handed to Docker, or by
third-party services like Loggly or Splunk;
* hardware management is irrelevant, meaning that you never need to
run ``udevd`` or equivalent daemons within containers;
* network management happens outside of the containers, enforcing
separation of concerns as much as possible, meaning that a container
should never need to perform ``ifconfig``, ``route``, or ip commands
(except when a container is specifically engineered to behave like a
router or firewall, of course).
This means that in most cases, containers will not need "real" root
privileges *at all*. And therefore, containers can run with a reduced
capability set; meaning that "root" within a container has much less
privileges than the real "root". For instance, it is possible to:
* deny all "mount" operations;
* deny access to raw sockets (to prevent packet spoofing);
* deny access to some filesystem operations, like creating new device
nodes, changing the owner of files, or altering attributes
(including the immutable flag);
* deny module loading;
* and many others.
This means that even if an intruder manages to escalate to root within
a container, it will be much harder to do serious damage, or to
escalate to the host.
This won't affect regular web apps; but malicious users will find that
the arsenal at their disposal has shrunk considerably! You can see
`the list of dropped capabilities in the Docker code
<https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97>`_,
and a full list of available capabilities in `Linux manpages
<http://man7.org/linux/man-pages/man7/capabilities.7.html>`_.
Of course, you can always enable extra capabilities if you really need
them (for instance, if you want to use a FUSE-based filesystem), but
by default, Docker containers will be locked down to ensure maximum
safety.
Other Kernel Security Features
------------------------------
Capabilities are just one of the many security features provided by
modern Linux kernels. It is also possible to leverage existing,
well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with
Docker.
While Docker currently only enables capabilities, it doesn't interfere
with the other systems. This means that there are many different ways
to harden a Docker host. Here are a few examples.
* You can run a kernel with GRSEC and PAX. This will add many safety
checks, both at compile-time and run-time; it will also defeat many
exploits, thanks to techniques like address randomization. It
doesn't require Docker-specific configuration, since those security
features apply system-wide, independently of containers.
* If your distribution comes with security model templates for LXC
containers, you can use them out of the box. For instance, Ubuntu
comes with AppArmor templates for LXC, and those templates provide
an extra safety net (even though it overlaps greatly with
capabilities).
* You can define your own policies using your favorite access control
mechanism. Since Docker containers are standard LXC containers,
there is nothing “magic” or specific to Docker.
Just like there are many third-party tools to augment Docker
containers with e.g. special network topologies or shared filesystems,
you can expect to see tools to harden existing Docker containers
without affecting Docker's core.
Conclusions
-----------
Docker containers are, by default, quite secure; especially if you
take care of running your processes inside the containers as
non-privileged users (i.e. non root).
You can add an extra layer of safety by enabling Apparmor, SELinux,
GRSEC, or your favorite hardening solution.
Last but not least, if you see interesting security features in other
containerization systems, you will be able to implement them as well
with Docker, since everything is provided by the kernel anyway.
For more context and especially for comparisons with VMs and other
container systems, please also see the `original blog post
<blogsecurity>`_.
.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/


@ -34,13 +34,11 @@ This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.
.. NOTE:: When the image has successfully downloaded, you will see a 12
character hash ``539c0211cd76: Download complete`` which is the short
form of the image ID. These short image IDs are the first 12 characters
of the full image ID - which can be found using ``docker inspect`` or
``docker images -notrunc=true``
.. _dockergroup:
.. NOTE:: When the image has successfully downloaded, you will see a
12 character hash ``539c0211cd76: Download complete`` which is the
short form of the image ID. These short image IDs are the first 12
characters of the full image ID - which can be found using ``docker
inspect`` or ``docker images -notrunc=true``
Running an interactive shell
----------------------------
@ -53,33 +51,38 @@ Running an interactive shell
# use the escape sequence Ctrl-p + Ctrl-q
sudo docker run -i -t ubuntu /bin/bash
.. _dockergroup:
Why ``sudo``?
-------------
sudo and the docker Group
-------------------------
The ``docker`` daemon always runs as root, and since ``docker``
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
port. By default that Unix socket is owned by the user *root*, and so,
by default, you can access it with ``sudo``.
Starting in version 0.5.3, if you create a Unix group called *docker*
and add users to it, then the ``docker`` daemon will make the
ownership of the Unix socket read/writable by the *docker* group when
the daemon starts. The ``docker`` daemon must always run as root, but
if you run the ``docker`` client as a user in the *docker* group then
you don't need to add ``sudo`` to all the client commands.
Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as root, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.
**Example:**
.. code-block:: bash
# Add the docker group
# Add the docker group if it doesn't already exist.
sudo groupadd docker
# Add the ubuntu user to the docker group
# Add the user "ubuntu" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect
# this to take effect.
sudo gpasswd -a ubuntu docker
# Restart the docker daemon
# Restart the docker daemon.
sudo service docker restart
.. _bind_docker:
@ -87,7 +90,7 @@ you don't need to add ``sudo`` to all the client commands.
Bind Docker to another host/port or a Unix socket
-------------------------------------------------
.. DANGER:: Changing the default ``docker`` daemon binding to a TCP
.. warning:: Changing the default ``docker`` daemon binding to a TCP
port or Unix *docker* user group will increase your security risks
by allowing non-root users to potentially gain *root* access on the
host (`e.g. #1369


@ -318,8 +318,9 @@ this optional but default, you could use a CMD:
``VOLUME ["/data"]``
The ``VOLUME`` instruction will add one or more new volumes to any
container created from the image.
The ``VOLUME`` instruction will create a mount point with the specified name and mark it
as holding externally mounted volumes from the native host or other containers. For more information/examples
and mounting instructions via the docker client, refer to the :ref:`volume_def` documentation.
3.10 USER
---------


@ -29,14 +29,32 @@ Here are a few sample scripts for systemd and upstart to integrate with docker.
Sample Upstart Script
---------------------
In this example we've already created a container to run Redis with an id of
0a7e070b698b. To create an upstart script for our container, we create a file
named ``/etc/init/redis.conf`` and place the following into it:
.. code-block:: bash
description "Redis container"
author "Me"
start on filesystem and started lxc-net and started docker
start on filesystem and started docker
stop on runlevel [!2345]
respawn
exec docker start -a 0a7e070b698b
script
# Wait for docker to finish starting up first.
FILE=/var/run/docker.sock
while [ ! -e $FILE ] ; do
inotifywait -t 2 -e create $(dirname $FILE)
done
/usr/bin/docker start -a 0a7e070b698b
end script
Next, we have to configure docker so that it's run with the option ``-r=false``.
Run the following command:
.. code-block:: bash
$ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker"
Sample systemd Script


@ -30,44 +30,60 @@ Each container can have zero or more data volumes.
Getting Started
...............
Using data volumes is as simple as adding a new flag: ``-v``. The parameter ``-v`` can be used more than once in order to create more volumes within the new container. The example below shows the instruction to create a container with two new volumes::
Using data volumes is as simple as adding a new flag: ``-v``. The
parameter ``-v`` can be used more than once in order to create more
volumes within the new container. The example below shows the
instruction to create a container with two new volumes::
docker run -v /var/volume1 -v /var/volume2 shykes/couchdb
For a Dockerfile, the VOLUME instruction will add one or more new volumes to any container created from the image::
For a Dockerfile, the VOLUME instruction will add one or more new
volumes to any container created from the image::
VOLUME ["/var/volume1", "/var/volume2"]
Create a new container using existing volumes from an existing container:
---------------------------------------------------------------------------
Mount Volumes from an Existing Container:
-----------------------------------------
The command below creates a new container which is running as a daemon ``-d`` and with one volume ``/var/lib/couchdb``::
The command below creates a new container which is running as a daemon
``-d`` and with one volume ``/var/lib/couchdb``::
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
From the container id of that previous container ``$COUCH1`` it's possible to create a new container sharing the same volume using the parameter ``-volumes-from container_id``::
From the container id of that previous container ``$COUCH1`` it's
possible to create a new container sharing the same volume using the
parameter ``-volumes-from container_id``::
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Now, the second container has all the information from the first volume.
Create a new container which mounts a host directory into it:
-------------------------------------------------------------
Mount a Host Directory as a Container Volume:
---------------------------------------------
::
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
If "host-dir" is missing, then docker creates a new volume.
This is not available for a Dockerfile, due to its portability and sharing purpose. The [host-dir] volumes are 100% host-dependent and will break on any other machine.
This is not available for a Dockerfile, due to its portability and
sharing purpose. The [host-dir] volumes are 100% host-dependent and
will break on any other machine.
For example::
sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03
The command above mounts the host directory ``/var/logs`` into the container with read-only permissions as ``/var/host_logs``.
The command above mounts the host directory ``/var/logs`` into the
container with read-only permissions as ``/var/host_logs``.
.. versionadded:: v0.5.0
Known Issues
............
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.

View file

@ -177,6 +177,15 @@ you can push and pull it like any other repository, but it will
there will be no user name checking performed. Your registry will
function completely independently from the Central Index.
.. raw:: html
<iframe width="640" height="360"
src="//www.youtube.com/embed/CAewZCBT4PI?rel=0" frameborder="0"
allowfullscreen></iframe>
.. seealso:: `Docker Blog: How to use your own registry
<http://blog.docker.io/2013/07/how-to-use-your-own-registry/>`_
Authentication file
-------------------

View file

@ -214,7 +214,7 @@ func (job *Job) GetenvList(key string) []string {
return l
}
func (job *Job) SetenvList(key string, value []string) error {
func (job *Job) SetenvJson(key string, value interface{}) error {
sval, err := json.Marshal(value)
if err != nil {
return err
@ -223,6 +223,10 @@ func (job *Job) SetenvList(key string, value []string) error {
return nil
}
func (job *Job) SetenvList(key string, value []string) error {
return job.SetenvJson(key, value)
}
func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key+"="+value)
}
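The hunk above turns the old list setter into a thin wrapper: SetenvJson marshals any value to JSON, while SetenvList simply delegates to it. A minimal usage sketch, assuming an initialized *engine.Engine; the job name and env keys mirror the ones used by the integration tests later in this diff, and the image name and port are purely illustrative:

package docker

import "github.com/dotcloud/docker/engine"

// createExampleContainer sketches how the Setenv helpers compose on a job.
// It is illustrative only and not part of this change.
func createExampleContainer(eng *engine.Engine) (string, error) {
    job := eng.Job("create")
    job.Setenv("Image", "busybox")                   // plain string value
    job.SetenvList("Cmd", []string{"echo", "hello"}) // []string, now stored as JSON via SetenvJson
    if err := job.SetenvJson("ExposedPorts", map[string]struct{}{"8080/tcp": {}}); err != nil {
        return "", err
    }
    var id string
    job.StdoutParseString(&id) // the create handler prints the new container ID on stdout
    if err := job.Run(); err != nil {
        return "", err
    }
    return id, nil
}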

View file

@ -231,12 +231,11 @@ func setupInitLayer(initLayer string) error {
if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil {
return err
}
if f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755); err != nil {
f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return err
} else {
f.Close()
}
f.Close()
}
} else {
return err

View file

@ -10,7 +10,6 @@ import (
"io"
"io/ioutil"
"os"
"path"
"testing"
"time"
)
@ -122,41 +121,6 @@ func TestRegister(t *testing.T) {
}
}
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if _, err := graph.driver.Get(image.ID); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := graph.driver.Cleanup(); err != nil {
t.Error(err)
}
}()
}
// Test that an image can be deleted by its shorthand prefix
func TestDeletePrefix(t *testing.T) {
graph := tempGraph(t)

51
http_test.go Normal file
View file

@ -0,0 +1,51 @@
package docker
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TestHttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This accound hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}
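For context, the getBoolParam helper exercised by TestGetBoolParam can be pictured as the sketch below. It is only a minimal version consistent with the test's expectations, not necessarily the real helper in the API code; strconv.ParseBool already accepts "true", "True", "1", "false" and "0", so the only extra case is the empty string:

package docker

import (
    "fmt"
    "strconv"
)

// getBoolParam sketch: an empty value defaults to false, anything else is
// parsed as a boolean, and unparsable input is reported as an error.
func getBoolParam(value string) (bool, error) {
    if value == "" {
        return false, nil
    }
    ret, err := strconv.ParseBool(value)
    if err != nil {
        return false, fmt.Errorf("Bad parameter")
    }
    return ret, nil
}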

View file

@ -52,11 +52,11 @@ func LoadImage(root string) (*Image, error) {
return nil, err
}
} else {
if size, err := strconv.Atoi(string(buf)); err != nil {
size, err := strconv.Atoi(string(buf))
if err != nil {
return nil, err
} else {
img.Size = int64(size)
}
img.Size = int64(size)
}
return img, nil
@ -88,14 +88,14 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, ro
// If raw json is provided, then use it
if jsonData != nil {
return ioutil.WriteFile(jsonPath(root), jsonData, 0600)
} else { // Otherwise, unmarshal the image
jsonData, err := json.Marshal(img)
if err != nil {
return err
}
if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
return err
}
}
// Otherwise, unmarshal the image
jsonData, err := json.Marshal(img)
if err != nil {
return err
}
if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
return err
}
// Compute and save the size of the rootfs
size, err := utils.TreeSize(rootfs)
@ -123,11 +123,11 @@ func jsonPath(root string) string {
}
// TarLayer returns a tar archive of the image's filesystem layer.
func (image *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
if image.graph == nil {
return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", image.ID)
func (img *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
if img.graph == nil {
return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
}
layerPath, err := image.graph.driver.Get(image.ID)
layerPath, err := img.graph.driver.Get(img.ID)
if err != nil {
return nil, err
}

The diff for this file is not shown because it is too large. Load diff

61
integration/auth_test.go Normal file
View file

@ -0,0 +1,61 @@
package docker
import (
"crypto/rand"
"encoding/hex"
"github.com/dotcloud/docker/auth"
"os"
"strings"
"testing"
)
// FIXME: these tests have an external dependency on a staging index hosted
// on the docker.io infrastructure. That dependency should be removed.
// - Unit tests should have no side-effect dependencies.
// - Integration tests should have side-effects limited to the host environment being tested.
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = auth.Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}

View file

@ -2,7 +2,9 @@ package docker
import (
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
"io/ioutil"
"net"
"net/http"
@ -14,7 +16,7 @@ import (
// mkTestContext generates a build context from the contents of the provided dockerfile.
// This context is suitable for use as an argument to BuildFile.Build()
func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive {
context, err := mkBuildContext(dockerfile, files)
context, err := docker.MkBuildContext(dockerfile, files)
if err != nil {
t.Fatal(err)
}
@ -228,17 +230,15 @@ func TestBuild(t *testing.T) {
}
}
func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
if srv == nil {
runtime := mkRuntime(t)
func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image {
if eng == nil {
eng = NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
// FIXME: we might not need runtime, why not simply nuke
// the engine?
defer nuke(runtime)
srv = &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
}
srv := mkServerFromEngine(eng, t)
httpServer, err := mkTestingFileServer(context.remoteFiles)
if err != nil {
@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false)
id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err != nil {
t.Fatal(err)
@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) {
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func TestBuildEntrypointRunCleanup(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
run echo "hello"
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img = buildImage(testContextTemplate{`
from {IMAGE}
@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
add foo /foo
entrypoint ["/bin/echo"]
`,
[][2]string{{"foo", "HEYO"}}, nil}, t, srv, true)
[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
if len(img.Config.Cmd) != 0 {
t.Fail()
@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
}
func TestBuildImageWithCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, true)
img = buildImage(template, t, eng, true)
if imageId != img.ID {
t.Logf("Image ids should match: %s != %s", imageId, img.ID)
@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) {
}
func TestBuildImageWithoutCache(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
template := testContextTemplate{`
from {IMAGE}
@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) {
`,
nil, nil}
img := buildImage(template, t, srv, true)
img := buildImage(template, t, eng, true)
imageId := img.ID
img = nil
img = buildImage(template, t, srv, false)
img = buildImage(template, t, eng, false)
if imageId == img.ID {
t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) {
}
func TestForbiddenContextPath(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := mkServerFromEngine(eng, t)
context := testContextTemplate{`
from {IMAGE}
@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) {
}
func TestBuildADDFileNotFound(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
context := testContextTemplate{`
from {IMAGE}
@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
port := httpServer.URL[idx+1:]
ip := srv.runtime.networkManager.bridgeNetwork.IP
iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
if iIP == nil {
t.Fatal("Legacy bridgeIP field not set in engine")
}
ip, ok := iIP.(net.IP)
if !ok {
panic("Legacy bridgeIP field in engine does not cast to net.IP")
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false)
_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
if err == nil {
@ -544,26 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
func TestBuildInheritance(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
img := buildImage(testContextTemplate{`
from {IMAGE}
expose 4243
`,
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
img2 := buildImage(testContextTemplate{fmt.Sprintf(`
from %s
entrypoint ["/bin/echo"]
`, img.ID),
nil, nil}, t, srv, true)
nil, nil}, t, eng, true)
// from child
if img2.Config.Entrypoint[0] != "/bin/echo" {

View file

@ -3,6 +3,8 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@ -66,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -111,8 +113,8 @@ func TestRunHostname(t *testing.T) {
func TestRunWorkdir(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -156,8 +158,8 @@ func TestRunWorkdir(t *testing.T) {
func TestRunWorkdirExists(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -201,8 +203,8 @@ func TestRunExit(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -254,8 +256,8 @@ func TestRunDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -299,8 +301,8 @@ func TestRunDisconnectTty(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
go func() {
@ -356,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -420,8 +422,8 @@ func TestRunDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -466,8 +468,8 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
go func() {
@ -477,7 +479,7 @@ func TestAttachDetach(t *testing.T) {
}
}()
var container *Container
var container *docker.Container
setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
buf := make([]byte, 1024)
@ -498,7 +500,7 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch = make(chan struct{})
go func() {
@ -546,8 +548,8 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go stdout.Read(make([]byte, 1024))
setTimeout(t, "Starting container timed out", 2*time.Second, func() {
@ -560,7 +562,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
ch := make(chan struct{})
go func() {
@ -608,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
go func() {
// Start a process in daemon mode
@ -677,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) {
func TestRunAutoRemove(t *testing.T) {
t.Skip("Fixme. Skipping test for now, race condition")
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -712,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) {
}
func TestCmdLogs(t *testing.T) {
cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
t.Fatal(err)
@ -730,8 +732,8 @@ func TestCmdLogs(t *testing.T) {
// Expected behaviour: using / as a bind mount source should throw an error
func TestRunErrorBindMountRootSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -749,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
@ -768,11 +770,10 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
func TestImagesViz(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
srv := &Server{runtime: globalRuntime}
image := buildTestImages(t, srv)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
@ -819,11 +820,10 @@ func TestImagesViz(t *testing.T) {
func TestImagesTree(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
srv := &Server{runtime: globalRuntime}
image := buildTestImages(t, srv)
image := buildTestImages(t, globalEngine)
c := make(chan struct{})
go func() {
@ -866,7 +866,7 @@ func TestImagesTree(t *testing.T) {
})
}
func buildTestImages(t *testing.T, srv *Server) *Image {
func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image {
var testBuilder = testContextTemplate{
`
@ -879,9 +879,9 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
nil,
nil,
}
image := buildImage(testBuilder, t, srv, true)
image := buildImage(testBuilder, t, eng, true)
err := srv.ContainerTag(image.ID, "test", "latest", false)
err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
if err != nil {
t.Fatal(err)
}
@ -901,8 +901,8 @@ func TestRunCidFile(t *testing.T) {
}
tmpCidFile := path.Join(tmpDir, "cid")
cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalRuntime)
cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {

View file

@ -3,10 +3,10 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"regexp"
@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
@ -41,7 +41,7 @@ func TestIDFormat(t *testing.T) {
func TestMultipleAttachRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _ := mkContainer(
container, _, _ := mkContainer(
runtime,
[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
t,
@ -134,10 +134,11 @@ func TestMultipleAttachRestart(t *testing.T) {
}
func TestDiff(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
// Create a container and remove a file
container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
defer runtime.Destroy(container1)
// The changelog should be empty and not fail before run. See #1705
@ -169,17 +170,13 @@ func TestDiff(t *testing.T) {
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Fatal(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil)
if err != nil {
t.Fatal(err)
}
// Create a new container from the commited image
container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
@ -198,7 +195,7 @@ func TestDiff(t *testing.T) {
}
// Create a new container
container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
defer runtime.Destroy(container3)
if err := container3.Run(); err != nil {
@ -224,7 +221,7 @@ func TestDiff(t *testing.T) {
func TestCommitAutoRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
if container1.State.Running {
@ -237,17 +234,13 @@ func TestCommitAutoRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, _ := mkContainer(runtime, []string{img.ID}, t)
container2, _, _ := mkContainer(runtime, []string{img.ID}, t)
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
@ -284,7 +277,7 @@ func TestCommitRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1)
if container1.State.Running {
@ -297,17 +290,13 @@ func TestCommitRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
@ -343,7 +332,7 @@ func TestCommitRun(t *testing.T) {
func TestStart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
defer runtime.Destroy(container)
cStdin, err := container.StdinPipe()
@ -373,7 +362,7 @@ func TestStart(t *testing.T) {
func TestRun(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container)
if container.State.Running {
@ -391,7 +380,7 @@ func TestOutput(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
@ -414,7 +403,7 @@ func TestContainerNetwork(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
},
@ -436,7 +425,7 @@ func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
OpenStdin: true,
@ -448,7 +437,9 @@ func TestKillDifferentUser(t *testing.T) {
t.Fatal(err)
}
defer runtime.Destroy(container)
defer container.stdin.Close()
// FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
// there is a side effect I'm not seeing.
// defer container.stdin.Close()
if container.State.Running {
t.Errorf("Container shouldn't be running")
@ -490,22 +481,35 @@ func TestKillDifferentUser(t *testing.T) {
// Test that creating a container with a volume doesn't crash. Regression test for #995.
func TestCreateVolume(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-v", "/var/lib/data", GetTestImage(runtime).ID, "echo", "hello", "world"}, nil)
config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
if err != nil {
t.Fatal(err)
}
c, _, err := runtime.Create(config, "")
if err != nil {
jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
c.hostConfig = hc
if err := c.Start(); err != nil {
var id string
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
}
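The pattern above — a "create" job fed with ImportEnv, the new ID parsed from the job's stdout, then a "start" job fed with the host config — replaces the old direct runtime.Create/Start calls throughout these tests. Condensed into one helper it looks roughly like the sketch below; it assumes, as the tests do, that Config and HostConfig are the types returned by docker.ParseRun and that eng comes from NewTestEngine:

package docker

import (
    "github.com/dotcloud/docker"
    "github.com/dotcloud/docker/engine"
)

// createAndStart is an illustrative helper only; it mirrors the job calls
// made inline by TestCreateVolume above.
func createAndStart(eng *engine.Engine, config *docker.Config, hostConfig *docker.HostConfig) (string, error) {
    create := eng.Job("create")
    if err := create.ImportEnv(config); err != nil { // copy the Config fields into the job environment
        return "", err
    }
    var id string
    create.StdoutParseString(&id) // the handler prints the new container ID on stdout
    if err := create.Run(); err != nil {
        return "", err
    }
    start := eng.Job("start", id)
    if err := start.ImportEnv(hostConfig); err != nil { // host-specific settings travel the same way
        return "", err
    }
    return id, start.Run()
}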
@ -513,7 +517,7 @@ func TestCreateVolume(t *testing.T) {
func TestKill(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -557,7 +561,7 @@ func TestExitCode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
trueContainer, _, err := runtime.Create(&Config{
trueContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true", ""},
}, "")
@ -572,7 +576,7 @@ func TestExitCode(t *testing.T) {
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
}
falseContainer, _, err := runtime.Create(&Config{
falseContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/false", ""},
}, "")
@ -591,7 +595,7 @@ func TestExitCode(t *testing.T) {
func TestRestart(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
},
@ -622,7 +626,7 @@ func TestRestart(t *testing.T) {
func TestRestartStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -700,7 +704,7 @@ func TestUser(t *testing.T) {
defer nuke(runtime)
// Default user must be root
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
},
@ -719,7 +723,7 @@ func TestUser(t *testing.T) {
}
// Set a username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -740,7 +744,7 @@ func TestUser(t *testing.T) {
}
// Set a UID
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -761,7 +765,7 @@ func TestUser(t *testing.T) {
}
// Set a different user by uid
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -784,7 +788,7 @@ func TestUser(t *testing.T) {
}
// Set a different user by username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -805,7 +809,7 @@ func TestUser(t *testing.T) {
}
// Test a wrong username
container, _, err = runtime.Create(&Config{
container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"id"},
@ -827,7 +831,7 @@ func TestMultipleContainers(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _, err := runtime.Create(&Config{
container1, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -838,7 +842,7 @@ func TestMultipleContainers(t *testing.T) {
}
defer runtime.Destroy(container1)
container2, _, err := runtime.Create(&Config{
container2, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"},
},
@ -882,7 +886,7 @@ func TestMultipleContainers(t *testing.T) {
func TestStdin(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -927,7 +931,7 @@ func TestStdin(t *testing.T) {
func TestTty(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"},
@ -974,7 +978,7 @@ func TestEnv(t *testing.T) {
os.Setenv("TRICKY", "tri\ncky\n")
runtime := mkRuntime(t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
if err != nil {
t.Fatal(err)
}
@ -1028,7 +1032,7 @@ func TestEntrypoint(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo"},
Cmd: []string{"-n", "foobar"},
@ -1052,7 +1056,7 @@ func TestEntrypointNoCmd(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo", "foobar"},
},
@ -1071,96 +1075,11 @@ func TestEntrypointNoCmd(t *testing.T) {
}
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}
func TestLXCConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
// CPU shares as well
cpuMin := 100
cpuMax := 10000
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
CpuShares: int64(cpu),
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func TestCustomLxcConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
{
Key: "lxc.utsname",
Value: "docker",
},
{
Key: "lxc.cgroup.cpuset.cpus",
Value: "0,1",
},
}}
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
}
func BenchmarkRunSequencial(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"},
},
@ -1193,7 +1112,7 @@ func BenchmarkRunParallel(b *testing.B) {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"},
},
@ -1244,11 +1163,12 @@ func tempDir(t *testing.T) string {
// Test for #1737
func TestCopyVolumeUidGid(t *testing.T) {
r := mkRuntime(t)
defer nuke(r)
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
// Add directory not owned by root
container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
defer r.Destroy(container1)
if container1.State.Running {
@ -1261,11 +1181,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
@ -1273,7 +1189,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
// Test that the uid and gid is copied from the image to the volume
tmpDir1 := tempDir(t)
defer os.RemoveAll(tmpDir1)
stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t)
stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t)
if !strings.Contains(stdout1, "daemon daemon") {
t.Fatal("Container failed to transfer uid and gid to volume")
}
@ -1281,11 +1197,12 @@ func TestCopyVolumeUidGid(t *testing.T) {
// Test for #1582
func TestCopyVolumeContent(t *testing.T) {
r := mkRuntime(t)
defer nuke(r)
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
// Put some content in a directory of a container and commit it
container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
defer r.Destroy(container1)
if container1.State.Running {
@ -1298,11 +1215,7 @@ func TestCopyVolumeContent(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
@ -1310,31 +1223,33 @@ func TestCopyVolumeContent(t *testing.T) {
// Test that the content is copied from the image to the volume
tmpDir1 := tempDir(t)
defer os.RemoveAll(tmpDir1)
stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t)
stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t)
if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) {
t.Fatal("Container failed to transfer content to volume")
}
}
func TestBindMounts(t *testing.T) {
r := mkRuntime(t)
defer nuke(r)
eng := NewTestEngine(t)
r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
tmpDir := tempDir(t)
defer os.RemoveAll(tmpDir)
writeFile(path.Join(tmpDir, "touch-me"), "", t)
// Test reading from a read-only bind mount
stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
if !strings.Contains(stdout, "touch-me") {
t.Fatal("Container failed to read from bind mount")
}
// test writing to bind mount
runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory
if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil {
if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil {
t.Fatal("Container bind mounted illegal directory")
}
}
@ -1344,7 +1259,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1364,7 +1279,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID + ":ro",
@ -1405,7 +1320,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1425,7 +1340,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID,
@ -1461,7 +1376,7 @@ func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
@ -1505,7 +1420,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},
@ -1534,7 +1449,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"cat", "/test/foo"},
VolumesFrom: container.ID,
@ -1568,26 +1483,42 @@ func TestVolumesFromWithVolumes(t *testing.T) {
}
func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
if err != nil {
t.Fatal(err)
}
c, _, err := runtime.Create(config, "")
if err != nil {
jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err)
}
var id string
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
stdout, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
c.hostConfig = hc
if err := c.Start(); err != nil {
jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
c.WaitTimeout(500 * time.Millisecond)
c.Wait()
output, err := ioutil.ReadAll(stdout)
@ -1602,37 +1533,40 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
if !strings.HasSuffix(interfaces[0], ": lo") {
t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces)
}
}
func TestPrivilegedCanMknod(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mknod into privileged container")
}
}
func TestPrivilegedCanMount(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mount into privileged container")
}
}
func TestPrivilegedCannotMknod(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" {
t.Fatal("Could mknod into secure container")
}
}
func TestPrivilegedCannotMount(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
t.Fatal("Could mount into secure container")
}
}
@ -1641,7 +1575,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},
@ -1670,7 +1604,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
}
container2, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"},
Volumes: map[string]struct{}{"/other": {}},
@ -1692,7 +1626,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
}
container3, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
@ -1720,7 +1654,7 @@ func TestRestartGhost(t *testing.T) {
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}},

57
integration/graph_test.go Normal file
View file

@ -0,0 +1,57 @@
package docker
import (
"github.com/dotcloud/docker"
"io/ioutil"
"os"
"path"
"testing"
)
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := docker.Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
//FIXME: duplicate
func tempGraph(t *testing.T) *docker.Graph {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
}
graph, err := docker.NewGraph(tmp)
if err != nil {
t.Fatal(err)
}
return graph
}

View file

@ -0,0 +1,22 @@
package docker
import (
"github.com/dotcloud/docker/iptables"
"os"
"testing"
)
// FIXME: this test should be a unit test.
// For example by mocking os/exec to make sure iptables is not actually called.
func TestIptables(t *testing.T) {
if _, err := iptables.Raw("-L"); err != nil {
t.Fatal(err)
}
path := os.Getenv("PATH")
os.Setenv("PATH", "")
defer os.Setenv("PATH", path)
if _, err := iptables.Raw("-L"); err == nil {
t.Fatal("Not finding iptables in the PATH should cause an error")
}
}
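iptables.Raw shells out to the iptables binary, which is why clearing PATH in the test above is expected to make it fail. A sketch of that behaviour, under the assumption that the real implementation simply wraps os/exec and returns the command's combined output:

package iptables

import (
    "fmt"
    "os/exec"
)

// Raw sketch: run the iptables binary found on PATH with the given arguments.
func Raw(args ...string) ([]byte, error) {
    path, err := exec.LookPath("iptables")
    if err != nil {
        return nil, fmt.Errorf("iptables not found on PATH")
    }
    output, err := exec.Command(path, args...).CombinedOutput()
    if err != nil {
        return nil, fmt.Errorf("iptables %v failed: %s (%s)", args, err, output)
    }
    return output, nil
}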

View file

@ -3,6 +3,7 @@ package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
@ -15,7 +16,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
@ -32,42 +32,33 @@ const (
)
var (
globalRuntime *Runtime
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *docker.Runtime
globalEngine *engine.Engine
startFds int
startGoroutines int
)
func nuke(runtime *Runtime) error {
if nonuke := os.Getenv("NONUKE"); nonuke != "" {
return nil
}
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
return os.RemoveAll(runtime.config.Root)
// FIXME: nuke() is deprecated by Runtime.Nuke()
func nuke(runtime *docker.Runtime) error {
return runtime.Nuke()
}
func cleanup(runtime *Runtime) error {
// FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() {
container.Kill()
runtime.Destroy(container)
}
images, err := runtime.graph.Map()
srv := mkServerFromEngine(eng, t)
images, err := srv.Images(true, "")
if err != nil {
return err
}
for _, image := range images {
if image.ID != unitTestImageID {
runtime.graph.Delete(image.ID)
srv.ImageDelete(image.ID, false)
}
}
return nil
@ -136,10 +127,9 @@ func setupBaseImage() {
log.Fatalf("Unable to create a runtime for tests:", err)
}
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
runtime := srv.runtime
// If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
// Retrieve the Image
if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
log.Fatalf("Unable to pull the test image: %s", err)
@ -154,8 +144,8 @@ func spawnGlobalDaemon() {
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
globalRuntime = srv.runtime
globalEngine = eng
globalRuntime = mkRuntimeFromEngine(eng, t)
// Spawn a Daemon
go func() {
@ -177,8 +167,8 @@ func spawnGlobalDaemon() {
// FIXME: test that ImagePull(json=true) send correct json output
func GetTestImage(runtime *Runtime) *Image {
imgs, err := runtime.graph.Map()
func GetTestImage(runtime *docker.Runtime) *docker.Image {
imgs, err := runtime.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image:", err)
}
@ -187,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image
}
}
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs)
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
return nil
}
@ -200,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
}
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
@ -241,13 +231,25 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Exists() returned false for a newly created container")
}
// Test that conflict error displays correct details
testContainer, _, _ := runtime.Create(
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
},
"conflictname",
)
if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) {
t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error())
}
// Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing")
}
if _, _, err := runtime.Create(
&Config{
&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{},
},
@ -256,7 +258,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Fatal("Builder.Create should throw an error when Cmd is empty")
}
config := &Config{
config := &docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"},
@ -269,7 +271,7 @@ func TestRuntimeCreate(t *testing.T) {
}
// test expose 80:8000
container, warnings, err := runtime.Create(&Config{
container, warnings, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
@ -288,7 +290,7 @@ func TestDestroy(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
}, "")
@ -315,12 +317,6 @@ func TestDestroy(t *testing.T) {
t.Errorf("Unable to get newly created container")
}
// Make sure the container root directory does not exist anymore
_, err = os.Stat(container.root)
if err == nil || !os.IsNotExist(err) {
t.Errorf("Container root directory still exists after destroy")
}
// Test double destroy
if err := runtime.Destroy(container); err == nil {
// It should have failed
@ -332,13 +328,13 @@ func TestGet(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1)
container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2)
container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3)
if runtime.Get(container1.ID) != container1 {
@ -355,15 +351,21 @@ func TestGet(t *testing.T) {
}
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
var (
err error
container *Container
strPort string
runtime = mkRuntime(t)
port = 5554
p Port
err error
id string
strPort string
eng = NewTestEngine(t)
runtime = mkRuntimeFromEngine(eng, t)
port = 5554
p docker.Port
)
defer func() {
if err != nil {
runtime.Nuke()
}
}()
for {
port += 1
@ -376,37 +378,45 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
} else {
t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
}
ep := make(map[Port]struct{}, 1)
p = Port(fmt.Sprintf("%s/%s", strPort, proto))
ep := make(map[docker.Port]struct{}, 1)
p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
ep[p] = struct{}{}
container, _, err = runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", cmd},
PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
ExposedPorts: ep,
}, "")
if err != nil {
nuke(runtime)
jobCreate := eng.Job("create")
jobCreate.Setenv("Image", unitTestImageID)
jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
jobCreate.SetenvJson("ExposedPorts", ep)
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
if container != nil {
// FIXME: this relies on the undocumented behavior of runtime.Create
// which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
break
}
t.Logf("Port %v already in use, trying another one", strPort)
}
container.hostConfig = &HostConfig{
PortBindings: make(map[Port][]PortBinding),
}
container.hostConfig.PortBindings[p] = []PortBinding{
jobStart := eng.Job("start", id)
portBindings := make(map[docker.Port][]docker.PortBinding)
portBindings[p] = []docker.PortBinding{
{},
}
if err := container.Start(); err != nil {
nuke(runtime)
if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
container := runtime.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for !container.State.Running {
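SetenvJson, as the name suggests, appears to store the value JSON-encoded in the job's environment; that is how structured values such as the ExposedPorts and PortBindings maps cross the job boundary above. A condensed sketch of the same create-job pattern for a single TCP port (the port value is illustrative):

    ports := map[docker.Port]struct{}{docker.Port("8080/tcp"): {}}
    job := eng.Job("create")
    job.Setenv("Image", unitTestImageID)
    job.SetenvList("Cmd", []string{"true"})
    if err := job.SetenvJson("ExposedPorts", ports); err != nil {
        t.Fatal(err)
    }
    var id string
    job.StdoutParseString(&id)
    if err := job.Run(); err != nil {
        t.Fatal(err)
    }
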
@ -507,14 +517,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
}
func TestRestore(t *testing.T) {
runtime1 := mkRuntime(t)
defer nuke(runtime1)
eng := NewTestEngine(t)
runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
@ -548,12 +559,19 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = false
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
root := eng.Root()
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
@ -578,14 +596,31 @@ func TestRestore(t *testing.T) {
}
func TestReloadContainerLinks(t *testing.T) {
runtime1 := mkRuntime(t)
// FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false,
// and we want to set it to true.
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime1 := mkRuntimeFromEngine(eng, t)
defer nuke(runtime1)
// Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
defer runtime1.Destroy(container1)
// Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2)
// Start the container non blocking
@ -593,7 +628,9 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatal(err)
}
// Add a link to container 2
container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
// Why do we need it @crosbymichael?
// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
t.Fatal(err)
}
@ -615,12 +652,18 @@ func TestReloadContainerLinks(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime1.config.AutoRestart = true
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
eng, err = engine.New(root)
if err != nil {
t.Fatal(err)
}
defer nuke(runtime2)
job = eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
}
@ -634,27 +677,32 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatalf("Expected 2 container alive, %d found", runningCount)
}
// FIXME: we no longer test if containers were registered in the right order,
// because there is no public
// Make sure container 2 ( the child of container 1 ) was registered and started first
// with the runtime
first := runtime2.containers.Front()
if first.Value.(*Container).ID != container2.ID {
//
containers := runtime2.List()
if len(containers) == 0 {
t.Fatalf("Runtime has no containers")
}
first := containers[0]
if first.ID != container2.ID {
t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
}
// Verify that the link is still registered in the runtime
entity := runtime2.containerGraph.Get(container1.Name)
if entity == nil {
t.Fatal("Entity should not be nil")
if c := runtime2.Get(container1.Name); c == nil {
t.Fatal("Named container is no longer registered after restart")
}
}
func TestDefaultContainerName(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -666,29 +714,19 @@ func TestDefaultContainerName(t *testing.T) {
t.Fatalf("Expect /some_name got %s", container.Name)
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name != "some_name" {
t.Fatalf("Expected some_name got %s", edge.Name)
if c := runtime.Get("/some_name"); c == nil {
t.Fatalf("Couldn't retrieve test container as /some_name")
} else if c.ID != containerID {
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
}
}
func TestRandomContainerName(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -700,29 +738,19 @@ func TestRandomContainerName(t *testing.T) {
t.Fatalf("Expected not empty container name")
}
paths := runtime.containerGraph.RefPaths(containerID)
if paths == nil || len(paths) == 0 {
t.Fatalf("Could not find edges for %s", containerID)
}
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name == "" {
t.Fatalf("Expected not empty container name")
if c := runtime.Get(container.Name); c == nil {
log.Fatalf("Could not lookup container %s by its name", container.Name)
} else if c.ID != containerID {
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
}
}
func TestLinkChildContainer(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -738,7 +766,7 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -761,11 +789,10 @@ func TestLinkChildContainer(t *testing.T) {
func TestGetAllChildren(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -781,7 +808,7 @@ func TestGetAllChildren(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
}
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -813,19 +840,3 @@ func TestGetAllChildren(t *testing.T) {
}
}
}
func TestGetFullName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
name, err := runtime.getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := runtime.getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}

View file

@ -1,32 +1,31 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"strings"
"testing"
"time"
)
func TestContainerTagImageDelete(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := &Server{runtime: runtime}
srv := mkServerFromEngine(eng, t)
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
@ -82,46 +81,43 @@ func TestContainerTagImageDelete(t *testing.T) {
func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
if err = srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestCreateRmVolumes(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
@ -141,18 +137,17 @@ func TestCreateRmVolumes(t *testing.T) {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestCommit(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
@ -167,18 +162,17 @@ func TestCommit(t *testing.T) {
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(c))
}
job := eng.Job("start", id)
@ -214,21 +208,18 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
t.Fatal(err)
}
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(c))
}
}
func TestRunWithTooLowMemoryLimit(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
job := eng.Job("create")
job.Setenv("Image", GetTestImage(runtime).ID)
job.Setenv("Image", unitTestImageID)
job.Setenv("Memory", "524287")
job.Setenv("CpuShares", "1000")
job.SetenvList("Cmd", []string{"/bin/cat"})
@ -239,163 +230,17 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
}
}
func TestContainerTop(t *testing.T) {
t.Skip("Fixme. Skipping test for now. Reported error: 'server_test.go:236: Expected 2 processes, found 1.'")
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
if err := c.Start(); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
c.WaitTimeout(500 * time.Millisecond)
if !c.State.Running {
t.Errorf("Container should be running")
}
procs, err := srv.ContainerTop(c.ID, "")
if err != nil {
t.Fatal(err)
}
if len(procs.Processes) != 2 {
t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes))
}
pos := -1
for i := 0; i < len(procs.Titles); i++ {
if procs.Titles[i] == "CMD" {
pos = i
break
}
}
if pos == -1 {
t.Fatalf("Expected CMD, not found.")
}
if procs.Processes[0][pos] != "sh" && procs.Processes[0][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[0][pos])
}
if procs.Processes[1][pos] != "sh" && procs.Processes[1][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[1][pos])
}
}
func TestPools(t *testing.T) {
runtime := mkRuntime(t)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
defer nuke(runtime)
err := srv.poolAdd("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("push", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("pull", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("push", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}
func TestLogEvent(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}
func TestRmi(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
defer mkRuntimeFromEngine(eng, t).Nuke()
initialImages, err := srv.Images(false, "")
if err != nil {
t.Fatal(err)
}
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}
@ -471,19 +316,19 @@ func TestRmi(t *testing.T) {
}
func TestImagesFilter(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)
defer nuke(mkRuntimeFromEngine(eng, t))
srv := &Server{runtime: runtime}
srv := mkServerFromEngine(eng, t)
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err)
}
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil {
if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err)
}
@ -525,9 +370,9 @@ func TestImagesFilter(t *testing.T) {
}
func TestImageInsert(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
sf := utils.NewStreamFormatter(true)
// bad image name fails
@ -536,12 +381,12 @@ func TestImageInsert(t *testing.T) {
}
// bad url fails
if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
t.Fatal("expected an error and got none")
}
// success returns nil
if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
t.Fatalf("expected no error, but got %v", err)
}
}

View file

@ -0,0 +1,63 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
if err := generateImage("", srv); err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].Created < images[1].Created {
t.Error("Expected []APIImges to be ordered by most recent creation date.")
}
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
err := generateImage("bar", srv)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", srv)
if err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" {
t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images)
}
}
func generateImage(name string, srv *docker.Server) error {
archive, err := fakeTar()
if err != nil {
return err
}
return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true))
}

328
integration/utils_test.go Normal file
View file

@ -0,0 +1,328 @@
package docker
import (
"archive/tar"
"bytes"
"fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"strings"
"testing"
"time"
)
// This file contains utility functions for docker's unit test suite.
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *docker.Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &docker.DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := docker.NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}
func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}
func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func startContainer(eng *engine.Engine, id string, t utils.Fataler) {
job := eng.Job("start", id)
if err := job.Run(); err != nil {
t.Fatal(err)
}
}
func containerRun(eng *engine.Engine, id string, t utils.Fataler) {
startContainer(eng, id, t)
containerWait(eng, id, t)
}
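Composed, these helpers let a test drive a full container lifecycle through the engine without touching the runtime directly. A minimal sketch, assuming the standard test image; the test name and command are illustrative:

    func TestEchoLifecycle(t *testing.T) {
        eng := NewTestEngine(t)
        defer mkRuntimeFromEngine(eng, t).Nuke()
        config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
        if err != nil {
            t.Fatal(err)
        }
        id := createTestContainer(eng, config, t)
        containerRun(eng, id, t) // start, then wait for exit
        containerAssertExists(eng, id, t)
    }
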
func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool {
c := getContainer(eng, id, t)
if err := c.EnsureMounted(); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
if os.IsNotExist(err) {
return false
}
t.Fatal(err)
}
return true
}
func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) {
c := getContainer(eng, id, t)
i, err := c.StdinPipe()
if err != nil {
t.Fatal(err)
}
o, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
return i, o
}
func containerWait(eng *engine.Engine, id string, t utils.Fataler) int {
return getContainer(eng, id, t).Wait()
}
func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error {
return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond)
}
func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
if err := getContainer(eng, id, t).Kill(); err != nil {
t.Fatal(err)
}
}
func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool {
return getContainer(eng, id, t).State.Running
}
func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
getContainer(eng, id, t)
}
func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
runtime := mkRuntimeFromEngine(eng, t)
if c := runtime.Get(id); c != nil {
t.Fatal(fmt.Errorf("Container %s should not exist", id))
}
}
// assertHttpNotError expects the given response to not have an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
}
}
// assertHttpError expects the given response to have an error.
// Otherwise it causes the test to fail.
func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
}
}
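A typical caller records an API response and then asserts on it. A sketch; the route is illustrative and the line that actually serves the request is left as a placeholder, since the API wiring lives in the API test files:

    r := httptest.NewRecorder()
    req, err := http.NewRequest("GET", "/containers/json", nil)
    if err != nil {
        t.Fatal(err)
    }
    // ... serve req against the HTTP API under test, writing the response into r ...
    assertHttpNotError(r, t)
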
func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container {
runtime := mkRuntimeFromEngine(eng, t)
c := runtime.Get(id)
if c == nil {
t.Fatal(fmt.Errorf("No such container: %s", id))
}
return c
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*docker.Server)
if !ok {
panic("Legacy server field in engine does not cast to *docker.Server")
}
return srv
}
func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
}
runtime, ok := iRuntime.(*docker.Runtime)
if !ok {
panic("Legacy runtime field in engine does not cast to *docker.Runtime")
}
return runtime
}
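The same Hack_GetGlobalVar pattern works for the other global set by jobInitApi in this change, "httpapi.bridgeIP". A sketch mirroring the two helpers above; it assumes the stored value is a net.IP and that the file imports "net":

    func mkBridgeIPFromEngine(eng *engine.Engine, t utils.Fataler) net.IP {
        iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
        if iIP == nil {
            panic("Legacy bridgeIP field not set in engine")
        }
        ip, ok := iIP.(net.IP)
        if !ok {
            panic("Legacy bridgeIP field in engine does not cast to net.IP")
        }
        return ip
    }
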
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
// TestGetEnabledCors and TestOptionsRoute require EnableCors=true
job.SetenvBool("EnableCors", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}
func newTestDirectory(templateDir string) (dir string, err error) {
return utils.TestDirectory(templateDir)
}
func getCallerName(depth int) string {
return utils.GetCallerName(depth)
}
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
// Call t.Fatal() at the first error.
func writeFile(dst, content string, t *testing.T) {
// Create subdirectories if necessary
if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
if err != nil {
t.Fatal(err)
}
// Write content (truncate if it exists)
if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
t.Fatal(err)
}
}
// Return the contents of file at path `src`.
// Call t.Fatal() at the first error (including if the file doesn't exist)
func readFile(src string, t *testing.T) (content string) {
f, err := os.Open(src)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
return string(data)
}
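A round trip with these two helpers looks like this; the paths are illustrative:

    tmp, err := ioutil.TempDir("", "docker-test-rw")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)
    writeFile(path.Join(tmp, "etc/hosts"), "127.0.0.1 localhost\n", t)
    if content := readFile(path.Join(tmp, "etc/hosts"), t); content != "127.0.0.1 localhost\n" {
        t.Fatalf("Unexpected content: %q", content)
    }
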
// Create a test container from the given runtime `r` and run arguments `args`.
// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) {
config, hc, _, err := docker.ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
return nil, nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
c, _, err := r.Create(config, "")
if err != nil {
return nil, nil, err
}
// NOTE: hostConfig is ignored.
// If `args` specify privileged mode, custom lxc conf, external mount binds,
// port redirects etc. they will be ignored.
// This is because the correct way to set these things is to pass environment
// to the `start` job.
// FIXME: this helper function should be deprecated in favor of calling
// `create` and `start` jobs directly.
return c, hc, nil
}
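In other words, host-level settings travel on the start job rather than through mkContainer. A sketch of adding a bind mount that way, given an engine eng and runtime r as in runContainer below; the "Binds" key mirrors the HostConfig field name and is an assumption here:

    container, _, err := mkContainer(r, []string{"_", "cat", "/data/file"}, t)
    if err != nil {
        t.Fatal(err)
    }
    defer r.Destroy(container)
    job := eng.Job("start", container.ID)
    job.SetenvList("Binds", []string{"/tmp/data:/data"}) // assumed key; ignored if set via mkContainer
    if err := job.Run(); err != nil {
        t.Fatal(err)
    }
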
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
container, hc, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
return "", err
}
defer stdout.Close()
job := eng.Job("start", container.ID)
if err := job.ImportEnv(hc); err != nil {
return "", err
}
if err := job.Run(); err != nil {
return "", err
}
container.Wait()
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
output = string(data)
return
}
// FIXME: this is duplicated from graph_test.go in the docker package.
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}

View file

View file

@ -1,18 +0,0 @@
package iptables
import (
"os"
"testing"
)
func TestIptables(t *testing.T) {
if _, err := Raw("-L"); err != nil {
t.Fatal(err)
}
path := os.Getenv("PATH")
os.Setenv("PATH", "")
defer os.Setenv("PATH", path)
if _, err := Raw("-L"); err == nil {
t.Fatal("Not finding iptables in the PATH should cause an error")
}
}

View file

@ -120,7 +120,7 @@ lxc.aa_profile = unconfined
# (Note: 'lxc.cap.keep' is coming soon and should replace this under the
# security principle 'deny all unless explicitly permitted', see
# http://sourceforge.net/mailarchive/message.php?msg_id=31054627 )
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setpcap sys_admin sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
{{end}}
# limits

102
lxc_template_unit_test.go Normal file
View file

@ -0,0 +1,102 @@
package docker
import (
"bufio"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strings"
"testing"
"time"
)
func TestLXCConfig(t *testing.T) {
root, err := ioutil.TempDir("", "TestLXCConfig")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
// CPU shares as well
cpuMin := 100
cpuMax := 10000
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
container := &Container{
root: root,
Config: &Config{
Hostname: "foobar",
Memory: int64(mem),
CpuShares: int64(cpu),
NetworkDisabled: true,
},
hostConfig: &HostConfig{
Privileged: false,
},
}
if err := container.generateLXCConfig(); err != nil {
t.Fatal(err)
}
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func TestCustomLxcConfig(t *testing.T) {
root, err := ioutil.TempDir("", "TestCustomLxcConfig")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
container := &Container{
root: root,
Config: &Config{
Hostname: "foobar",
NetworkDisabled: true,
},
hostConfig: &HostConfig{
Privileged: false,
LxcConf: []KeyValuePair{
{
Key: "lxc.utsname",
Value: "docker",
},
{
Key: "lxc.cgroup.cpuset.cpus",
Value: "0,1",
},
},
},
}
if err := container.generateLXCConfig(); err != nil {
t.Fatal(err)
}
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}

View file

@ -1,7 +1,7 @@
package docker
import (
_ "code.google.com/p/gosqlite/sqlite3"
_ "code.google.com/p/gosqlite/sqlite3" // registers sqlite
"container/list"
"database/sql"
"fmt"
@ -20,6 +20,7 @@ import (
"path"
"sort"
"strings"
"sync"
"time"
)
@ -417,7 +418,8 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
// Set the entity in the graph using the default name specified
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if strings.HasSuffix(err.Error(), "name are not unique") {
return nil, nil, fmt.Errorf("Conflict, %s already exists.", name)
conflictingContainer, _ := runtime.GetByName(name)
return nil, nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, utils.TruncateID(conflictingContainer.ID), name)
}
return nil, nil, err
}
@ -548,7 +550,12 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
return img, nil
}
// FIXME: this is deprecated by the getFullName *function*
func (runtime *Runtime) getFullName(name string) (string, error) {
return getFullName(name)
}
func getFullName(name string) (string, error) {
if name == "" {
return "", fmt.Errorf("Container name cannot be empty")
}
@ -762,6 +769,25 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
return archive.ExportChanges(cDir, changes)
}
// Nuke kills all containers then removes all content
// from the content root, including images, volumes and
// container filesystems.
// Again: this will remove your entire docker runtime!
func (runtime *Runtime) Nuke() error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
return os.RemoveAll(runtime.config.Root)
}
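Callers that want the escape hatch the old integration helper offered through the NONUKE environment variable now have to guard the call themselves; a small sketch:

    if os.Getenv("NONUKE") == "" {
        if err := runtime.Nuke(); err != nil {
            return err
        }
    }
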
func linkLxcStart(root string) error {
sourcePath, err := exec.LookPath("lxc-start")
if err != nil {
@ -779,6 +805,14 @@ func linkLxcStart(root string) error {
return os.Symlink(sourcePath, targetPath)
}
// FIXME: this is a convenience function for integration tests
// which need direct access to runtime.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
func (runtime *Runtime) Graph() *Graph {
return runtime.graph
}
// History is a convenience type for storing a list of containers,
// ordered by creation date.
type History []*Container

View file

@ -62,6 +62,8 @@ func jobInitApi(job *engine.Job) string {
os.Exit(0)
}()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
return err.Error()
}
@ -422,9 +424,9 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
}
func (srv *Server) ContainerTop(name, ps_args string) (*APITop, error) {
func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
if container := srv.runtime.Get(name); container != nil {
output, err := exec.Command("lxc-ps", "--name", container.ID, "--", ps_args).CombinedOutput()
output, err := exec.Command("lxc-ps", "--name", container.ID, "--", psArgs).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("lxc-ps: %s (%s)", err, output)
}
@ -532,6 +534,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf
return img.ID, err
}
// FIXME: this should be called ImageTag
func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
if err := srv.runtime.repositories.Set(repo, tag, name, force); err != nil {
return err
@ -891,12 +894,13 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID))
continue
}
if checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf); err != nil {
checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf)
if err != nil {
// FIXME: Continue on error?
return err
} else {
elem.Checksum = checksum
}
elem.Checksum = checksum
if err := pushTags(); err != nil {
return err
}
@ -936,13 +940,15 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
if err != nil {
return "", fmt.Errorf("Failed to generate layer archive: %s", err)
}
defer os.RemoveAll(layerData.Name())
// Send the layer
if checksum, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw); err != nil {
checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw)
if err != nil {
return "", err
} else {
imgData.Checksum = checksum
}
imgData.Checksum = checksum
out.Write(sf.FormatStatus("", ""))
// Send the checksum
@ -1065,7 +1071,12 @@ func (srv *Server) ContainerCreate(job *engine.Job) string {
return err.Error()
}
srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
job.Printf("%s\n", container.ID)
// FIXME: this is necessary because runtime.Create might return a nil container
// with a non-nil error. This should not happen! Once it's fixed we
// can remove this workaround.
if container != nil {
job.Printf("%s\n", container.ID)
}
for _, warning := range buildWarnings {
job.Errorf("%s\n", warning)
}
@ -1603,7 +1614,7 @@ func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HT
return srv.reqFactory
}
func (srv *Server) LogEvent(action, id, from string) {
func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
now := time.Now().Unix()
jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
srv.events = append(srv.events, jm)
@ -1613,6 +1624,7 @@ func (srv *Server) LogEvent(action, id, from string) {
default:
}
}
return &jm
}
type Server struct {

109
server_unit_test.go Normal file
View file

@ -0,0 +1,109 @@
package docker
import (
"github.com/dotcloud/docker/utils"
"testing"
"time"
)
func TestPools(t *testing.T) {
srv := &Server{
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
err := srv.poolAdd("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("push", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("pull", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("push", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}
func TestLogEvent(t *testing.T) {
srv := &Server{
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}
// FIXME: this is duplicated from integration/commands_test.go
func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
c := make(chan bool)
// Make sure we are not too long
go func() {
time.Sleep(d)
c <- true
}()
go func() {
f()
c <- false
}()
if <-c && msg != "" {
t.Fatal(msg)
}
}

View file

@ -1,111 +0,0 @@
package docker
import (
"fmt"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
_, err = runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].Created < images[1].Created {
t.Error("Expected []APIImges to be ordered by most recent creation date.")
}
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
err := generateImage("bar", runtime)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", runtime)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" {
t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images)
}
}
func generateImage(name string, runtime *Runtime) error {
archive, err := fakeTar()
if err != nil {
return err
}
image, err := runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
return err
}
srv := &Server{runtime: runtime}
srv.ContainerTag(image.ID, "repo", name, false)
return nil
}
func TestSortUniquePorts(t *testing.T) {
ports := []Port{
Port("6379/tcp"),
Port("22/tcp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "22/tcp" {
t.Log(fmt.Sprint(first))
t.Fail()
}
}
func TestSortSamePortWithDifferentProto(t *testing.T) {
ports := []Port{
Port("8888/tcp"),
Port("8888/udp"),
Port("6379/tcp"),
Port("6379/udp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "6379/tcp" {
t.Fail()
}
}

41
sorter_unit_test.go Normal file
View file

@ -0,0 +1,41 @@
package docker
import (
"fmt"
"testing"
)
func TestSortUniquePorts(t *testing.T) {
ports := []Port{
Port("6379/tcp"),
Port("22/tcp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "22/tcp" {
t.Log(fmt.Sprint(first))
t.Fail()
}
}
func TestSortSamePortWithDifferentProto(t *testing.T) {
ports := []Port{
Port("8888/tcp"),
Port("8888/udp"),
Port("6379/tcp"),
Port("6379/udp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "6379/tcp" {
t.Fail()
}
}

View file

@ -1,46 +0,0 @@
package docker
import (
"testing"
)
func TestLookupImage(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + DEFAULTTAG); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + "fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage("fail:fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
}

85
tags_unit_test.go Normal file
View file

@ -0,0 +1,85 @@
package docker
import (
"github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/utils"
"os"
"path"
"testing"
)
const (
testImageName = "myapp"
testImageID = "foo"
)
func mkTestTagStore(root string, t *testing.T) *TagStore {
driver, err := graphdriver.New(root)
if err != nil {
t.Fatal(err)
}
graph, err := NewGraph(root, driver)
if err != nil {
t.Fatal(err)
}
store, err := NewTagStore(path.Join(root, "tags"), graph)
if err != nil {
t.Fatal(err)
}
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
img := &Image{ID: testImageID}
if err := graph.Register(nil, archive, img); err != nil {
t.Fatal(err)
}
if err := store.Set(testImageName, "", testImageID, false); err != nil {
t.Fatal(err)
}
return store
}
func TestLookupImage(t *testing.T) {
tmp, err := utils.TestDirectory("")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
store := mkTestTagStore(tmp, t)
if img, err := store.LookupImage(testImageName); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := store.LookupImage("fail:fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := store.LookupImage(testImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
}

View file

@ -81,12 +81,12 @@ func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator {
return ret
}
func (self *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
if req == nil {
return req, nil
}
userAgent := appendVersions(req.UserAgent(), self.versions...)
userAgent := appendVersions(req.UserAgent(), h.versions...)
if len(userAgent) > 0 {
req.Header.Set("User-Agent", userAgent)
}
@ -97,11 +97,11 @@ type HTTPMetaHeadersDecorator struct {
Headers map[string][]string
}
func (self *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
if self.Headers == nil {
func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
if h.Headers == nil {
return req, nil
}
for k, v := range self.Headers {
for k, v := range h.Headers {
req.Header[k] = v
}
return req, nil
@ -114,25 +114,25 @@ type HTTPRequestFactory struct {
}
func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory {
ret := new(HTTPRequestFactory)
ret.decorators = d
return ret
return &HTTPRequestFactory{
decorators: d,
}
}
// NewRequest() creates a new *http.Request,
// applies all decorators in the HTTPRequestFactory on the request,
// then applies decorators provided by d on the request.
func (self *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) {
func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) {
req, err := http.NewRequest(method, urlStr, body)
if err != nil {
return nil, err
}
// By default, a nil factory should work.
if self == nil {
if h == nil {
return req, nil
}
for _, dec := range self.decorators {
for _, dec := range h.decorators {
req, err = dec.ChangeRequest(req)
if err != nil {
return nil, err
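Putting the pieces together, a caller builds a factory from decorators and lets NewRequest apply them. A sketch using the meta-headers decorator defined above; the URL and header values are illustrative:

    factory := utils.NewHTTPRequestFactory(&utils.HTTPMetaHeadersDecorator{
        Headers: map[string][]string{"X-Docker-Test": {"1"}},
    })
    req, err := factory.NewRequest("GET", "http://example.com/v1/_ping", nil)
    if err != nil {
        return err
    }
    // req now carries the X-Docker-Test header added by the decorator.
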

View file

@ -1123,7 +1123,7 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
for len(processed) < len(graph.nodes) {
// Use a temporary buffer for processed nodes, otherwise
// nodes that depend on each other could end up in the same round.
tmp_processed := []*DependencyNode{}
tmpProcessed := []*DependencyNode{}
for _, node := range graph.nodes {
// If the node has more dependencies than what we have cleared,
// it won't be valid for this round.
@ -1137,7 +1137,7 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
// It's not been processed yet and has 0 deps. Add it!
// (this is a shortcut for what we're doing below)
if node.Degree() == 0 {
tmp_processed = append(tmp_processed, node)
tmpProcessed = append(tmpProcessed, node)
continue
}
// If at least one dep hasn't been processed yet, we can't
@ -1151,17 +1151,17 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
}
// All deps have already been processed. Add it!
if ok {
tmp_processed = append(tmp_processed, node)
tmpProcessed = append(tmpProcessed, node)
}
}
Debugf("Round %d: found %d available nodes", len(result), len(tmp_processed))
Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed))
// If no progress has been made this round,
// that means we have circular dependencies.
if len(tmp_processed) == 0 {
if len(tmpProcessed) == 0 {
return nil, fmt.Errorf("Could not find a solution to this dependency graph")
}
round := []string{}
for _, nd := range tmp_processed {
for _, nd := range tmpProcessed {
round = append(round, nd.id)
processed[nd] = true
}
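As a concrete example, a graph in which nodes b and c each depend on a single node a resolves in two rounds: a clears first because it has no unprocessed dependencies, then b and c clear together, giving the traversal map [["a"], ["b", "c"]]. A cycle such as a depending on b and b depending on a would make no progress in some round and hit the circular-dependency error above.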
@ -1242,3 +1242,40 @@ func PartParser(template, data string) (map[string]string, error) {
}
return out, nil
}
var globalTestID string
// TestDirectory creates a new temporary directory and returns its path.
// The contents of directory at path `templateDir` is copied into the
// new directory.
func TestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = RandomString()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if templateDir != "" {
if err = CopyDirectory(templateDir, dir); err != nil {
return
}
}
return
}
// GetCallerName introspects the call stack and returns the name of the
// function `depth` levels down in the stack.
func GetCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
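A caller that wants a scratch copy of a template tree uses it like this; the template path is illustrative:

    dir, err := utils.TestDirectory("/path/to/template-dir")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)
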

View file

@ -1,493 +0,0 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"testing"
)
// This file contains utility functions for docker's unit test suite.
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
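// In other words (a generic two-file sketch of the Go convention, not code
// from this repository): a _test.go file in the same package can call
// unexported identifiers directly.
//
//	// foo.go, package docker
//	func lowercaseHelper() int { return 42 }
//
//	// foo_test.go, package docker — compiled only for `go test`,
//	// yet it can call the unexported helper directly.
//	func TestLowercaseHelper(t *testing.T) {
//		if lowercaseHelper() != 42 {
//			t.Fail()
//		}
//	}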
var globalTestID string
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}
func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}
func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*Server)
if !ok {
panic("Legacy server field in engine does not cast to *Server")
}
return srv
}
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}
func newTestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = GenerateID()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if err = utils.CopyDirectory(templateDir, dir); err != nil {
return
}
return
}
func getCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
// Call t.Fatal() at the first error.
func writeFile(dst, content string, t *testing.T) {
// Create subdirectories if necessary
if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
if err != nil {
t.Fatal(err)
}
// Write content (truncate if it exists)
if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
t.Fatal(err)
}
}
// Return the contents of file at path `src`.
// Call t.Fatal() at the first error (including if the file doesn't exist)
func readFile(src string, t *testing.T) (content string) {
f, err := os.Open(src)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
return string(data)
}
// Create a test container from the given runtime `r` and run arguments `args`.
// If the image name is "_" (e.g. []string{"-i", "-t", "_", "bash"}), it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
config, hostConfig, _, err := ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
return nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
c, _, err := r.Create(config, "")
if err != nil {
return nil, err
}
c.hostConfig = hostConfig
return c, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(r *Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
container, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
return "", err
}
defer stdout.Close()
if err := container.Start(); err != nil {
return "", err
}
container.Wait()
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
output = string(data)
return
}
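As a rough illustration of how these helpers were typically combined, a hypothetical test (not taken from this file, and assuming the mkRuntime/runContainer flow shown above) might look like this:

func TestRunEcho(t *testing.T) {
	runtime := mkRuntime(t)
	// "_" is replaced by the current test image inside mkContainer.
	output, err := runContainer(runtime, []string{"_", "echo", "-n", "hello"}, t)
	if err != nil {
		t.Fatal(err)
	}
	if output != "hello" {
		t.Fatalf("Expected 'hello', got %q", output)
	}
}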
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
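Taken together, these three tests pin down the port-spec shape exercised here: ip:hostPort:containerPort with an optional /proto suffix, where an empty host port means "allocate dynamically" and the protocol defaults to tcp. A minimal sketch, assuming it runs inside the docker package (with fmt imported) the way the deleted tests did:

ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
	panic(err)
}
for p := range ports {
	b := bindings[p][0]
	// Prints: 6000 udp host="192.168.1.100" port=""
	fmt.Printf("%s %s host=%q port=%q\n", p.Port(), p.Proto(), b.HostIp, b.HostPort)
}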