Mirror of https://github.com/microsoft/docker.git
Merge branch 'master' into builder_server-3
Conflicts: docs/sources/use/builder.rst
This commit is contained in:
Commit
659e846006
@ -108,7 +108,7 @@ Note that some methods are community contributions and not yet officially suppor
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Mac OS X (with Vagrant)](http://docs.docker.io/en/latest/installation/vagrant/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
@ -216,7 +216,8 @@ PORT=$(docker port $JOB 4444)
|
|||
|
||||
# Connect to the public port via the host's public address
|
||||
# Please note that, because of how routing works, connecting to localhost or 127.0.0.1 on $PORT will not work.
|
||||
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
|
||||
# Replace *eth0* according to your local interface name.
|
||||
IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
|
||||
echo hello world | nc $IP $PORT
|
||||
|
||||
# Verify that the network connection worked
|
||||
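For readers who prefer to do the same lookup from Go instead of the `ip`/`perl` one-liner above, here is a minimal sketch (not part of this commit). The interface name `eth0` and the port value are placeholders; the port would normally be the host port printed by `docker port $JOB 4444`.

```go
package main

import (
	"fmt"
	"net"
	"os"
)

// firstIPv4 returns the first IPv4 address configured on the named interface,
// which is what the `ip -o -4 addr list eth0` one-liner extracts.
func firstIPv4(name string) (net.IP, error) {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return nil, err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return nil, err
	}
	for _, addr := range addrs {
		if ipNet, ok := addr.(*net.IPNet); ok {
			if ip4 := ipNet.IP.To4(); ip4 != nil {
				return ip4, nil
			}
		}
	}
	return nil, fmt.Errorf("no IPv4 address on %s", name)
}

func main() {
	ip, err := firstIPv4("eth0") // assumption: adjust to your interface name
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	port := "49153" // assumption: the host port reported by `docker port`
	// Connect via the host's public address; localhost/127.0.0.1 will not work here.
	conn, err := net.Dial("tcp", net.JoinHostPort(ip.String(), port))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer conn.Close()
	fmt.Fprintln(conn, "hello world")
}
```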
|
@ -262,14 +263,14 @@ Setting up a dev environment
Instructions that have been verified to work on Ubuntu 12.10,

```bash
sudo apt-get -y install lxc wget bsdtar curl golang git
sudo apt-get -y install lxc curl xz-utils golang git

export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH

mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
git clone https://github.com/dotcloud/docker.git
cd docker

go get -v github.com/dotcloud/docker/...
@ -3,6 +3,7 @@
|
|||
|
||||
BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
|
||||
BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
|
||||
VF_BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64_vmware_fusion.box"
|
||||
AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
|
||||
AWS_AMI = ENV['AWS_AMI'] || "ami-d0f89fb9"
|
||||
FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
|
||||
|
@ -67,6 +68,13 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
|
|||
rs.image = /Ubuntu/
|
||||
end
|
||||
|
||||
config.vm.provider :vmware_fusion do |f, override|
|
||||
override.vm.box = BOX_NAME
|
||||
override.vm.box_url = VF_BOX_URI
|
||||
override.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
f.vmx["displayName"] = "docker"
|
||||
end
|
||||
|
||||
config.vm.provider :virtualbox do |vb|
|
||||
config.vm.box = BOX_NAME
|
||||
config.vm.box_url = BOX_URI
|
||||
|
|
130
archive.go
|
@ -1,12 +1,15 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
)
|
||||
|
||||
type Archive io.Reader
|
||||
|
@ -20,6 +23,37 @@ const (
|
|||
Xz
|
||||
)
|
||||
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for _, c := range source[:10] {
|
||||
utils.Debugf("%x", c)
|
||||
}
|
||||
|
||||
sourceLen := len(source)
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Bzip2: {0x42, 0x5A, 0x68},
|
||||
Gzip: {0x1F, 0x8B, 0x08},
|
||||
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
|
||||
} {
|
||||
fail := false
|
||||
if len(m) > sourceLen {
|
||||
utils.Debugf("Len too short")
|
||||
continue
|
||||
}
|
||||
i := 0
|
||||
for _, b := range m {
|
||||
if b != source[i] {
|
||||
fail = true
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
if !fail {
|
||||
return compression
|
||||
}
|
||||
}
|
||||
return Uncompressed
|
||||
}
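A quick reading aid for the map above: each entry is the magic-number prefix of a compression format — bzip2 streams begin with "BZh" (0x42 0x5A 0x68), gzip with 0x1F 0x8B 0x08, and xz with 0xFD "7zXZ" 0x00. The standalone sketch below (mine, not the project's code) performs the same prefix check on a file, which is how Untar further down decides which tar flag to use.

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

// Same magic numbers as the DetectCompression map above.
var magics = map[string][]byte{
	"bzip2": {0x42, 0x5A, 0x68},
	"gzip":  {0x1F, 0x8B, 0x08},
	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
}

// detect compares the first bytes of a stream against each known prefix and
// falls back to "uncompressed", mirroring DetectCompression.
func detect(header []byte) string {
	for name, magic := range magics {
		if bytes.HasPrefix(header, magic) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	header := make([]byte, 10) // Untar peeks the same 10 bytes
	n, _ := f.Read(header)
	fmt.Println(detect(header[:n]))
}
```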
|
||||
|
||||
func (compression *Compression) Flag() string {
|
||||
switch *compression {
|
||||
case Bzip2:
|
||||
|
@ -46,15 +80,43 @@ func (compression *Compression) Extension() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
// Tar creates an archive from the directory at `path`, and returns it as a
|
||||
// stream of bytes.
|
||||
func Tar(path string, compression Compression) (io.Reader, error) {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
|
||||
return CmdStream(cmd)
|
||||
return TarFilter(path, compression, nil)
|
||||
}
|
||||
|
||||
// Tar creates an archive from the directory at `path`, only including files whose relative
|
||||
// paths are included in `filter`. If `filter` is nil, then all files are included.
|
||||
func TarFilter(path string, compression Compression, filter []string) (io.Reader, error) {
|
||||
args := []string{"tar", "-f", "-", "-C", path}
|
||||
if filter == nil {
|
||||
filter = []string{"."}
|
||||
}
|
||||
for _, f := range filter {
|
||||
args = append(args, "-c"+compression.Flag(), f)
|
||||
}
|
||||
return CmdStream(exec.Command(args[0], args[1:]...))
|
||||
}
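To make the command construction above concrete: with Gzip compression and a two-entry filter, the loop repeats the `-c<flag>`/file pair once per entry. A tiny standalone sketch with illustrative values only:

```go
package main

import "fmt"

func main() {
	path, flag := "/build/context", "z" // illustrative path; "z" is Gzip's tar flag
	filter := []string{"Dockerfile", "app"}

	// Mirrors the loop in TarFilter above.
	args := []string{"tar", "-f", "-", "-C", path}
	for _, f := range filter {
		args = append(args, "-c"+flag, f)
	}
	// Prints: [tar -f - -C /build/context -cz Dockerfile -cz app]
	fmt.Println(args)
}
```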
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `path`.
|
||||
// The archive may be compressed with one of the following algorithms:
|
||||
// identity (uncompressed), gzip, bzip2, xz.
|
||||
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
||||
func Untar(archive io.Reader, path string) error {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
|
||||
cmd.Stdin = archive
|
||||
|
||||
bufferedArchive := bufio.NewReaderSize(archive, 10)
|
||||
buf, err := bufferedArchive.Peek(10)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compression := DetectCompression(buf)
|
||||
|
||||
utils.Debugf("Archive compression detected: %s", compression.Extension())
|
||||
|
||||
cmd := exec.Command("tar", "-f", "-", "-C", path, "-x"+compression.Flag())
|
||||
cmd.Stdin = bufferedArchive
|
||||
// Hardcode locale environment for predictable outcome regardless of host configuration.
|
||||
// (see https://github.com/dotcloud/docker/issues/355)
|
||||
cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
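The key trick in the new Untar is detecting compression without losing any input: the stream is wrapped in a bufio.Reader, Peek looks at the first 10 bytes, and the same buffered reader (still holding those bytes) becomes tar's stdin. Below is a standalone sketch of that peek-without-consuming pattern, separate from the project's code:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Pretend this is an incoming archive: gzip magic bytes plus some payload.
	stream := io.Reader(bytes.NewReader([]byte{0x1F, 0x8B, 0x08, 'r', 'e', 's', 't', '.', '.', '.'}))

	buffered := bufio.NewReaderSize(stream, 10)
	head, err := buffered.Peek(3) // look ahead; nothing is consumed
	if err != nil {
		panic(err)
	}
	if bytes.Equal(head, []byte{0x1F, 0x8B, 0x08}) {
		fmt.Println("detected gzip")
	}

	// Whatever reads the buffered reader next still sees the stream from byte 0,
	// exactly as tar does when Untar sets cmd.Stdin = bufferedArchive.
	rest, _ := io.ReadAll(buffered)
	fmt.Printf("%d bytes left, starting with %x\n", len(rest), rest[:3])
}
```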
|
||||
|
@ -65,6 +127,18 @@ func Untar(archive io.Reader, path string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// TarUntar is a convenience function which calls Tar and Untar, with
|
||||
// the output of one piped into the other. If either Tar or Untar fails,
|
||||
// TarUntar aborts and returns the error.
|
||||
func TarUntar(src string, filter []string, dst string) error {
|
||||
utils.Debugf("TarUntar(%s %s %s)", src, filter, dst)
|
||||
archive, err := TarFilter(src, Uncompressed, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Untar(archive, dst)
|
||||
}
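TarUntar is the primitive the reworked CopyWithTar below is built on: the stream produced by TarFilter is fed straight into Untar, so directories are copied with their metadata and no intermediate archive touches the disk. A short fragment showing how a caller in this package might use it; the paths are made up for illustration.

```go
// Illustrative fragment (assumes the functions defined above in archive.go):
// copy a single file and a subdirectory out of src into dst in one pass.
func exampleTarUntar() error {
	src := "/var/lib/docker/containers/abc123/rootfs" // placeholder path
	dst := "/tmp/scratch"                             // placeholder path
	return TarUntar(src, []string{"etc/hostname", "usr/share/doc"}, dst)
}
```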
|
||||
|
||||
// UntarPath is a convenience function which looks for an archive
|
||||
// at filesystem path `src`, and unpacks it at `dst`.
|
||||
func UntarPath(src, dst string) error {
|
||||
|
@ -82,11 +156,55 @@ func UntarPath(src, dst string) error {
|
|||
// intermediary disk IO.
|
||||
//
|
||||
func CopyWithTar(src, dst string) error {
|
||||
archive, err := Tar(src, Uncompressed)
|
||||
srcSt, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Untar(archive, dst)
|
||||
var dstExists bool
|
||||
dstSt, err := os.Stat(dst)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
dstExists = true
|
||||
}
|
||||
// Things that can go wrong if the source is a directory
|
||||
if srcSt.IsDir() {
|
||||
// The destination exists and is a regular file
|
||||
if dstExists && !dstSt.IsDir() {
|
||||
return fmt.Errorf("Can't copy a directory over a regular file")
|
||||
}
|
||||
// Things that can go wrong if the source is a regular file
|
||||
} else {
|
||||
utils.Debugf("The destination exists, it's a directory, and doesn't end in /")
|
||||
// The destination exists, it's a directory, and doesn't end in /
|
||||
if dstExists && dstSt.IsDir() && dst[len(dst)-1] != '/' {
|
||||
return fmt.Errorf("Can't copy a regular file over a directory %s |%s|", dst, dst[len(dst)-1])
|
||||
}
|
||||
}
|
||||
// Create the destination
|
||||
var dstDir string
|
||||
if srcSt.IsDir() || dst[len(dst)-1] == '/' {
|
||||
// The destination ends in /, or the source is a directory
|
||||
// --> dst is the holding directory and needs to be created for -C
|
||||
dstDir = dst
|
||||
} else {
|
||||
// The destination doesn't end in /
|
||||
// --> dst is the file
|
||||
dstDir = path.Dir(dst)
|
||||
}
|
||||
if !dstExists {
|
||||
// Create the holding directory if necessary
|
||||
utils.Debugf("Creating the holding directory %s", dstDir)
|
||||
if err := os.MkdirAll(dstDir, 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !srcSt.IsDir() {
|
||||
return TarUntar(path.Dir(src), []string{path.Base(src)}, dstDir)
|
||||
}
|
||||
return TarUntar(src, nil, dstDir)
|
||||
}
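Because the trailing-slash handling above is easy to misread, here are a few worked cases, derived only from the code in this hunk (my reading of it, not separate documentation):

```go
// CopyWithTar("/data",         "/backup/")     -> /data is a directory, so /backup/
//                                                 is the holding dir and receives the
//                                                 contents of /data.
// CopyWithTar("/etc/hostname", "/backup/")     -> dst ends in '/', so the file lands
//                                                 at /backup/hostname.
// CopyWithTar("/etc/hostname", "/backup/host") -> no trailing slash: the holding dir
//                                                 is path.Dir(dst) = /backup, and the
//                                                 file is extracted there under its
//                                                 original base name.
// Error cases: copying a directory over an existing regular file, or a regular
// file over an existing directory whose path does not end in '/'.
```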
|
||||
|
||||
// CmdStream executes a command, and returns its stdout as a stream.
|
||||
|
|
|
@ -1,10 +1,13 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
@ -58,20 +61,58 @@ func TestCmdStreamGood(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestTarUntar(t *testing.T) {
|
||||
archive, err := Tar(".", Uncompressed)
|
||||
func tarUntar(t *testing.T, origin string, compression Compression) error {
|
||||
archive, err := Tar(origin, compression)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 10)
|
||||
if _, err := archive.Read(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
archive = io.MultiReader(bytes.NewReader(buf), archive)
|
||||
|
||||
detectedCompression := DetectCompression(buf)
|
||||
if detectedCompression.Extension() != compression.Extension() {
|
||||
return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
|
||||
}
|
||||
|
||||
tmp, err := ioutil.TempDir("", "docker-test-untar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
if err := Untar(archive, tmp); err != nil {
|
||||
t.Fatal(err)
|
||||
return err
|
||||
}
|
||||
if _, err := os.Stat(tmp); err != nil {
|
||||
t.Fatalf("Error stating %s: %s", tmp, err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTarUntar(t *testing.T) {
|
||||
origin, err := ioutil.TempDir("", "docker-test-untar-origin")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(origin)
|
||||
if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, c := range []Compression{
|
||||
Uncompressed,
|
||||
Gzip,
|
||||
Bzip2,
|
||||
Xz,
|
||||
} {
|
||||
if err := tarUntar(t, origin, c); err != nil {
|
||||
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,8 +10,8 @@ import (
|
|||
|
||||
func TestEncodeAuth(t *testing.T) {
|
||||
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
|
||||
authStr := EncodeAuth(newAuthConfig)
|
||||
decAuthConfig, err := DecodeAuth(authStr)
|
||||
authStr := encodeAuth(newAuthConfig)
|
||||
decAuthConfig, err := decodeAuth(authStr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ func TestLogin(t *testing.T) {
|
|||
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
|
||||
defer os.Setenv("DOCKER_INDEX_URL", "")
|
||||
authConfig := NewAuthConfig("unittester", "surlautrerivejetattendrai", "noise+unittester@dotcloud.com", "/tmp")
|
||||
status, err := Login(authConfig)
|
||||
status, err := Login(authConfig, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func TestCreateAccount(t *testing.T) {
|
|||
token := hex.EncodeToString(tokenBuffer)[:12]
|
||||
username := "ut" + token
|
||||
authConfig := NewAuthConfig(username, "test42", "docker-ut+"+token+"@example.com", "/tmp")
|
||||
status, err := Login(authConfig)
|
||||
status, err := Login(authConfig, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -60,7 +60,7 @@ func TestCreateAccount(t *testing.T) {
|
|||
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
|
||||
}
|
||||
|
||||
status, err = Login(authConfig)
|
||||
status, err = Login(authConfig, false)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error but found nil instead")
|
||||
}
|
||||
|
|
|
@ -178,15 +178,15 @@ func (b *buildFile) addRemote(container *Container, orig, dest string) error {
|
|||
func (b *buildFile) addContext(container *Container, orig, dest string) error {
|
||||
origPath := path.Join(b.context, orig)
|
||||
destPath := path.Join(container.RootfsPath(), dest)
|
||||
|
||||
// Preserve the trailing '/'
|
||||
if dest[len(dest)-1] == '/' {
|
||||
destPath = destPath + "/"
|
||||
}
|
||||
fi, err := os.Stat(origPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
if err := os.MkdirAll(destPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := CopyWithTar(origPath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
66
commands.go
|
@ -1070,37 +1070,22 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
splitStderr := container.Config.Tty
|
||||
|
||||
connections := 1
|
||||
if splitStderr {
|
||||
connections += 1
|
||||
if !container.State.Running {
|
||||
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
|
||||
}
|
||||
chErrors := make(chan error, connections)
|
||||
|
||||
if container.Config.Tty {
|
||||
cli.monitorTtySize(cmd.Arg(0))
|
||||
}
|
||||
if splitStderr {
|
||||
go func() {
|
||||
chErrors <- cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?stream=1&stderr=1", false, nil, os.Stderr)
|
||||
}()
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("stream", "1")
|
||||
v.Set("stdin", "1")
|
||||
v.Set("stdout", "1")
|
||||
if !splitStderr {
|
||||
v.Set("stderr", "1")
|
||||
}
|
||||
go func() {
|
||||
chErrors <- cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, os.Stdin, os.Stdout)
|
||||
}()
|
||||
for connections > 0 {
|
||||
err := <-chErrors
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
connections -= 1
|
||||
v.Set("stderr", "1")
|
||||
|
||||
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, os.Stdin, os.Stdout); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
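The rewritten CmdAttach no longer opens a second connection for stderr; it builds one query with url.Values and performs a single hijack. For reference, this is the query string that construction yields (a standalone sketch with a made-up container ID):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("stream", "1")
	v.Set("stdin", "1")
	v.Set("stdout", "1")
	v.Set("stderr", "1")
	// url.Values.Encode sorts keys, so the request path becomes:
	// /containers/4c01db0b339c/attach?stderr=1&stdin=1&stdout=1&stream=1
	fmt.Println("/containers/4c01db0b339c/attach?" + v.Encode())
}
```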
|
||||
|
@ -1269,16 +1254,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
fmt.Fprintln(os.Stderr, "WARNING: ", warning)
|
||||
}
|
||||
|
||||
splitStderr := !config.Tty
|
||||
|
||||
connections := 0
|
||||
if config.AttachStdin || config.AttachStdout || (!splitStderr && config.AttachStderr) {
|
||||
connections += 1
|
||||
}
|
||||
if splitStderr && config.AttachStderr {
|
||||
connections += 1
|
||||
}
|
||||
|
||||
//start the container
|
||||
_, _, err = cli.call("POST", "/containers/"+out.ID+"/start", nil)
|
||||
if err != nil {
|
||||
|
@ -1287,19 +1262,11 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
fmt.Println(out.ID)
|
||||
}
|
||||
if connections > 0 {
|
||||
chErrors := make(chan error, connections)
|
||||
} else {
|
||||
if config.Tty {
|
||||
cli.monitorTtySize(out.ID)
|
||||
}
|
||||
|
||||
if splitStderr && config.AttachStderr {
|
||||
go func() {
|
||||
chErrors <- cli.hijack("POST", "/containers/"+out.ID+"/attach?logs=1&stream=1&stderr=1", config.Tty, nil, os.Stderr)
|
||||
}()
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("logs", "1")
|
||||
v.Set("stream", "1")
|
||||
|
@ -1310,19 +1277,12 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
|||
if config.AttachStdout {
|
||||
v.Set("stdout", "1")
|
||||
}
|
||||
if !splitStderr && config.AttachStderr {
|
||||
if config.AttachStderr {
|
||||
v.Set("stderr", "1")
|
||||
}
|
||||
go func() {
|
||||
chErrors <- cli.hijack("POST", "/containers/"+out.ID+"/attach?"+v.Encode(), config.Tty, os.Stdin, os.Stdout)
|
||||
}()
|
||||
for connections > 0 {
|
||||
err := <-chErrors
|
||||
if err != nil {
|
||||
utils.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
connections -= 1
|
||||
if err := cli.hijack("POST", "/containers/"+out.ID+"/attach?"+v.Encode(), config.Tty, os.Stdin, os.Stdout); err != nil {
|
||||
utils.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
echo "Ensuring basic dependencies are installed..."
|
||||
apt-get -qq update
|
||||
apt-get -qq install lxc wget bsdtar
|
||||
apt-get -qq install lxc wget
|
||||
|
||||
echo "Looking in /proc/filesystems to see if we have AUFS support..."
|
||||
if grep -q aufs /proc/filesystems
|
||||
|
|
|
@ -33,7 +33,7 @@ Installation
|
|||
sudo apt-get install python-software-properties
|
||||
sudo add-apt-repository ppa:gophers/go
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install lxc wget bsdtar curl golang-stable git aufs-tools
|
||||
sudo apt-get -y install lxc xz-utils curl golang-stable git aufs-tools
|
||||
|
||||
export GOPATH=~/go/
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
|
|
@ -72,7 +72,7 @@ Connect to the host os with the redis-cli.
|
|||
|
||||
docker ps # grab the new container id
|
||||
docker port <container_id> 6379 # grab the external port
|
||||
ifconfig # grab the host ip address
|
||||
ip addr show # grab the host ip address
|
||||
redis-cli -h <host ipaddress> -p <external port>
|
||||
redis 192.168.0.1:49153> set docker awesome
|
||||
OK
|
||||
|
|
|
@ -59,6 +59,7 @@ The password is 'screencast'
|
|||
# it has now given us a port to connect to
|
||||
# we have to connect using a public ip of our host
|
||||
$ hostname
|
||||
# *ifconfig* is deprecated, better use *ip addr show* now
|
||||
$ ifconfig
|
||||
$ ssh root@192.168.33.10 -p 49153
|
||||
# Ah! forgot to set root passwd
|
||||
|
@ -70,6 +71,7 @@ The password is 'screencast'
|
|||
$ docker commit 9e863f0ca0af31c8b951048ba87641d67c382d08d655c2e4879c51410e0fedc1 dhrp/sshd
|
||||
$ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D
|
||||
$ docker port a0aaa9558c90cf5c7782648df904a82365ebacce523e4acc085ac1213bfe2206 22
|
||||
# *ifconfig* is deprecated, better use *ip addr show* now
|
||||
$ ifconfig
|
||||
$ ssh root@192.168.33.10 -p 49154
|
||||
# Thanks for watching, Thatcher thatcher@dotcloud.com
|
||||
|
|
|
@ -30,8 +30,7 @@ Dependencies:
|
|||
* 3.8 Kernel (read more about :ref:`kernel`)
|
||||
* AUFS filesystem support
|
||||
* lxc
|
||||
* bsdtar
|
||||
|
||||
* xz-utils
|
||||
|
||||
Get the docker binary:
|
||||
----------------------
|
||||
|
|
|
@ -82,7 +82,8 @@ Expose a service on a TCP port
|
|||
|
||||
# Connect to the public port via the host's public address
|
||||
# Please note that, because of how routing works, connecting to localhost or 127.0.0.1 on $PORT will not work.
|
||||
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
|
||||
# Replace *eth0* according to your local interface name.
|
||||
IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
|
||||
echo hello world | nc $IP $PORT
|
||||
|
||||
# Verify that the network connection worked
|
||||
|
|
|
@ -138,11 +138,32 @@ curl was installed within the image.
``ADD <src> <dest>``

The `ADD` instruction will insert the files from the `<src>` path of the context into `<dest>` path
of the container.
`<src>` can be a local path or a remote file URL.
The `ADD` instruction will copy new files from <src> and add them to the container's filesystem at path `<dest>`.

The context must be set in order to use this instruction. (see examples)
`<src>` must be the path to a file or directory relative to the source directory being built (also called the
context of the build) or a remote file URL.

`<dest>` is the path at which the source will be copied in the destination container.

The copy obeys the following rules:

If `<src>` is a directory, the entire directory is copied, including filesystem metadata.

If `<src>` is a tar archive in a recognized compression format (identity, gzip, bzip2 or xz), it
is unpacked as a directory.

When a directory is copied or unpacked, it has the same behavior as 'tar -x': the result is the union of
a) whatever existed at the destination path and b) the contents of the source tree, with conflicts resolved
in favor of b on a file-by-file basis.

If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case,
if `<dst>` ends with a trailing slash '/', it will be considered a directory and the contents of `<src>`
will be written at `<dst>/base(<src>)`.
If `<dst>` does not end with a trailing slash, it will be considered a regular file and the contents
of `<src>` will be written at `<dst>`.

If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
files and directories are created with mode 0700, uid and gid 0.

3. Dockerfile Examples
======================
@ -192,11 +192,19 @@ func TestDelete(t *testing.T) {
|
|||
}
|
||||
assertNImages(graph, t, 0)
|
||||
|
||||
archive, err = fakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test 2 create (same name) / 1 delete
|
||||
img1, err := graph.Create(archive, nil, "Testing", "", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
archive, err = fakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -212,6 +220,10 @@ func TestDelete(t *testing.T) {
|
|||
}
|
||||
assertNImages(graph, t, 1)
|
||||
|
||||
archive, err = fakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test delete twice (pull -> rm -> pull -> rm)
|
||||
if err := graph.Register(archive, false, img1); err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -22,7 +22,7 @@ Vagrant::Config.run do |config|
|
|||
pkg_cmd = "touch #{DOCKER_PATH}; "
|
||||
# Install docker dependencies
|
||||
pkg_cmd << "export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; " \
|
||||
"apt-get install -q -y lxc bsdtar git aufs-tools golang make linux-image-extra-3.8.0-19-generic; " \
|
||||
"apt-get install -q -y lxc git aufs-tools golang make linux-image-extra-3.8.0-19-generic; " \
|
||||
"chown -R #{USER}.#{USER} #{GOPATH}; " \
|
||||
"install -m 0664 #{CFG_PATH}/bash_profile /home/#{USER}/.bash_profile"
|
||||
config.vm.provision :shell, :inline => pkg_cmd
|
||||
|
|
|
@ -67,7 +67,11 @@ lxc.cgroup.devices.allow = c 10:200 rwm
|
|||
|
||||
|
||||
# standard mount point
|
||||
# WARNING: procfs is a known attack vector and should probably be disabled
|
||||
# if your userspace allows it. eg. see http://blog.zx2c4.com/749
|
||||
lxc.mount.entry = proc {{$ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
|
||||
# WARNING: sysfs is a known attack vector and should probably be disabled
|
||||
# if your userspace allows it. eg. see http://bit.ly/T9CkqJ
|
||||
lxc.mount.entry = sysfs {{$ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
|
||||
lxc.mount.entry = devpts {{$ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
|
||||
#lxc.mount.entry = varrun {{$ROOTFS}}/var/run tmpfs mode=755,size=4096k,nosuid,nodev,noexec 0 0
|
||||
|
@ -86,6 +90,9 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,rw 0 0
|
|||
{{end}}
|
||||
|
||||
# drop linux capabilities (apply mainly to the user root in the container)
|
||||
# (Note: 'lxc.cap.keep' is coming soon and should replace this under the
|
||||
# security principle 'deny all unless explicitly permitted', see
|
||||
# http://sourceforge.net/mailarchive/message.php?msg_id=31054627 )
|
||||
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
|
||||
|
||||
# limits
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
# Ubuntu package Makefile
|
||||
#
|
||||
# Dependencies: debhelper autotools-dev devscripts golang
|
||||
# Dependencies: debhelper autotools-dev devscripts golang-stable
|
||||
# Notes:
|
||||
# Use 'make ubuntu' to create the ubuntu package
|
||||
# GPG_KEY environment variable needs to contain a GPG private key for package to be signed
|
||||
# and uploaded to docker PPA.
|
||||
# If GPG_KEY is not defined, make ubuntu will create docker package and exit with
|
||||
# status code 2
|
||||
# Use 'make ubuntu' to create the ubuntu package and push it to the staging PPA by
|
||||
# default. To push to production, set PUBLISH_PPA=1 before doing 'make ubuntu'
|
||||
# GPG_KEY environment variable needs to contain a GPG private key for package
|
||||
# to be signed and uploaded to docker PPA. If GPG_KEY is not defined,
|
||||
# make ubuntu will create docker package and exit with status code 2
|
||||
|
||||
PKG_NAME=lxc-docker
|
||||
VERSION=$(shell head -1 changelog | sed 's/^.\+(\(.\+\)..).\+$$/\1/')
|
||||
GITHUB_PATH=github.com/dotcloud/docker
|
||||
DOCKER_VERSION=${PKG_NAME}_${VERSION}
|
||||
DOCKER_FVERSION=${PKG_NAME}_$(shell head -1 changelog | sed 's/^.\+(\(.\+\)).\+$$/\1/')
|
||||
BUILD_SRC=${CURDIR}/../../build_src
|
||||
VERSION_TAG=v$(shell head -1 changelog | sed 's/^.\+(\(.\+\)-[0-9]\+).\+$$/\1/')
|
||||
VERSION=$(shell sed -En '0,/^\#\# /{s/^\#\# ([^ ]+).+/\1/p}' ../../CHANGELOG.md)
|
||||
|
||||
all:
|
||||
# Compile docker. Used by dpkg-buildpackage.
|
||||
|
@ -35,18 +32,19 @@ ubuntu:
|
|||
# Retrieve docker project and its go structure from internet
|
||||
rm -rf ${BUILD_SRC}
|
||||
git clone $(shell git rev-parse --show-toplevel) ${BUILD_SRC}/${GITHUB_PATH}
|
||||
cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout ${VERSION_TAG} && GOPATH=${BUILD_SRC} go get -d
|
||||
cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout v${VERSION} && GOPATH=${BUILD_SRC} go get -d
|
||||
# Add debianization
|
||||
mkdir ${BUILD_SRC}/debian
|
||||
cp Makefile ${BUILD_SRC}
|
||||
cp -r * ${BUILD_SRC}/debian
|
||||
cp ../../README.md ${BUILD_SRC}
|
||||
./parse_changelog.py < ../../CHANGELOG.md > ${BUILD_SRC}/debian/changelog
|
||||
# Cleanup
|
||||
for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done
|
||||
rm -rf ${BUILD_SRC}/../${DOCKER_VERSION}.orig.tar.gz
|
||||
rm -rf ${BUILD_SRC}/../${PKG_NAME}_${VERSION}.orig.tar.gz
|
||||
rm -rf ${BUILD_SRC}/pkg
|
||||
# Create docker debian files
|
||||
cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz .
|
||||
cd ${BUILD_SRC}; tar czf ../${PKG_NAME}_${VERSION}.orig.tar.gz .
|
||||
cd ${BUILD_SRC}; dpkg-buildpackage -us -uc
|
||||
rm -rf ${BUILD_SRC}
|
||||
# Sign package and upload it to PPA if GPG_KEY environment variable
|
||||
|
@ -54,9 +52,11 @@ ubuntu:
|
|||
if /usr/bin/test "$${GPG_KEY}" == ""; then exit 2; fi
|
||||
mkdir ${BUILD_SRC}
|
||||
# Import gpg signing key
|
||||
echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import
|
||||
echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import || true
|
||||
# Sign the package
|
||||
cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${DOCKER_FVERSION}.dsc
|
||||
cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${PKG_NAME}_${VERSION}-1.dsc
|
||||
cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa
|
||||
cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${DOCKER_FVERSION}_source.changes
|
||||
# Upload to PPA
|
||||
if [ "${PUBLISH_PPA}" = "1" ]; then cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${PKG_NAME}_${VERSION}-1_source.changes; fi
|
||||
if [ "${PUBLISH_PPA}" != "1" ]; then cd ${BUILD_SRC};dput ppa:dotcloud/docker-staging ${PKG_NAME}_${VERSION}-1_source.changes; fi
|
||||
rm -rf ${BUILD_SRC}
|
||||
|
|
|
@ -1,246 +0,0 @@
|
|||
lxc-docker (0.4.2-1) precise; urgency=low
|
||||
- Packaging: Bumped version to work around an Ubuntu bug
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Mon, 17 Jun 2013 00:00:00 -0700
|
||||
|
||||
lxc-docker (0.4.1-1) precise; urgency=low
|
||||
- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
|
||||
- Client: allow multiple params in inspect
|
||||
- Client: Print the container id before the hijack in `docker run`
|
||||
- Remote Api: Add flag to enable cross domain requests
|
||||
- Remote Api/Client: Add images and containers sizes in docker ps and docker images
|
||||
- Registry: add regexp check on repo's name
|
||||
- Registry: Move auth to the client
|
||||
- Registry: Remove login check on pull
|
||||
- Runtime: Configure dns configuration host-wide with 'docker -d -dns'
|
||||
- Runtime: Detect faulty DNS configuration and replace it with a public default
|
||||
- Runtime: allow docker run <name>:<id>
|
||||
- Runtime: you can now specify public port (ex: -p 80:4500)
|
||||
- Runtime: improved image removal to garbage-collect unreferenced parents
|
||||
- Vagrantfile: Add the rest api port to vagrantfile's port_forward
|
||||
- Upgrade to Go 1.1
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Mon, 17 Jun 2013 00:00:00 -0700
|
||||
|
||||
lxc-docker (0.4.0-1) precise; urgency=low
|
||||
- Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
|
||||
- Introducing Remote API: control Docker programmatically using a simple HTTP/json API
|
||||
- Runtime: various reliability and usability improvements
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Mon, 03 Jun 2013 00:00:00 -0700
|
||||
|
||||
lxc-docker (0.3.4-1) precise; urgency=low
|
||||
- Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
|
||||
- Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
|
||||
- Runtime: interactive TTYs correctly handle window resize
|
||||
- Runtime: fix how configuration is merged between layers
|
||||
- Remote API: split stdout and stderr on 'docker run'
|
||||
- Remote API: optionally listen on a different IP and port (use at your own risk)
|
||||
- Documentation: improved install instructions.
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Thu, 30 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.3.3-1) precise; urgency=low
|
||||
- Registry: Fix push regression
|
||||
- Various bugfixes
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Thu, 23 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.3.2-1) precise; urgency=low
|
||||
- Runtime: Store the actual archive on commit
|
||||
- Registry: Improve the checksum process
|
||||
- Registry: Use the size to have a good progress bar while pushing
|
||||
- Registry: Use the actual archive if it exists in order to speed up the push
|
||||
- Registry: Fix error 400 on push
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Fri, 9 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.3.1-1) precise; urgency=low
|
||||
- Builder: Implement the autorun capability within docker builder
|
||||
- Builder: Add caching to docker builder
|
||||
- Builder: Add support for docker builder with native API as top level command
|
||||
- Runtime: Add go version to debug infos
|
||||
- Builder: Implement ENV within docker builder
|
||||
- Registry: Add docker search top level command in order to search a repository
|
||||
- Images: output graph of images to dot (graphviz)
|
||||
- Documentation: new introduction and high-level overview
|
||||
- Documentation: Add the documentation for docker builder
|
||||
- Website: new high-level overview
|
||||
- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
|
||||
- Images: fix ByParent function
|
||||
- Builder: Check the command existence prior to create and add unit tests for the case
|
||||
- Registry: Fix pull for official images with specific tag
|
||||
- Registry: Fix issue when login in with a different user and trying to push
|
||||
- Documentation: CSS fix for docker documentation to make REST API docs look better.
|
||||
- Documentation: Fixed CouchDB example page header mistake
|
||||
- Documentation: fixed README formatting
|
||||
- Registry: Improve checksum - async calculation
|
||||
- Runtime: kernel version - don't show the dash if flavor is empty
|
||||
- Documentation: updated www.docker.io website.
|
||||
- Builder: use any whitespaces instead of tabs
|
||||
- Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Fri, 8 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.3.0-1) precise; urgency=low
|
||||
- Registry: Implement the new registry
|
||||
- Documentation: new example: sharing data between 2 couchdb databases
|
||||
- Runtime: Fix the command existence check
|
||||
- Runtime: strings.Split may return an empty string on no match
|
||||
- Runtime: Fix an index out of range crash if cgroup memory is not
|
||||
- Documentation: Various improvements
|
||||
- Vagrant: Use only one deb line in /etc/apt
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Fri, 5 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.2.2-1) precise; urgency=low
|
||||
- Support for data volumes ('docker run -v=PATH')
|
||||
- Share data volumes between containers ('docker run -volumes-from')
|
||||
- Improved documentation
|
||||
- Upgrade to Go 1.0.3
|
||||
- Various upgrades to the dev environment for contributors
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Fri, 3 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.2.1-1) precise; urgency=low
|
||||
|
||||
- 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
|
||||
- Improve install process on Vagrant
|
||||
- New Dockerfile operation: "maintainer"
|
||||
- New Dockerfile operation: "expose"
|
||||
- New Dockerfile operation: "cmd"
|
||||
- Contrib script to build a Debian base layer
|
||||
- 'docker -d -r': restart crashed containers at daemon startup
|
||||
- Runtime: improve test coverage
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Wed, 1 May 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.2.0-1) precise; urgency=low
|
||||
|
||||
- Runtime: ghost containers can be killed and waited for
|
||||
- Documentation: update install instructions
|
||||
- Packaging: fix Vagrantfile
|
||||
- Development: automate releasing binaries and ubuntu packages
|
||||
- Add a changelog
|
||||
- Various bugfixes
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Mon, 23 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.8-1) precise; urgency=low
|
||||
|
||||
- Dynamically detect cgroup capabilities
|
||||
- Issue stability warning on kernels <3.8
|
||||
- 'docker push' buffers on disk instead of memory
|
||||
- Fix 'docker diff' for removed files
|
||||
- Fix 'docker stop' for ghost containers
|
||||
- Fix handling of pidfile
|
||||
- Various bugfixes and stability improvements
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Mon, 22 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.7-1) precise; urgency=low
|
||||
|
||||
- Container ports are available on localhost
|
||||
- 'docker ps' shows allocated TCP ports
|
||||
- Contributors can run 'make hack' to start a continuous integration VM
|
||||
- Streamline ubuntu packaging & uploading
|
||||
- Various bugfixes and stability improvements
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Thu, 18 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.6-1) precise; urgency=low
|
||||
|
||||
- Record the author of an image with 'docker commit -author'
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.5-1) precise; urgency=low
|
||||
|
||||
- Disable standalone mode
|
||||
- Use a custom DNS resolver with 'docker -d -dns'
|
||||
- Detect ghost containers
|
||||
- Improve diagnosis of missing system capabilities
|
||||
- Allow disabling memory limits at compile time
|
||||
- Add debian packaging
|
||||
- Documentation: installing on Arch Linux
|
||||
- Documentation: running Redis on docker
|
||||
- Fixed lxc 0.9 compatibility
|
||||
- Automatically load aufs module
|
||||
- Various bugfixes and stability improvements
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.4-1) precise; urgency=low
|
||||
|
||||
- Full support for TTY emulation
|
||||
- Detach from a TTY session with the escape sequence `C-p C-q`
|
||||
- Various bugfixes and stability improvements
|
||||
- Minor UI improvements
|
||||
- Automatically create our own bridge interface 'docker0'
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Tue, 9 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.3-1) precise; urgency=low
|
||||
|
||||
- Choose TCP frontend port with '-p :PORT'
|
||||
- Layer format is versioned
|
||||
- Major reliability improvements to the process manager
|
||||
- Various bugfixes and stability improvements
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Thu, 4 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.2-1) precise; urgency=low
|
||||
|
||||
- Set container hostname with 'docker run -h'
|
||||
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
|
||||
- Various bugfixes and stability improvements
|
||||
- UI polish
|
||||
- Progress bar on push/pull
|
||||
- Use XZ compression by default
|
||||
- Make IP allocator lazy
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Wed, 3 Apr 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.1-1) precise; urgency=low
|
||||
|
||||
- Display shorthand IDs for convenience
|
||||
- Stabilize process management
|
||||
- Layers can include a commit message
|
||||
- Simplified 'docker attach'
|
||||
- Fixed support for re-attaching
|
||||
- Various bugfixes and stability improvements
|
||||
- Auto-download at run
|
||||
- Auto-login on push
|
||||
- Beefed up documentation
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Sun, 31 Mar 2013 00:00:00 -0700
|
||||
|
||||
|
||||
lxc-docker (0.1.0-1) precise; urgency=low
|
||||
|
||||
- First release
|
||||
- Implement registry in order to push/pull images
|
||||
- TCP port allocation
|
||||
- Fix termcaps on Linux
|
||||
- Add documentation
|
||||
- Add Vagrant support with Vagrantfile
|
||||
- Add unit tests
|
||||
- Add repository/tags to ease image management
|
||||
- Improve the layer implementation
|
||||
|
||||
-- dotCloud <ops@dotcloud.com> Sat, 23 Mar 2013 00:00:00 -0700
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
'Parse the main CHANGELOG.md from stdin, outputting the Ubuntu changelog on stdout'
|
||||
|
||||
import sys,re, datetime
|
||||
|
||||
on_block=False
|
||||
for line in sys.stdin.readlines():
|
||||
line = line.strip()
|
||||
if line.startswith('# ') or len(line) == 0:
|
||||
continue
|
||||
if line.startswith('## '):
|
||||
if on_block:
|
||||
print '\n -- dotCloud <ops@dotcloud.com> {0}\n'.format(date)
|
||||
version, date = line[3:].split()
|
||||
date = datetime.datetime.strptime(date, '(%Y-%m-%d)').strftime(
|
||||
'%a, %d %b %Y 00:00:00 -0700')
|
||||
on_block = True
|
||||
print 'lxc-docker ({0}-1) precise; urgency=low'.format(version)
|
||||
continue
|
||||
if on_block:
|
||||
print ' ' + line
|
||||
print '\n -- dotCloud <ops@dotcloud.com> {0}'.format(date)
|
|
@ -12,6 +12,7 @@ import (
|
|||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -106,40 +107,45 @@ func (r *Registry) getImagesInRepository(repository string, authConfig *auth.Aut
|
|||
}
|
||||
|
||||
// Retrieve an image from the Registry.
|
||||
// Returns the Image object as well as the layer as an Archive (io.Reader)
|
||||
func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, error) {
|
||||
func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, int, error) {
|
||||
// Get the JSON
|
||||
req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to download json: %s", err)
|
||||
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
|
||||
res, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to download json: %s", err)
|
||||
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("HTTP code %d", res.StatusCode)
|
||||
return nil, -1, fmt.Errorf("HTTP code %d", res.StatusCode)
|
||||
}
|
||||
|
||||
imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size"))
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
|
||||
jsonString, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
|
||||
return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
|
||||
}
|
||||
return jsonString, nil
|
||||
return jsonString, imageSize, nil
|
||||
}
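The signature change above is the point of this hunk: GetRemoteImageJSON now also returns the image size, read from the registry's `X-Docker-Size` response header, and pullImage in server.go (further down this page) threads that value into utils.ProgressReader so the download progress bar has a real total. A minimal sketch of the header-parsing pattern, using an illustrative endpoint rather than the real registry client:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"strconv"
)

func main() {
	// Illustrative URL; the real code goes through r.client with auth headers.
	res, err := http.Get("https://registry.example.com/v1/images/abc123/json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer res.Body.Close()

	// Same parsing as the diff: a missing or malformed header is an error
	// (the diff reports it together with a -1 size).
	imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("layer size:", imageSize)
}
```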
|
||||
|
||||
func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, int, error) {
|
||||
func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, error) {
|
||||
req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
|
||||
if err != nil {
|
||||
return nil, -1, fmt.Errorf("Error while getting from the server: %s\n", err)
|
||||
return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
|
||||
res, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
return nil, err
|
||||
}
|
||||
return res.Body, int(res.ContentLength), nil
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
|
||||
|
@ -150,16 +156,16 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
|
|||
}
|
||||
for _, host := range registries {
|
||||
endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
|
||||
req, err := http.NewRequest("GET", endpoint, nil)
|
||||
req, err := r.opaqueRequest("GET", endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
|
||||
res, err := r.client.Do(req)
|
||||
utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 && res.StatusCode != 404 {
|
||||
|
@ -184,7 +190,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
|
|||
func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
|
||||
repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
|
||||
|
||||
req, err := http.NewRequest("GET", repositoryTarget, nil)
|
||||
req, err := r.opaqueRequest("GET", repositoryTarget, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -303,6 +309,15 @@ func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registr
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *Registry) opaqueRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
|
||||
req, err := http.NewRequest(method, urlStr, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme + ":", "", 1)
|
||||
return req, err
|
||||
}
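opaqueRequest builds a request whose URL.Opaque is set to the composed URL minus its scheme. The diff does not say why; the likely motivation is to stop net/http from re-parsing and re-escaping the path, so repository names and tags reach the index exactly as composed. A standalone sketch of the mechanics, with an illustrative URL:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	urlStr := "https://registry.example.com/v1/repositories/dhrp/sshd/tags/latest" // illustrative
	req, err := http.NewRequest("PUT", urlStr, nil)
	if err != nil {
		panic(err)
	}
	// Same assignment as opaqueRequest: strip "https:" and keep the rest verbatim.
	req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme+":", "", 1)

	fmt.Println(req.URL.Opaque)
	// When Opaque is non-empty, net/http builds the request target from it
	// instead of re-encoding URL.Path.
	fmt.Println(req.URL.RequestURI())
}
```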
|
||||
|
||||
// push a tag on the registry.
|
||||
// Remote has the format '<user>/<repo>
|
||||
func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
|
||||
|
@ -310,7 +325,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
|
|||
revision = "\"" + revision + "\""
|
||||
registry = "https://" + registry + "/v1"
|
||||
|
||||
req, err := http.NewRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
|
||||
req, err := r.opaqueRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -340,7 +355,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
|
|||
|
||||
utils.Debugf("Image list pushed to index:\n%s\n", imgListJSON)
|
||||
|
||||
req, err := http.NewRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJSON))
|
||||
req, err := r.opaqueRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJSON))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -360,7 +375,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
|
|||
// Redirect if necessary
|
||||
for res.StatusCode >= 300 && res.StatusCode < 400 {
|
||||
utils.Debugf("Redirected to %s\n", res.Header.Get("Location"))
|
||||
req, err = http.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
|
||||
req, err = r.opaqueRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -481,7 +496,7 @@ type Registry struct {
|
|||
func NewRegistry(root string, authConfig *auth.AuthConfig) *Registry {
|
||||
httpTransport := &http.Transport{
|
||||
DisableKeepAlives: true,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
|
||||
r := &Registry{
|
||||
|
|
|
@ -65,7 +65,11 @@ func init() {
|
|||
|
||||
// Create the "Server"
|
||||
srv := &Server{
|
||||
runtime: runtime,
|
||||
runtime: runtime,
|
||||
enableCors: false,
|
||||
lock: &sync.Mutex{},
|
||||
pullingPool: make(map[string]struct{}),
|
||||
pushingPool: make(map[string]struct{}),
|
||||
}
|
||||
// Retrieve the Image
|
||||
if err := srv.ImagePull(unitTestImageName, "", "", os.Stdout, utils.NewStreamFormatter(false), nil); err != nil {
|
||||
|
|
76
server.go
|
@ -15,6 +15,7 @@ import (
|
|||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func (srv *Server) DockerVersion() APIVersion {
|
||||
|
@ -321,7 +322,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoin
|
|||
for _, id := range history {
|
||||
if !srv.runtime.graph.Exists(id) {
|
||||
out.Write(sf.FormatStatus("Pulling %s metadata", id))
|
||||
imgJSON, err := r.GetRemoteImageJSON(id, endpoint, token)
|
||||
imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
|
||||
if err != nil {
|
||||
// FIXME: Keep goging in case of error?
|
||||
return err
|
||||
|
@ -333,12 +334,12 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoin
|
|||
|
||||
// Get the layer
|
||||
out.Write(sf.FormatStatus("Pulling %s fs layer", id))
|
||||
layer, contentLength, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
|
||||
layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer layer.Close()
|
||||
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, contentLength, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
|
||||
if err := srv.runtime.graph.Register(utils.ProgressReader(layer, imgSize, out, sf.FormatProgress("Downloading", "%v/%v (%v)"), sf), false, img); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -413,7 +414,47 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
|
|||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) poolAdd(kind, key string) error {
|
||||
srv.lock.Lock()
|
||||
defer srv.lock.Unlock()
|
||||
|
||||
if _, exists := srv.pullingPool[key]; exists {
|
||||
return fmt.Errorf("%s %s is already in progress", key, kind)
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case "pull":
|
||||
srv.pullingPool[key] = struct{}{}
|
||||
break
|
||||
case "push":
|
||||
srv.pushingPool[key] = struct{}{}
|
||||
break
|
||||
default:
|
||||
return fmt.Errorf("Unknown pool type")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) poolRemove(kind, key string) error {
|
||||
switch kind {
|
||||
case "pull":
|
||||
delete(srv.pullingPool, key)
|
||||
break
|
||||
case "push":
|
||||
delete(srv.pushingPool, key)
|
||||
break
|
||||
default:
|
||||
return fmt.Errorf("Unknown pool type")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
|
||||
if err := srv.poolAdd("pull", name+":"+tag); err != nil {
|
||||
return err
|
||||
}
|
||||
defer srv.poolRemove("pull", name+":"+tag)
|
||||
|
||||
r := registry.NewRegistry(srv.runtime.root, authConfig)
|
||||
out = utils.NewWriteFlusher(out)
|
||||
if endpoint != "" {
|
||||
|
@ -430,7 +471,6 @@ func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *util
|
|||
if err := srv.pullRepository(r, out, name, remote, tag, sf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -532,7 +572,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name stri
|
|||
// FIXME: Continue on error?
|
||||
return err
|
||||
}
|
||||
out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"/users/"+srvName+"/"+elem.Tag))
|
||||
out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"/repositories/"+srvName+"/tags/"+elem.Tag))
|
||||
if err := r.PushRegistryTag(srvName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -605,7 +645,13 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId,
|
|||
return nil
|
||||
}
|
||||
|
||||
// FIXME: Allow to interupt current push when new push of same image is done.
|
||||
func (srv *Server) ImagePush(name, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
|
||||
if err := srv.poolAdd("push", name); err != nil {
|
||||
return err
|
||||
}
|
||||
defer srv.poolRemove("push", name)
|
||||
|
||||
out = utils.NewWriteFlusher(out)
|
||||
img, err := srv.runtime.graph.Get(name)
|
||||
r := registry.NewRegistry(srv.runtime.root, authConfig)
|
||||
|
@ -705,6 +751,9 @@ func (srv *Server) ContainerRestart(name string, t int) error {
|
|||
|
||||
func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
|
||||
if container := srv.runtime.Get(name); container != nil {
|
||||
if container.State.Running {
|
||||
return fmt.Errorf("Impossible to remove a running container, please stop it first")
|
||||
}
|
||||
volumes := make(map[string]struct{})
|
||||
// Store all the deleted containers volumes
|
||||
for _, volumeId := range container.Volumes {
|
||||
|
@ -942,9 +991,6 @@ func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, std
|
|||
if container.State.Ghost {
|
||||
return fmt.Errorf("Impossible to attach to a ghost container")
|
||||
}
|
||||
if !container.State.Running {
|
||||
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
|
||||
}
|
||||
|
||||
var (
|
||||
cStdin io.ReadCloser
|
||||
|
@ -1003,14 +1049,20 @@ func NewServer(autoRestart, enableCors bool, dns ListOpts) (*Server, error) {
|
|||
return nil, err
|
||||
}
|
||||
srv := &Server{
|
||||
runtime: runtime,
|
||||
enableCors: enableCors,
|
||||
runtime: runtime,
|
||||
enableCors: enableCors,
|
||||
lock: &sync.Mutex{},
|
||||
pullingPool: make(map[string]struct{}),
|
||||
pushingPool: make(map[string]struct{}),
|
||||
}
|
||||
runtime.srv = srv
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
runtime *Runtime
|
||||
enableCors bool
|
||||
runtime *Runtime
|
||||
enableCors bool
|
||||
lock *sync.Mutex
|
||||
pullingPool map[string]struct{}
|
||||
pushingPool map[string]struct{}
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ Vagrant::Config.run do |config|
|
|||
# Install docker dependencies
|
||||
pkg_cmd << "apt-get install -q -y python-software-properties; " \
|
||||
"add-apt-repository -y ppa:gophers/go/ubuntu; apt-get update -qq; " \
|
||||
"DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc bsdtar git golang-stable aufs-tools make; "
|
||||
"DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git golang-stable aufs-tools make; "
|
||||
# Activate new kernel
|
||||
pkg_cmd << "shutdown -r +1; "
|
||||
config.vm.provision :shell, :inline => pkg_cmd
|
||||
|
|
|
@ -86,7 +86,7 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
|
|||
}
|
||||
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
|
||||
if r.readTotal > 0 {
|
||||
fmt.Fprintf(r.output, r.template, r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
|
||||
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
|
||||
} else {
|
||||
fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
|
||||
}
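The only change in this hunk is cosmetic: raw byte counts in the progress template are replaced by HumanSize, so the progress line prints e.g. megabytes instead of long integers. The project's HumanSize lives in utils.go and is not shown on this page; the sketch below is a generic human-readable size formatter of my own, written only to illustrate the idea (units, rounding and thresholds may differ from the real function).

```go
package main

import "fmt"

// humanSize is an illustrative stand-in for utils.HumanSize: scale a byte
// count down by powers of 1000 and attach an SI-style unit.
func humanSize(size int64) string {
	units := []string{"B", "kB", "MB", "GB", "TB", "PB"}
	value := float64(size)
	i := 0
	for value >= 1000 && i < len(units)-1 {
		value /= 1000
		i++
	}
	return fmt.Sprintf("%.3g %s", value, units[i])
}

func main() {
	for _, n := range []int64{512, 2048, 1572864, 5368709120} {
		fmt.Printf("%d -> %s\n", n, humanSize(n))
	}
}
```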