docker/container.go

package docker

import (
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"github.com/dotcloud/docker/term"
	"github.com/dotcloud/docker/utils"
	"github.com/kr/pty"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"
)
type Container struct {
	root string

	ID      string
	Created time.Time

	Path   string
	Args   []string
	Config *Config
	State  State
	Image  string

	network         *NetworkInterface
	NetworkSettings *NetworkSettings

	SysInitPath    string
	ResolvConfPath string

	cmd       *exec.Cmd
	stdout    *utils.WriteBroadcaster
	stderr    *utils.WriteBroadcaster
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
	ptyMaster io.Closer

	runtime *Runtime

	waitLock chan struct{}
	Volumes  map[string]string
	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
	// Easier than migrating older container configs :)
	VolumesRW map[string]bool
}
type Config struct {
	Hostname        string
	User            string
	Memory          int64 // Memory limit (in bytes)
	MemorySwap      int64 // Total memory usage (memory + swap); set `-1' to disable swap
	CpuShares       int64 // CPU shares (relative weight vs. other containers)
	AttachStdin     bool
	AttachStdout    bool
	AttachStderr    bool
	PortSpecs       []string
	Tty             bool // Attach standard streams to a tty, including stdin if it is not closed.
	OpenStdin       bool // Open stdin
	StdinOnce       bool // If true, close stdin after the 1 attached client disconnects.
	Env             []string
	Cmd             []string
	Dns             []string
	Image           string // Name of the image as it was passed by the operator (eg. could be symbolic)
	Volumes         map[string]struct{}
	VolumesFrom     string
	WorkingDir      string
	Entrypoint      []string
	NetworkDisabled bool
	Privileged      bool
}
type HostConfig struct {
	Binds           []string
	ContainerIDFile string
}

type BindMap struct {
	SrcPath string
	DstPath string
	Mode    string
}
var (
	ErrInvalidWorkingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
)
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
	if len(args) > 0 && args[0] != "--help" {
		cmd.SetOutput(ioutil.Discard)
		cmd.Usage = nil
	}

	flHostname := cmd.String("h", "", "Container host name")
	flWorkingDir := cmd.String("w", "", "Working directory inside the container")
	flUser := cmd.String("u", "", "Username or UID")
	flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
	flAttach := NewAttachOpts()
	cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
	flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
	flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
	flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
	flNetwork := cmd.Bool("n", true, "Enable networking for this container")
	flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
	flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")

	var flPorts ListOpts
	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")

	var flEnv ListOpts
	cmd.Var(&flEnv, "e", "Set environment variables")

	var flDns ListOpts
	cmd.Var(&flDns, "dns", "Set custom dns servers")

	flVolumes := NewPathOpts()
	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")

	flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")

	if err := cmd.Parse(args); err != nil {
		return nil, nil, cmd, err
	}
	// The memory capability check must run after flag parsing; before it,
	// *flMemory is still at its zero default and the check never fires.
	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		*flMemory = 0
	}
	if *flDetach && len(flAttach) > 0 {
		return nil, nil, cmd, fmt.Errorf("Conflicting options: -a and -d")
	}
	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
		return nil, nil, cmd, ErrInvalidWorkingDirectory
	}
	// If neither -d nor -a are set, attach to everything by default
	if len(flAttach) == 0 && !*flDetach {
		flAttach.Set("stdout")
		flAttach.Set("stderr")
		if *flStdin {
			flAttach.Set("stdin")
		}
	}

	var binds []string
	// Add any bind targets to the list of container volumes
	for bind := range flVolumes {
		arr := strings.Split(bind, ":")
		if len(arr) > 1 {
			dstDir := arr[1]
			flVolumes[dstDir] = struct{}{}
			binds = append(binds, bind)
			delete(flVolumes, bind)
		}
	}

	parsedArgs := cmd.Args()
	runCmd := []string{}
	entrypoint := []string{}
	image := ""
	if len(parsedArgs) >= 1 {
		image = cmd.Arg(0)
	}
	if len(parsedArgs) > 1 {
		runCmd = parsedArgs[1:]
	}
	if *flEntrypoint != "" {
		entrypoint = []string{*flEntrypoint}
	}

	config := &Config{
		Hostname:        *flHostname,
		PortSpecs:       flPorts,
		User:            *flUser,
		Tty:             *flTty,
		NetworkDisabled: !*flNetwork,
		OpenStdin:       *flStdin,
		Memory:          *flMemory,
		CpuShares:       *flCpuShares,
		AttachStdin:     flAttach.Get("stdin"),
		AttachStdout:    flAttach.Get("stdout"),
		AttachStderr:    flAttach.Get("stderr"),
		Env:             flEnv,
		Cmd:             runCmd,
		Dns:             flDns,
		Image:           image,
		Volumes:         flVolumes,
		VolumesFrom:     *flVolumesFrom,
		Entrypoint:      entrypoint,
		Privileged:      *flPrivileged,
		WorkingDir:      *flWorkingDir,
	}
	hostConfig := &HostConfig{
		Binds:           binds,
		ContainerIDFile: *flContainerIDFile,
	}

	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}

	// When allocating stdin in attached mode, close stdin at client disconnect
	if config.OpenStdin && config.AttachStdin {
		config.StdinOnce = true
	}
	return config, hostConfig, cmd, nil
}
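// Illustrative sketch (argument values are assumptions): parsing a
// "docker run"-style argument list into a Config and a HostConfig.
//
//	config, hostConfig, cmd, err := ParseRun(
//		[]string{"-i", "-t", "-m", "67108864", "base", "/bin/sh"},
//		&Capabilities{MemoryLimit: true, SwapLimit: true},
//	)
//	if err != nil {
//		log.Printf("parse error: %s", err)
//		return
//	}
//	// config.Image == "base", config.Cmd == ["/bin/sh"], config.Tty and
//	// config.OpenStdin are true; cmd is the *flag.FlagSet used for parsing.
//	_ = hostConfig
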
type PortMapping map[string]string
type NetworkSettings struct {
	IPAddress   string
	IPPrefixLen int
	Gateway     string
	Bridge      string
	PortMapping map[string]PortMapping
}
// PortMappingHuman returns a human-readable description of the port mappings
// defined in the settings.
func (settings *NetworkSettings) PortMappingHuman() string {
	var mapping []string
	for private, public := range settings.PortMapping["Tcp"] {
		mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
	}
	for private, public := range settings.PortMapping["Udp"] {
		mapping = append(mapping, fmt.Sprintf("%s->%s/udp", public, private))
	}
	sort.Strings(mapping)
	return strings.Join(mapping, ", ")
}
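// Illustrative sketch (mapping values are assumptions): with the TCP mapping
// {"80": "49153"} and the UDP mapping {"53": "49154"}, PortMappingHuman
// renders:
//
//	"49153->80, 49154->53/udp"
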
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
	// Make sure the directory exists
	if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
		return err
	}
	// FIXME: Handle permissions/already existing dest
	dest, err := os.Create(path.Join(container.rwPath(), pth))
	if err != nil {
		return err
	}
	defer dest.Close()
	if _, err := io.Copy(dest, file); err != nil {
		return err
	}
	return nil
}
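// Illustrative usage sketch (file name and contents are hypothetical):
// injecting a small file into the container's read-write layer.
//
//	data := strings.NewReader("nameserver 8.8.8.8\n")
//	if err := container.Inject(data, "/etc/resolv.conf"); err != nil {
//		log.Printf("inject failed: %s", err)
//	}
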
func (container *Container) Cmd() *exec.Cmd {
	return container.cmd
}

func (container *Container) When() time.Time {
	return container.Created
}

func (container *Container) FromDisk() error {
	data, err := ioutil.ReadFile(container.jsonPath())
	if err != nil {
		return err
	}
	// Load container settings.
	// The UDP change broke compatibility of docker.PortMapping, but that field
	// is not used when loading a container, so the error can be skipped.
	if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
		return err
	}
	return nil
}

func (container *Container) ToDisk() (err error) {
	data, err := json.Marshal(container)
	if err != nil {
		return
	}
	return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) ReadHostConfig() (*HostConfig, error) {
	data, err := ioutil.ReadFile(container.hostConfigPath())
	if err != nil {
		return &HostConfig{}, err
	}
	hostConfig := &HostConfig{}
	if err := json.Unmarshal(data, hostConfig); err != nil {
		return &HostConfig{}, err
	}
	return hostConfig, nil
}

func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
	data, err := json.Marshal(hostConfig)
	if err != nil {
		return
	}
	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
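// Illustrative round-trip sketch (bind path is hypothetical): persisting a
// HostConfig next to the container and reading it back.
//
//	hc := &HostConfig{Binds: []string{"/tmp/data:/data:ro"}}
//	if err := container.SaveHostConfig(hc); err != nil {
//		log.Printf("save failed: %s", err)
//	}
//	loaded, err := container.ReadHostConfig()
//	// loaded.Binds[0] == "/tmp/data:/data:ro" when err is nil
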
func (container *Container) generateLXCConfig() error {
	fo, err := os.Create(container.lxcConfigPath())
	if err != nil {
		return err
	}
	defer fo.Close()
	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
		return err
	}
	return nil
}
func (container *Container) startPty() error {
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return err
	}
	container.ptyMaster = ptyMaster
	container.cmd.Stdout = ptySlave
	container.cmd.Stderr = ptySlave

	// Copy the PTYs to our broadcasters
	go func() {
		defer container.stdout.CloseWriters()
		utils.Debugf("[startPty] Begin of stdout pipe")
		io.Copy(container.stdout, ptyMaster)
		utils.Debugf("[startPty] End of stdout pipe")
	}()

	// stdin
	if container.Config.OpenStdin {
		container.cmd.Stdin = ptySlave
		container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
		go func() {
			defer container.stdin.Close()
			utils.Debugf("[startPty] Begin of stdin pipe")
			io.Copy(ptyMaster, container.stdin)
			utils.Debugf("[startPty] End of stdin pipe")
		}()
	}
	if err := container.cmd.Start(); err != nil {
		return err
	}
	ptySlave.Close()
	return nil
}
func (container *Container) start() error {
	container.cmd.Stdout = container.stdout
	container.cmd.Stderr = container.stderr
	if container.Config.OpenStdin {
		stdin, err := container.cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			defer stdin.Close()
			utils.Debugf("Begin of stdin pipe [start]")
			io.Copy(stdin, container.stdin)
			utils.Debugf("End of stdin pipe [start]")
		}()
	}
	return container.cmd.Start()
}
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
	var cStdout, cStderr io.ReadCloser

	var nJobs int
	errors := make(chan error, 3)
	if stdin != nil && container.Config.OpenStdin {
		nJobs += 1
		if cStdin, err := container.StdinPipe(); err != nil {
			errors <- err
		} else {
			go func() {
				utils.Debugf("[start] attach stdin\n")
				defer utils.Debugf("[end] attach stdin\n")
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if container.Config.StdinOnce && !container.Config.Tty {
					defer cStdin.Close()
				} else {
					if cStdout != nil {
						defer cStdout.Close()
					}
					if cStderr != nil {
						defer cStderr.Close()
					}
				}
				if container.Config.Tty {
					_, err = utils.CopyEscapable(cStdin, stdin)
				} else {
					_, err = io.Copy(cStdin, stdin)
				}
				if err != nil {
					utils.Debugf("[error] attach stdin: %s\n", err)
				}
				// Discard error, expecting pipe error
				errors <- nil
			}()
		}
	}
	if stdout != nil {
		nJobs += 1
		if p, err := container.StdoutPipe(); err != nil {
			errors <- err
		} else {
			cStdout = p
			go func() {
				utils.Debugf("[start] attach stdout\n")
				defer utils.Debugf("[end] attach stdout\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stdout, cStdout)
				if err != nil {
					utils.Debugf("[error] attach stdout: %s\n", err)
				}
				errors <- err
			}()
		}
	} else {
		go func() {
			if stdinCloser != nil {
				defer stdinCloser.Close()
			}
			if cStdout, err := container.StdoutPipe(); err != nil {
				utils.Debugf("Error stdout pipe")
			} else {
				io.Copy(&utils.NopWriter{}, cStdout)
			}
		}()
	}
	if stderr != nil {
		nJobs += 1
		if p, err := container.StderrPipe(); err != nil {
			errors <- err
		} else {
			cStderr = p
			go func() {
				utils.Debugf("[start] attach stderr\n")
				defer utils.Debugf("[end] attach stderr\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stderr, cStderr)
				if err != nil {
					utils.Debugf("[error] attach stderr: %s\n", err)
				}
				errors <- err
			}()
		}
	} else {
		go func() {
			if stdinCloser != nil {
				defer stdinCloser.Close()
			}
			if cStderr, err := container.StderrPipe(); err != nil {
				utils.Debugf("Error stderr pipe")
			} else {
				io.Copy(&utils.NopWriter{}, cStderr)
			}
		}()
	}

	return utils.Go(func() error {
		if cStdout != nil {
			defer cStdout.Close()
		}
		if cStderr != nil {
			defer cStderr.Close()
		}
		// FIXME: how do we clean up the stdin goroutine without the unwanted side effect
		// of closing the passed stdin? Add an intermediary io.Pipe?
		for i := 0; i < nJobs; i += 1 {
			utils.Debugf("Waiting for job %d/%d\n", i+1, nJobs)
			if err := <-errors; err != nil {
				utils.Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
				return err
			}
			utils.Debugf("Job %d completed successfully\n", i+1)
		}
		utils.Debugf("All jobs completed successfully\n")
		return nil
	})
}
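// Illustrative sketch: wiring the caller's terminal to the container. Passing
// os.Stdin as both the reader and the closer is an assumption about how the
// caller wants disconnects handled.
//
//	errs := container.Attach(os.Stdin, os.Stdin, os.Stdout, os.Stderr)
//	if err := <-errs; err != nil {
//		log.Printf("attach error: %s", err)
//	}
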
func (container *Container) Start(hostConfig *HostConfig) error {
	container.State.Lock()
	defer container.State.Unlock()

	if len(hostConfig.Binds) == 0 {
		hostConfig, _ = container.ReadHostConfig()
	}

	if container.State.Running {
		return fmt.Errorf("The container %s is already running.", container.ID)
	}
	if err := container.EnsureMounted(); err != nil {
		return err
	}
	if container.runtime.networkManager.disabled {
		container.Config.NetworkDisabled = true
	} else {
		if err := container.allocateNetwork(); err != nil {
			return err
		}
	}

	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}
	if !container.runtime.capabilities.IPv4Forwarding {
		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
	}

	// Create the requested bind mounts
	binds := make(map[string]BindMap)
	// Define illegal container destinations
	illegalDsts := []string{"/", "."}

	for _, bind := range hostConfig.Binds {
		// FIXME: factorize bind parsing in parseBind
		// Bind specifications take the form src:dst or src:dst:mode,
		// e.g. "/host/dir:/container/dir" (read-write by default) or
		// "/host/dir:/container/dir:ro".
		var src, dst, mode string
		arr := strings.Split(bind, ":")
		if len(arr) == 2 {
			src = arr[0]
			dst = arr[1]
			mode = "rw"
		} else if len(arr) == 3 {
			src = arr[0]
			dst = arr[1]
			mode = arr[2]
		} else {
			return fmt.Errorf("Invalid bind specification: %s", bind)
		}
		// Bail if trying to mount to an illegal destination
		for _, illegal := range illegalDsts {
			if dst == illegal {
				return fmt.Errorf("Illegal bind destination: %s", dst)
			}
		}

		bindMap := BindMap{
			SrcPath: src,
			DstPath: dst,
			Mode:    mode,
		}
		binds[path.Clean(dst)] = bindMap
	}

	if container.Volumes == nil || len(container.Volumes) == 0 {
		container.Volumes = make(map[string]string)
		container.VolumesRW = make(map[string]bool)
	}

	// Apply volumes from another container if requested
	if container.Config.VolumesFrom != "" {
		c := container.runtime.Get(container.Config.VolumesFrom)
		if c == nil {
			return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Config.VolumesFrom)
		}
		for volPath, id := range c.Volumes {
			if _, exists := container.Volumes[volPath]; exists {
				continue
			}
			if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
				return err
			}
			container.Volumes[volPath] = id
			if isRW, exists := c.VolumesRW[volPath]; exists {
				container.VolumesRW[volPath] = isRW
			}
		}
	}

	// Create the requested volumes if they don't exist
	for volPath := range container.Config.Volumes {
		volPath = path.Clean(volPath)
		// Skip existing volumes
		if _, exists := container.Volumes[volPath]; exists {
			continue
		}
		// If an external bind is defined for this volume, use it as a source
		if bindMap, exists := binds[volPath]; exists {
			container.Volumes[volPath] = bindMap.SrcPath
			if strings.ToLower(bindMap.Mode) == "rw" {
				container.VolumesRW[volPath] = true
			}
			// Otherwise create a directory in $ROOT/volumes/ and use that
		} else {
			c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
			if err != nil {
				return err
			}
			srcPath, err := c.layer()
			if err != nil {
				return err
			}
			container.Volumes[volPath] = srcPath
			container.VolumesRW[volPath] = true // RW by default
		}
		// Create the mountpoint
		if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
			return err
		}
	}

	if err := container.generateLXCConfig(); err != nil {
		return err
	}

	params := []string{
		"-n", container.ID,
		"-f", container.lxcConfigPath(),
		"--",
		"/.dockerinit",
	}

	// Networking
	if !container.Config.NetworkDisabled {
		params = append(params, "-g", container.network.Gateway.String())
	}

	// User
	if container.Config.User != "" {
		params = append(params, "-u", container.Config.User)
	}

	if container.Config.Tty {
		params = append(params, "-e", "TERM=xterm")
	}

	// Setup environment
	params = append(params,
		"-e", "HOME=/",
		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"-e", "container=lxc",
		"-e", "HOSTNAME="+container.Config.Hostname,
	)

	if container.Config.WorkingDir != "" {
		workingDir := path.Clean(container.Config.WorkingDir)
		utils.Debugf("[working dir] working dir is %s", workingDir)

		if err := os.MkdirAll(path.Join(container.RootfsPath(), workingDir), 0755); err != nil {
			return err
		}

		params = append(params,
			"-w", workingDir,
		)
	}

	for _, elem := range container.Config.Env {
		params = append(params, "-e", elem)
	}

	// Program
	params = append(params, "--", container.Path)
	params = append(params, container.Args...)

	container.cmd = exec.Command("lxc-start", params...)

	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
		return err
	}

	var err error
	if container.Config.Tty {
		err = container.startPty()
	} else {
		err = container.start()
	}
	if err != nil {
		return err
	}
	// FIXME: save state on disk *first*, then converge
	// this way disk state is used as a journal, eg. we can restore after crash etc.
	container.State.setRunning(container.cmd.Process.Pid)

	// Init the lock
	container.waitLock = make(chan struct{})

	container.ToDisk()
	container.SaveHostConfig(hostConfig)
	go container.monitor()
	return nil
}
func (container *Container) Run() error {
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		return err
	}
	container.Wait()
	return nil
}

func (container *Container) Output() (output []byte, err error) {
	pipe, err := container.StdoutPipe()
	if err != nil {
		return nil, err
	}
	defer pipe.Close()
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		return nil, err
	}
	output, err = ioutil.ReadAll(pipe)
	container.Wait()
	return output, err
}
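// Illustrative sketch: starting a container and collecting its stdout once it
// exits.
//
//	out, err := container.Output()
//	if err != nil {
//		log.Printf("run failed: %s", err)
//		return
//	}
//	fmt.Printf("container wrote: %s", out)
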
// StdinPipe returns a pipe connected to the standard input of the
// container's active process.
func (container *Container) StdinPipe() (io.WriteCloser, error) {
	return container.stdinPipe, nil
}

func (container *Container) StdoutPipe() (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	container.stdout.AddWriter(writer, "")
	return utils.NewBufReader(reader), nil
}

func (container *Container) StderrPipe() (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	container.stderr.AddWriter(writer, "")
	return utils.NewBufReader(reader), nil
}
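// Illustrative sketch: tailing a container's output streams. Each call to
// StdoutPipe/StderrPipe registers a fresh writer on the broadcaster, so
// multiple readers can observe the same stream.
//
//	stdout, _ := container.StdoutPipe()
//	stderr, _ := container.StderrPipe()
//	go io.Copy(os.Stdout, stdout)
//	go io.Copy(os.Stderr, stderr)
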
func (container *Container) allocateNetwork() error {
	if container.Config.NetworkDisabled {
		return nil
	}
	iface, err := container.runtime.networkManager.Allocate()
	if err != nil {
		return err
	}
	container.NetworkSettings.PortMapping = make(map[string]PortMapping)
	container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping)
	container.NetworkSettings.PortMapping["Udp"] = make(PortMapping)
	for _, spec := range container.Config.PortSpecs {
		nat, err := iface.AllocatePort(spec)
		if err != nil {
			iface.Release()
			return err
		}
		proto := strings.Title(nat.Proto)
		backend, frontend := strconv.Itoa(nat.Backend), strconv.Itoa(nat.Frontend)
		container.NetworkSettings.PortMapping[proto][backend] = frontend
	}
	container.network = iface
	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
	container.NetworkSettings.IPAddress = iface.IPNet.IP.String()
	container.NetworkSettings.IPPrefixLen, _ = iface.IPNet.Mask.Size()
	container.NetworkSettings.Gateway = iface.Gateway.String()
	return nil
}
func (container *Container) releaseNetwork() {
	if container.Config.NetworkDisabled {
		return
	}
	container.network.Release()
	container.network = nil
	container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
	for {
		output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
		if err != nil {
			return err
		}
		if !strings.Contains(string(output), "RUNNING") {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
}
func (container *Container) monitor() {
	// Wait for the program to exit
	utils.Debugf("Waiting for process")

	// If the command does not exist, try to wait via lxc
	if container.cmd == nil {
		if err := container.waitLxc(); err != nil {
			utils.Debugf("%s: Process: %s", container.ID, err)
		}
	} else {
		if err := container.cmd.Wait(); err != nil {
			// Discard the error, as any signal or non-zero exit status will generate one
			utils.Debugf("%s: Process: %s", container.ID, err)
		}
	}
	utils.Debugf("Process finished")

	if container.runtime != nil && container.runtime.srv != nil {
		container.runtime.srv.LogEvent("die", container.ShortID(), container.runtime.repositories.ImageName(container.Image))
	}

	exitCode := -1
	if container.cmd != nil {
		exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	}

	// Cleanup
	container.releaseNetwork()
	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			utils.Debugf("%s: Error close stdin: %s", container.ID, err)
		}
	}
	if err := container.stdout.CloseWriters(); err != nil {
		utils.Debugf("%s: Error close stdout: %s", container.ID, err)
	}
	if err := container.stderr.CloseWriters(); err != nil {
		utils.Debugf("%s: Error close stderr: %s", container.ID, err)
	}
	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			utils.Debugf("%s: Error closing Pty master: %s", container.ID, err)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
	}

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	// Report status back
	container.State.setStopped(exitCode)

	// Release the lock
	close(container.waitLock)

	if err := container.ToDisk(); err != nil {
		// FIXME: there is a race condition here which causes this to fail during the unit tests.
		// If another goroutine was waiting for Wait() to return before removing the container's root
		// from the filesystem... At this point it may already have done so.
		// This is because State.setStopped() has already been called, and has caused Wait()
		// to return.
		// FIXME: why are we serializing running state to disk in the first place?
		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
	}
}
func (container *Container) kill() error {
	if !container.State.Running {
		return nil
	}

	// 1. Send SIGKILL to the process via lxc
	output, err := exec.Command("lxc-kill", "-n", container.ID, "9").CombinedOutput()
	if err != nil {
		log.Printf("error killing container %s (%s, %s)", container.ID, output, err)
	}

	// 2. Wait for the process to die; as a last resort, try to kill it directly
	if err := container.WaitTimeout(10 * time.Second); err != nil {
		if container.cmd == nil {
			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ID)
		}
		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.ID)
		if err := container.cmd.Process.Kill(); err != nil {
			return err
		}
	}

	// Wait for the container to be actually stopped
	container.Wait()
	return nil
}

func (container *Container) Kill() error {
	container.State.Lock()
	defer container.State.Unlock()
	if !container.State.Running {
		return nil
	}
	return container.kill()
}
func (container *Container) Stop(seconds int) error {
	container.State.Lock()
	defer container.State.Unlock()
	if !container.State.Running {
		return nil
	}

	// 1. Send a SIGTERM
	if output, err := exec.Command("lxc-kill", "-n", container.ID, "15").CombinedOutput(); err != nil {
		log.Print(string(output))
		log.Print("Failed to send SIGTERM to the process, force killing")
		if err := container.kill(); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
		if err := container.kill(); err != nil {
			return err
		}
	}
	return nil
}
func (container *Container) Restart(seconds int) error {
	if err := container.Stop(seconds); err != nil {
		return err
	}
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		return err
	}
	return nil
}

// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
	<-container.waitLock
	return container.State.ExitCode
}
func (container *Container) Resize(h, w int) error {
	pty, ok := container.ptyMaster.(*os.File)
	if !ok {
		return fmt.Errorf("ptyMaster does not have Fd() method")
	}
	return term.SetWinsize(pty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}
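// Illustrative sketch: propagating a terminal window-size change to the
// container, assuming it was started with Tty enabled (otherwise ptyMaster
// is not an *os.File and Resize returns an error).
//
//	if err := container.Resize(24, 80); err != nil {
//		log.Printf("resize failed: %s", err)
//	}
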
func (container *Container) ExportRw() (Archive, error) {
	return Tar(container.rwPath(), Uncompressed)
}

func (container *Container) RwChecksum() (string, error) {
	rwData, err := Tar(container.rwPath(), Xz)
	if err != nil {
		return "", err
	}
	return utils.HashData(rwData)
}

func (container *Container) Export() (Archive, error) {
	if err := container.EnsureMounted(); err != nil {
		return nil, err
	}
	return Tar(container.RootfsPath(), Uncompressed)
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
	done := make(chan bool)
	go func() {
		container.Wait()
		done <- true
	}()

	select {
	case <-time.After(timeout):
		return fmt.Errorf("Timed Out")
	case <-done:
		return nil
	}
}
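// Illustrative sketch: bounding how long we wait for a container to stop.
//
//	if err := container.WaitTimeout(5 * time.Second); err != nil {
//		log.Printf("container did not stop in time: %s", err)
//	}
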
func (container *Container) EnsureMounted() error {
	if mounted, err := container.Mounted(); err != nil {
		return err
	} else if mounted {
		return nil
	}
	return container.Mount()
}
func (container *Container) Mount() error {
	image, err := container.GetImage()
	if err != nil {
		return err
	}
	return image.Mount(container.RootfsPath(), container.rwPath())
}

func (container *Container) Changes() ([]Change, error) {
	image, err := container.GetImage()
	if err != nil {
		return nil, err
	}
	return image.Changes(container.rwPath())
}

func (container *Container) GetImage() (*Image, error) {
	if container.runtime == nil {
		return nil, fmt.Errorf("Can't get image of unregistered container")
	}
	return container.runtime.graph.Get(container.Image)
}

func (container *Container) Mounted() (bool, error) {
	return Mounted(container.RootfsPath())
}

func (container *Container) Unmount() error {
	return Unmount(container.RootfsPath())
}
// ShortID returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length container ID.
func (container *Container) ShortID() string {
	return utils.TruncateID(container.ID)
}
func (container *Container) logPath(name string) string {
	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
}

func (container *Container) ReadLog(name string) (io.Reader, error) {
	return os.Open(container.logPath(name))
}

func (container *Container) hostConfigPath() string {
	return path.Join(container.root, "hostconfig.json")
}

func (container *Container) jsonPath() string {
	return path.Join(container.root, "config.json")
}

func (container *Container) lxcConfigPath() string {
	return path.Join(container.root, "config.lxc")
}

// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
	return path.Join(container.root, "rootfs")
}

func (container *Container) rwPath() string {
	return path.Join(container.root, "rw")
}
func validateID(id string) error {
	if id == "" {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}
// GetSize returns the real size and the virtual size of the container.
func (container *Container) GetSize() (int64, int64) {
	var sizeRw, sizeRootfs int64

	filepath.Walk(container.rwPath(), func(path string, fileInfo os.FileInfo, err error) error {
		if fileInfo != nil {
			sizeRw += fileInfo.Size()
		}
		return nil
	})

	_, err := os.Stat(container.RootfsPath())
	if err == nil {
		filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
			if fileInfo != nil {
				sizeRootfs += fileInfo.Size()
			}
			return nil
		})
	}
	return sizeRw, sizeRootfs
}
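// Illustrative sketch: reporting both sizes, as a "docker ps -s"-style
// listing might.
//
//	sizeRw, sizeRootfs := container.GetSize()
//	fmt.Printf("%s: %d B rw layer (%d B virtual)\n", container.ShortID(), sizeRw, sizeRootfs)
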
func (container *Container) Copy(resource string) (Archive, error) {
	if err := container.EnsureMounted(); err != nil {
		return nil, err
	}
	var filter []string
	basePath := path.Join(container.RootfsPath(), resource)
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	if !stat.IsDir() {
		d, f := path.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{path.Base(basePath)}
		basePath = path.Dir(basePath)
	}
	return TarFilter(basePath, Uncompressed, filter)
}
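// Illustrative sketch (path is hypothetical): exporting a file or directory
// from the container as an uncompressed tar stream, assuming Archive is a
// readable stream.
//
//	archive, err := container.Copy("/var/log")
//	if err != nil {
//		log.Printf("copy failed: %s", err)
//		return
//	}
//	io.Copy(os.Stdout, archive)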