// docker/container.go

package docker

import (
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/execdriver"
"github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/links"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"path"
"strings"
"sync"
"syscall"
"time"
)
var (
ErrNotATTY = errors.New("The PTY is not a file")
ErrNoTTY = errors.New("No PTY found")
ErrContainerStart = errors.New("The container failed to start. Unknown error")
ErrContainerStartTimeout = errors.New("The container failed to start due to a timeout.")
)

type Container struct {
sync.Mutex
root string // Path to the "home" of the container, including metadata.
basefs string // Path to the graphdriver mountpoint
ID string
Created time.Time
Path string
Args []string
Config *runconfig.Config
State State
Image string
NetworkSettings *NetworkSettings
ResolvConfPath string
HostnamePath string
HostsPath string
Name string
Driver string
command *execdriver.Command
stdout *utils.WriteBroadcaster
stderr *utils.WriteBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
hostConfig *runconfig.HostConfig
activeLinks map[string]*links.Link
}
// FIXME: move deprecated port stuff to nat to clean up the core.
type PortMapping map[string]string // Deprecated

type NetworkSettings struct {
IPAddress string
IPPrefixLen int
Gateway string
Bridge string
PortMapping map[string]PortMapping // Deprecated
Ports nat.PortMap
}
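// PortMappingAPI flattens the container's port bindings into an engine.Table
// of Env entries (PrivatePort/PublicPort/Type/IP fields) for consumption by
// the API layer; exposed ports with no binding get a single entry.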
func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
var outs = engine.NewTable("", 0)
for port, bindings := range settings.Ports {
p, _ := nat.ParsePort(port.Port())
if len(bindings) == 0 {
out := &engine.Env{}
out.SetInt("PublicPort", p)
out.Set("Type", port.Proto())
outs.Add(out)
continue
}
for _, binding := range bindings {
out := &engine.Env{}
h, _ := nat.ParsePort(binding.HostPort)
out.SetInt("PrivatePort", p)
out.SetInt("PublicPort", h)
out.Set("Type", port.Proto())
out.Set("IP", binding.HostIp)
outs.Add(out)
}
}
return outs
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
if err := container.Mount(); err != nil {
return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err)
}
defer container.Unmount()
// Return error if path exists
destPath := path.Join(container.basefs, pth)
if _, err := os.Stat(destPath); err == nil {
// Since err is nil, the path could be stat'd and it exists
return fmt.Errorf("%s exists", pth)
} else if !os.IsNotExist(err) {
// We expect err might be that the file doesn't exist, so
// if it's some other error, return that.
return err
}
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil {
return err
}
dest, err := os.Create(destPath)
if err != nil {
return err
}
defer dest.Close()
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
func (container *Container) When() time.Time {
return container.Created
}
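// FromDisk loads the container's state from config.json in the container
// root, then reads the host configuration alongside it.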
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
// Load container settings
// The UDP changes broke compatibility of docker.PortMapping, but it's not used when loading a container, so we can skip the error.
if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
return err
}
return container.readHostConfig()
}
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
err = ioutil.WriteFile(container.jsonPath(), data, 0666)
if err != nil {
return
}
return container.writeHostConfig()
}
func (container *Container) readHostConfig() error {
container.hostConfig = &runconfig.HostConfig{}
// If the hostconfig file does not exist, do not read it.
// (We still have to initialize container.hostConfig,
// but that's OK, since we just did that above.)
_, err := os.Stat(container.hostConfigPath())
if os.IsNotExist(err) {
return nil
}
data, err := ioutil.ReadFile(container.hostConfigPath())
if err != nil {
return err
}
return json.Unmarshal(data, container.hostConfig)
}
func (container *Container) writeHostConfig() (err error) {
data, err := json.Marshal(container.hostConfig)
if err != nil {
return
}
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
func (container *Container) generateEnvConfig(env []string) error {
data, err := json.Marshal(env)
if err != nil {
return err
}
p, err := container.EnvConfigPath()
if err != nil {
return err
}
return ioutil.WriteFile(p, data, 0600)
}
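// setupPty allocates a pseudo-terminal, points the command's stdio at the
// slave end, and pumps data between the master end and the container's
// stream broadcasters.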
func (container *Container) setupPty() error {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return err
}
container.ptyMaster = ptyMaster
container.command.Stdout = ptySlave
container.command.Stderr = ptySlave
container.command.Console = ptySlave.Name()
// Copy the PTYs to our broadcasters
go func() {
defer container.stdout.CloseWriters()
utils.Debugf("startPty: begin of stdout pipe")
io.Copy(container.stdout, ptyMaster)
utils.Debugf("startPty: end of stdout pipe")
}()
// stdin
if container.Config.OpenStdin {
container.command.Stdin = ptySlave
container.command.SysProcAttr.Setctty = true
go func() {
defer container.stdin.Close()
utils.Debugf("startPty: begin of stdin pipe")
io.Copy(ptyMaster, container.stdin)
utils.Debugf("startPty: end of stdin pipe")
}()
}
return nil
}
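// setupStd wires the command's stdio straight to the container's
// broadcasters when no TTY is requested.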
func (container *Container) setupStd() error {
container.command.Stdout = container.stdout
container.command.Stderr = container.stderr
if container.Config.OpenStdin {
stdin, err := container.command.StdinPipe()
if err != nil {
return err
}
go func() {
defer stdin.Close()
utils.Debugf("start: begin of stdin pipe")
io.Copy(stdin, container.stdin)
utils.Debugf("start: end of stdin pipe")
}()
}
return nil
}
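// Attach wires the given streams to the container's stdio and returns a
// channel that yields the first error (or nil) once all copy goroutines
// finish. A minimal usage sketch, assuming a started container and no stdin:
//
//	errs := container.Attach(nil, nil, os.Stdout, os.Stderr)
//	if err := <-errs; err != nil {
//		log.Printf("attach: %s", err)
//	}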
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
var cStdout, cStderr io.ReadCloser
var nJobs int
errors := make(chan error, 3)
if stdin != nil && container.Config.OpenStdin {
nJobs += 1
if cStdin, err := container.StdinPipe(); err != nil {
errors <- err
} else {
go func() {
utils.Debugf("attach: stdin: begin")
defer utils.Debugf("attach: stdin: end")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()
} else {
defer func() {
if cStdout != nil {
cStdout.Close()
}
if cStderr != nil {
cStderr.Close()
}
}()
}
if container.Config.Tty {
_, err = utils.CopyEscapable(cStdin, stdin)
} else {
_, err = io.Copy(cStdin, stdin)
}
if err == io.ErrClosedPipe {
err = nil
}
if err != nil {
utils.Errorf("attach: stdin: %s", err)
}
errors <- err
}()
}
}
if stdout != nil {
nJobs += 1
if p, err := container.StdoutPipe(); err != nil {
errors <- err
} else {
cStdout = p
go func() {
utils.Debugf("attach: stdout: begin")
defer utils.Debugf("attach: stdout: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
_, err := io.Copy(stdout, cStdout)
if err == io.ErrClosedPipe {
err = nil
}
if err != nil {
utils.Errorf("attach: stdout: %s", err)
}
errors <- err
}()
}
} else {
go func() {
if stdinCloser != nil {
defer stdinCloser.Close()
}
if cStdout, err := container.StdoutPipe(); err != nil {
utils.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStdout)
}
}()
}
if stderr != nil {
nJobs += 1
if p, err := container.StderrPipe(); err != nil {
errors <- err
} else {
cStderr = p
go func() {
utils.Debugf("attach: stderr: begin")
defer utils.Debugf("attach: stderr: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
_, err := io.Copy(stderr, cStderr)
if err == io.ErrClosedPipe {
err = nil
}
if err != nil {
utils.Errorf("attach: stderr: %s", err)
}
errors <- err
}()
}
} else {
go func() {
if stdinCloser != nil {
defer stdinCloser.Close()
}
if cStderr, err := container.StderrPipe(); err != nil {
utils.Errorf("attach: stderr pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStderr)
}
}()
}
return utils.Go(func() error {
defer func() {
if cStdout != nil {
cStdout.Close()
}
if cStderr != nil {
cStderr.Close()
}
}()
// FIXME: how to clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
if err := <-errors; err != nil {
utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
return err
}
utils.Debugf("attach: job %d completed successfully", i+1)
}
utils.Debugf("attach: all jobs completed successfully")
return nil
})
}
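// populateCommand builds the execdriver.Command for the container from its
// config, host config, and network settings.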
func populateCommand(c *Container) {
var (
en *execdriver.Network
driverConfig []string
)
if !c.Config.NetworkDisabled {
network := c.NetworkSettings
en = &execdriver.Network{
Gateway: network.Gateway,
Bridge: network.Bridge,
IPAddress: network.IPAddress,
IPPrefixLen: network.IPPrefixLen,
Mtu: c.runtime.config.Mtu,
}
}
if lxcConf := c.hostConfig.LxcConf; lxcConf != nil {
for _, pair := range lxcConf {
driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value))
}
}
resources := &execdriver.Resources{
Memory: c.Config.Memory,
MemorySwap: c.Config.MemorySwap,
CpuShares: c.Config.CpuShares,
}
c.command = &execdriver.Command{
ID: c.ID,
Privileged: c.hostConfig.Privileged,
Rootfs: c.RootfsPath(),
InitPath: "/.dockerinit",
Entrypoint: c.Path,
Arguments: c.Args,
WorkingDir: c.Config.WorkingDir,
Network: en,
Tty: c.Config.Tty,
User: c.Config.User,
Config: driverConfig,
Resources: resources,
}
c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
}
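// Start mounts the container's filesystem, sets up networking, links,
// volumes and stdio, then launches the process through the exec driver.
// It does not return until the process is actually running or has failed
// to start.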
func (container *Container) Start() (err error) {
container.Lock()
defer container.Unlock()
if container.State.IsRunning() {
return fmt.Errorf("The container %s is already running.", container.ID)
}
defer func() {
if err != nil {
container.cleanup()
}
}()
if err := container.Mount(); err != nil {
return err
}
if container.runtime.config.DisableNetwork {
container.Config.NetworkDisabled = true
container.buildHostnameAndHostsFiles("127.0.1.1")
} else {
if err := container.allocateNetwork(); err != nil {
return err
}
container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
}
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if container.runtime.sysInfo.IPv4ForwardingDisabled {
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
if err := prepareVolumesForContainer(container); err != nil {
return err
}
// Setup environment
env := []string{
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=" + container.Config.Hostname,
}
if container.Config.Tty {
env = append(env, "TERM=xterm")
}
// Init any links between the parent and children
runtime := container.runtime
children, err := runtime.Children(container.Name)
if err != nil {
return err
}
if len(children) > 0 {
container.activeLinks = make(map[string]*links.Link, len(children))
// If we encounter an error make sure that we rollback any network
// config and ip table changes
rollback := func() {
for _, link := range container.activeLinks {
link.Disable()
}
container.activeLinks = nil
}
for linkAlias, child := range children {
if !child.State.IsRunning() {
return fmt.Errorf("Cannot link to a non-running container: %s AS %s", child.Name, linkAlias)
}
link, err := links.NewLink(
container.NetworkSettings.IPAddress,
child.NetworkSettings.IPAddress,
linkAlias,
child.Config.Env,
child.Config.ExposedPorts,
runtime.eng)
if err != nil {
rollback()
return err
}
container.activeLinks[link.Alias()] = link
if err := link.Enable(); err != nil {
rollback()
return err
}
for _, envVar := range link.ToEnv() {
env = append(env, envVar)
}
}
}
for _, elem := range container.Config.Env {
env = append(env, elem)
}
if err := container.generateEnvConfig(env); err != nil {
return err
}
if container.Config.WorkingDir != "" {
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
return err
}
}
envPath, err := container.EnvConfigPath()
if err != nil {
return err
}
if err := mountVolumesForContainer(container, envPath); err != nil {
return err
}
populateCommand(container)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
return err
}
container.waitLock = make(chan struct{})
// Setting up pipes and/or a PTY
var setup func() error
if container.Config.Tty {
setup = container.setupPty
} else {
setup = container.setupStd
}
if err := setup(); err != nil {
return err
}
callbackLock := make(chan struct{})
callback := func(command *execdriver.Command) {
container.State.SetRunning(command.Pid())
if command.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/stdout/stderr is the PTY slave,
// which we close here.
if c, ok := command.Stdout.(io.Closer); ok {
c.Close()
}
}
if err := container.ToDisk(); err != nil {
utils.Debugf("%s", err)
}
close(callbackLock)
}
// We use a callback here instead of a goroutine and a channel for
// synchronization purposes
cErr := utils.Go(func() error { return container.monitor(callback) })
// Start should not return until the process is actually running
select {
case <-callbackLock:
case err := <-cErr:
return err
}
return nil
}
func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
}
container.Wait()
return nil
}
func (container *Container) Output() (output []byte, err error) {
pipe, err := container.StdoutPipe()
if err != nil {
return nil, err
}
defer pipe.Close()
if err := container.Start(); err != nil {
return nil, err
}
output, err = ioutil.ReadAll(pipe)
container.Wait()
return output, err
}
// Container.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the container's active process.
// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".
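//
// A rough sketch of one consumer, assuming a started container:
//
//	stdout, _ := container.StdoutPipe()
//	go io.Copy(os.Stdout, stdout)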
func (container *Container) StdinPipe() (io.WriteCloser, error) {
return container.stdinPipe, nil
}

func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer, "")
return utils.NewBufReader(reader), nil
}
func (container *Container) buildHostnameAndHostsFiles(IP string) {
container.HostnamePath = path.Join(container.root, "hostname")
ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
hostsContent := []byte(`
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
`)
container.HostsPath = path.Join(container.root, "hosts")
if container.Config.Domainname != "" {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
} else if !container.Config.NetworkDisabled {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
}
ioutil.WriteFile(container.HostsPath, hostsContent, 0644)
}
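// allocateNetwork asks the engine for an interface (reusing the previous IP
// for ghost containers), migrates any deprecated PortSpecs, and allocates
// the requested port bindings.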
func (container *Container) allocateNetwork() error {
if container.Config.NetworkDisabled {
return nil
}
var (
env *engine.Env
err error
eng = container.runtime.eng
)
if container.State.IsGhost() {
if container.runtime.config.DisableNetwork {
env = &engine.Env{}
} else {
currentIP := container.NetworkSettings.IPAddress
job := eng.Job("allocate_interface", container.ID)
if currentIP != "" {
job.Setenv("RequestIP", currentIP)
}
env, err = job.Stdout.AddEnv()
if err != nil {
return err
}
if err := job.Run(); err != nil {
return err
}
}
} else {
job := eng.Job("allocate_interface", container.ID)
env, err = job.Stdout.AddEnv()
if err != nil {
return err
}
if err := job.Run(); err != nil {
return err
}
}
if container.Config.PortSpecs != nil {
utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
return err
}
container.Config.PortSpecs = nil
if err := container.writeHostConfig(); err != nil {
return err
}
}
var (
portSpecs = make(nat.PortSet)
bindings = make(nat.PortMap)
)
if !container.State.IsGhost() {
if container.Config.ExposedPorts != nil {
portSpecs = container.Config.ExposedPorts
}
if container.hostConfig.PortBindings != nil {
bindings = container.hostConfig.PortBindings
}
} else {
if container.NetworkSettings.Ports != nil {
for port, binding := range container.NetworkSettings.Ports {
portSpecs[port] = struct{}{}
bindings[port] = binding
}
}
}
container.NetworkSettings.PortMapping = nil
for port := range portSpecs {
binding := bindings[port]
if container.hostConfig.PublishAllPorts && len(binding) == 0 {
binding = append(binding, nat.PortBinding{})
}
for i := 0; i < len(binding); i++ {
b := binding[i]
portJob := eng.Job("allocate_port", container.ID)
portJob.Setenv("HostIP", b.HostIp)
portJob.Setenv("HostPort", b.HostPort)
portJob.Setenv("Proto", port.Proto())
portJob.Setenv("ContainerPort", port.Port())
portEnv, err := portJob.Stdout.AddEnv()
if err != nil {
return err
}
if err := portJob.Run(); err != nil {
eng.Job("release_interface", container.ID).Run()
return err
}
b.HostIp = portEnv.Get("HostIP")
b.HostPort = portEnv.Get("HostPort")
binding[i] = b
}
bindings[port] = binding
}
container.writeHostConfig()
container.NetworkSettings.Ports = bindings
container.NetworkSettings.Bridge = env.Get("Bridge")
container.NetworkSettings.IPAddress = env.Get("IP")
container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen")
container.NetworkSettings.Gateway = env.Get("Gateway")
return nil
}

func (container *Container) releaseNetwork() {
if container.Config.NetworkDisabled {
return
}
eng := container.runtime.eng
eng.Job("release_interface", container.ID).Run()
container.NetworkSettings = &NetworkSettings{}
}
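// monitor runs (or, for ghost containers, restores) the container's process,
// fires the start callback, and handles cleanup and state bookkeeping once
// the process exits.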
func (container *Container) monitor(callback execdriver.StartCallback) error {
var (
err error
exitCode int
)
if container.command == nil {
// This happens when you have a GHOST container with lxc
populateCommand(container)
err = container.runtime.RestoreCommand(container)
} else {
exitCode, err = container.runtime.Run(container, callback)
}
if err != nil {
utils.Errorf("Error running container: %s", err)
}
// Cleanup
container.cleanup()
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
container.State.SetStopped(exitCode)
if container.runtime != nil && container.runtime.srv != nil {
container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
}
close(container.waitLock)
// FIXME: there is a race condition here which causes this to fail during the unit tests.
// If another goroutine was waiting for Wait() to return before removing the container's root
// from the filesystem... At this point it may already have done so.
// This is because State.setStopped() has already been called, and has caused Wait()
// to return.
// FIXME: why are we serializing running state to disk in the first place?
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
container.ToDisk()
return err
}
func (container *Container) cleanup() {
container.releaseNetwork()
// Disable all active links
if container.activeLinks != nil {
for _, link := range container.activeLinks {
link.Disable()
}
}
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
utils.Errorf("%s: Error closing stdin: %s", container.ID, err)
}
}
if err := container.stdout.CloseWriters(); err != nil {
utils.Errorf("%s: Error closing stdout: %s", container.ID, err)
}
if err := container.stderr.CloseWriters(); err != nil {
utils.Errorf("%s: Error closing stderr: %s", container.ID, err)
}
if container.ptyMaster != nil {
if err := container.ptyMaster.Close(); err != nil {
utils.Errorf("%s: Error closing Pty master: %s", container.ID, err)
}
}
unmountVolumesForContainer(container)
if err := container.Unmount(); err != nil {
2013-06-04 22:00:22 +04:00
log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}
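// kill sends the given signal to the container's process, holding the
// container lock for the duration.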
func (container *Container) kill(sig int) error {
container.Lock()
defer container.Unlock()
if !container.State.IsRunning() {
return nil
}
return container.runtime.Kill(container, sig)
}
func (container *Container) Kill() error {
if !container.State.IsRunning() {
return nil
}
// 1. Send SIGKILL
if err := container.kill(9); err != nil {
return err
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.command == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
}
log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", utils.TruncateID(container.ID), "SIGKILL")
if err := container.runtime.Kill(container, 9); err != nil {
return err
}
}
container.Wait()
return nil
}
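// Stop sends SIGTERM, waits up to the given number of seconds for the
// process to exit on its own, and falls back to Kill if it does not.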
func (container *Container) Stop(seconds int) error {
if !container.State.IsRunning() {
return nil
}
// 1. Send a SIGTERM
if err := container.kill(15); err != nil {
utils.Debugf("Error sending kill SIGTERM: %s", err)
log.Print("Failed to send SIGTERM to the process, force killing")
if err := container.kill(9); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
return err
}
}
return nil
}
func (container *Container) Restart(seconds int) error {
// Avoid unnecessarily unmounting and then directly mounting
// the container when the container stops and then starts
// again
if err := container.Mount(); err == nil {
defer container.Unmount()
}
if err := container.Stop(seconds); err != nil {
return err
}
return container.Start()
}
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.GetExitCode()
}
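// Resize changes the dimensions of the container's PTY, when one is
// attached.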
func (container *Container) Resize(h, w int) error {
pty, ok := container.ptyMaster.(*os.File)
if !ok {
return fmt.Errorf("ptyMaster does not have Fd() method")
}
return term.SetWinsize(pty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}

func (container *Container) ExportRw() (archive.Archive, error) {
if err := container.Mount(); err != nil {
return nil, err
}
if container.runtime == nil {
return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
}
archive, err := container.runtime.Diff(container)
if err != nil {
container.Unmount()
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.Unmount()
return err
}), nil
}

func (container *Container) Export() (archive.Archive, error) {
if err := container.Mount(); err != nil {
return nil, err
}
archive, err := archive.Tar(container.basefs, archive.Uncompressed)
if err != nil {
container.Unmount()
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.Unmount()
return err
}), nil
}
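// WaitTimeout blocks until the container stops or the timeout elapses,
// whichever comes first. A typical call, as used by Kill above:
//
//	if err := container.WaitTimeout(10 * time.Second); err != nil {
//		// the container is still running
//	}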
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
go func() {
container.Wait()
done <- true
}()
select {
case <-time.After(timeout):
return fmt.Errorf("Timed Out")
case <-done:
return nil
}
}
func (container *Container) Mount() error {
return container.runtime.Mount(container)
}
func (container *Container) Changes() ([]archive.Change, error) {
return container.runtime.Changes(container)
}

func (container *Container) GetImage() (*Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
}
func (container *Container) Unmount() error {
return container.runtime.Unmount(container)
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) hostConfigPath() string {
return path.Join(container.root, "hostconfig.json")
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}

func (container *Container) EnvConfigPath() (string, error) {
p := path.Join(container.root, "config.env")
if _, err := os.Stat(p); err != nil {
if os.IsNotExist(err) {
f, err := os.Create(p)
if err != nil {
return "", err
}
f.Close()
} else {
return "", err
}
}
return p, nil
}
// This method must be exported to be used from the lxc template
// This directory is only usable when the container is running
func (container *Container) RootfsPath() string {
return path.Join(container.root, "root")
}
// This is the stand-alone version of the root fs, without any additional mounts.
// This directory is usable whenever the container is mounted (and not unmounted)
func (container *Container) BasefsPath() string {
return container.basefs
}

func validateID(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}

// GetSize returns the real size and the virtual size of the container.
func (container *Container) GetSize() (int64, int64) {
var (
sizeRw, sizeRootfs int64
err error
driver = container.runtime.driver
)
if err := container.Mount(); err != nil {
utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
return sizeRw, sizeRootfs
}
defer container.Unmount()
if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
sizeRw, err = differ.DiffSize(container.ID)
if err != nil {
utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
// FIXME: GetSize should return an error. Not changing it now in case
// there is a side-effect.
sizeRw = -1
}
} else {
changes, _ := container.Changes()
if changes != nil {
sizeRw = archive.ChangesSize(container.basefs, changes)
} else {
sizeRw = -1
}
}
if _, err = os.Stat(container.basefs); err == nil {
if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil {
sizeRootfs = -1
}
}
return sizeRw, sizeRootfs
}
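// Copy returns a tar stream of the given path inside the container's
// filesystem; the container is unmounted when the returned stream is
// closed.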
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err := container.Mount(); err != nil {
return nil, err
}
var filter []string
basePath := path.Join(container.basefs, resource)
stat, err := os.Stat(basePath)
if err != nil {
container.Unmount()
return nil, err
}
if !stat.IsDir() {
d, f := path.Split(basePath)
basePath = d
filter = []string{f}
} else {
filter = []string{path.Base(basePath)}
basePath = path.Dir(basePath)
}
archive, err := archive.TarFilter(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
Includes: filter,
})
if err != nil {
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.Unmount()
return err
}), nil
}
// Returns true if the container exposes a certain port
func (container *Container) Exposes(p nat.Port) bool {
_, exists := container.Config.ExposedPorts[p]
return exists
}
func (container *Container) GetPtyMaster() (*os.File, error) {
if container.ptyMaster == nil {
return nil, ErrNoTTY
}
if pty, ok := container.ptyMaster.(*os.File); ok {
return pty, nil
}
return nil, ErrNotATTY
}