Mirror of https://github.com/microsoft/docker.git
Remove the ghosts and kill everything
Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
This commit is contained in:
Parent: b722aa21b7
Commit: 772ef99d28
container.go | 12
@@ -55,6 +55,7 @@ type Container struct {
 	HostsPath string
 	Name      string
 	Driver    string
+	ExecDriver string
 
 	command *execdriver.Command
 	stdout  *utils.WriteBroadcaster
@@ -777,15 +778,8 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 		exitCode int
 	)
 
-	if container.command == nil {
-		// This happends when you have a GHOST container with lxc
-		populateCommand(container)
-		err = container.runtime.RestoreCommand(container)
-	} else {
-		pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
-		exitCode, err = container.runtime.Run(container, pipes, callback)
-	}
-
+	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
+	exitCode, err = container.runtime.Run(container, pipes, callback)
 	if err != nil {
 		utils.Errorf("Error running container: %s", err)
 	}

execdriver/driver.go | 1
@@ -77,7 +77,6 @@ type TtyTerminal interface {
 type Driver interface {
 	Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
 	Kill(c *Command, sig int) error
-	Restore(c *Command) error // Wait and try to re-attach on an out of process command
 	Name() string // Driver name
 	Info(id string) Info // "temporary" hack (until we move state from core to plugins)
 	GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
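Dropping Restore shrinks the driver contract: a driver only has to run and kill processes, and never re-attaches to one it did not start. As a rough illustration (not part of the commit), a conforming no-op driver inside the execdriver package would now look like the sketch below; the compile-time assertion catches any driver that still implements the old interface incompletely:

	// Illustrative sketch only: a minimal type satisfying the slimmed-down
	// execdriver.Driver interface once Restore is removed. Types (Command,
	// Pipes, StartCallback, Info) are those shown in the hunk above.
	type noopDriver struct{}

	var _ Driver = noopDriver{} // compile-time interface check

	func (noopDriver) Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) {
		return 0, nil
	}
	func (noopDriver) Kill(c *Command, sig int) error               { return nil }
	func (noopDriver) Name() string                                 { return "noop" }
	func (noopDriver) Info(id string) Info                          { return nil }
	func (noopDriver) GetPidsForContainer(id string) ([]int, error) { return nil, nil }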

execdriver/lxc/driver.go | 21
@@ -189,20 +189,7 @@ func getExitCode(c *execdriver.Command) int {
 }
 
 func (d *driver) Kill(c *execdriver.Command, sig int) error {
-	return d.kill(c, sig)
-}
-
-func (d *driver) Restore(c *execdriver.Command) error {
-	for {
-		output, err := exec.Command("lxc-info", "-n", c.ID).CombinedOutput()
-		if err != nil {
-			return err
-		}
-		if !strings.Contains(string(output), "RUNNING") {
-			return nil
-		}
-		time.Sleep(500 * time.Millisecond)
-	}
+	return KillLxc(c.ID, sig)
 }
 
 func (d *driver) version() string {
@@ -225,16 +212,16 @@ func (d *driver) version() string {
 	return version
 }
 
-func (d *driver) kill(c *execdriver.Command, sig int) error {
+func KillLxc(id string, sig int) error {
 	var (
 		err    error
 		output []byte
 	)
 	_, err = exec.LookPath("lxc-kill")
 	if err == nil {
-		output, err = exec.Command("lxc-kill", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput()
+		output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput()
 	} else {
-		output, err = exec.Command("lxc-stop", "-k", "-n", c.ID, strconv.Itoa(sig)).CombinedOutput()
+		output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput()
 	}
 	if err != nil {
 		return fmt.Errorf("Err: %s Output: %s", err, output)
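Renaming the unexported kill method to an exported KillLxc, keyed by container ID rather than by *execdriver.Command, is what lets the runtime kill a leftover lxc container for which no Command exists any more (see the runtime.go hunk below). A hypothetical caller sketch (the import path reflects Docker's repository at the time and is an assumption; illustrative only):

	// Sketch: kill a ghost lxc container by ID with SIGKILL; no
	// *execdriver.Command needs to be reconstructed for it.
	import "github.com/dotcloud/docker/execdriver/lxc"

	if err := lxc.KillLxc(container.ID, 9); err != nil {
		utils.Debugf("kill ghost: %s", err)
	}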

execdriver/native/driver.go | 39
@@ -16,7 +16,6 @@ import (
 	"strconv"
 	"strings"
 	"syscall"
-	"time"
 )
 
 const (
@@ -109,41 +108,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 }
 
 func (d *driver) Kill(p *execdriver.Command, sig int) error {
-	return syscall.Kill(p.Process.Pid, syscall.Signal(sig))
-}
-
-func (d *driver) Restore(c *execdriver.Command) error {
-	var nspid int
-	f, err := os.Open(filepath.Join(d.root, c.ID, "pid"))
-	if err != nil {
-		return err
-	}
-	defer d.removeContainerRoot(c.ID)
-
-	if _, err := fmt.Fscanf(f, "%d", &nspid); err != nil {
-		f.Close()
-		return err
-	}
-	f.Close()
-
-	if _, err := os.FindProcess(nspid); err != nil {
-		return fmt.Errorf("finding existing pid %d %s", nspid, err)
-	}
-	c.Process = &os.Process{
-		Pid: nspid,
-	}
-	ticker := time.NewTicker(500 * time.Millisecond)
-	defer ticker.Stop()
-
-	for _ = range ticker.C {
-		if err := syscall.Kill(nspid, 0); err != nil {
-			if strings.Contains(err.Error(), "no such process") {
-				return nil
-			}
-			return fmt.Errorf("signal error %s", err)
-		}
-	}
-	return nil
+	err := syscall.Kill(p.Process.Pid, syscall.Signal(sig))
+	d.removeContainerRoot(p.ID)
+	return err
 }
 
 func (d *driver) Info(id string) execdriver.Info {
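The deleted Restore used the classic Unix liveness idiom: os.FindProcess always succeeds on Unix regardless of whether the pid exists, so the code polled syscall.Kill(pid, 0), which delivers no signal but reports whether the pid can be signalled. A standalone sketch of that idiom (illustrative, not from the commit):

	// isAlive reports whether pid exists. Signal 0 performs only error
	// checking; ESRCH means no such process. (On Linux, EPERM would also
	// imply the process exists but belongs to someone else.)
	func isAlive(pid int) bool {
		return syscall.Kill(pid, 0) == nil
	}

With Restore gone, the native driver never needs that polling loop: Kill now signals the stored pid directly and removes the container's state directory in the same call.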

integration/runtime_test.go | 84
@@ -589,90 +589,6 @@ func TestRestore(t *testing.T) {
 	container2.State.SetStopped(0)
 }
 
-func TestReloadContainerLinks(t *testing.T) {
-	root, err := newTestDirectory(unitTestStoreBase)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false,
-	// and we want to set it to true.
-
-	eng := newTestEngine(t, true, root)
-
-	runtime1 := mkRuntimeFromEngine(eng, t)
-	defer nuke(runtime1)
-	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
-	defer runtime1.Destroy(container1)
-
-	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
-	defer runtime1.Destroy(container2)
-
-	// Start the container non blocking
-	if err := container2.Start(); err != nil {
-		t.Fatal(err)
-	}
-	// Add a link to container 2
-	// FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
-	// Why do we need it @crosbymichael?
-	// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
-	if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
-		t.Fatal(err)
-	}
-	if err := container1.Start(); err != nil {
-		t.Fatal(err)
-	}
-
-	if !container2.State.IsRunning() {
-		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
-	}
-
-	if !container1.State.IsRunning() {
-		t.Fatalf("Container %s should appear as running but isn't", container1.ID)
-	}
-
-	if len(runtime1.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
-	}
-
-	// Here are are simulating a docker restart - that is, reloading all containers
-	// from scratch
-	eng = newTestEngine(t, false, root)
-	runtime2 := mkRuntimeFromEngine(eng, t)
-	if len(runtime2.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
-	}
-	runningCount := 0
-	for _, c := range runtime2.List() {
-		if c.State.IsRunning() {
-			runningCount++
-		}
-	}
-	if runningCount != 2 {
-		t.Fatalf("Expected 2 container alive, %d found", runningCount)
-	}
-
-	// FIXME: we no longer test if containers were registered in the right order,
-	// because there is no public
-	// Make sure container 2 ( the child of container 1 ) was registered and started first
-	// with the runtime
-	//
-	containers := runtime2.List()
-	if len(containers) == 0 {
-		t.Fatalf("Runtime has no containers")
-	}
-	first := containers[0]
-	if first.ID != container2.ID {
-		t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
-	}
-
-	// Verify that the link is still registered in the runtime
-	if c := runtime2.Get(container1.Name); c == nil {
-		t.Fatal("Named container is no longer registered after restart")
-	}
-}
-
 func TestDefaultContainerName(t *testing.T) {
 	eng := NewTestEngine(t)
 	runtime := mkRuntimeFromEngine(eng, t)

pkg/libcontainer/nsinit/init.go | 9
@@ -48,17 +48,14 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
 			return fmt.Errorf("setctty %s", err)
 		}
 	}
 
-	/*
-		this is commented out so that we get the current Ghost functionality
-		if err := system.ParentDeathSignal(); err != nil {
-			return fmt.Errorf("parent death signal %s", err)
-		}
-	*/
+	if err := system.ParentDeathSignal(); err != nil {
+		return fmt.Errorf("parent death signal %s", err)
+	}
 	if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs); err != nil {
 		return fmt.Errorf("setup mount namespace %s", err)
 	}
 
 	if err := setupNetwork(container, context); err != nil {
 		return fmt.Errorf("setup networking %s", err)
 	}
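Re-enabling system.ParentDeathSignal is the heart of "no more ghosts": the container's init asks the kernel to signal it when its parent (the docker daemon's child) dies, so a container process can no longer outlive the daemon. The helper presumably wraps prctl(PR_SET_PDEATHSIG); a minimal Linux sketch (an assumption: the real implementation lives in Docker's pkg/system and may choose a different signal):

	// parentDeathSignal asks the kernel to send this process SIGKILL
	// when its parent exits (Linux-only prctl).
	func parentDeathSignal() error {
		if _, _, errno := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); errno != 0 {
			return errno
		}
		return nil
	}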

runtime.go | 43
@@ -154,12 +154,39 @@ func (runtime *Runtime) Register(container *Container) error {
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	//        if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
 	if container.State.IsRunning() {
-		info := runtime.execDriver.Info(container.ID)
+		if container.State.IsGhost() {
+			utils.Debugf("killing ghost %s", container.ID)
+
+			existingPid := container.State.Pid
+			container.State.SetGhost(false)
+			container.State.SetStopped(0)
+
+			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
+				lxc.KillLxc(container.ID, 9)
+			} else {
+				command := &execdriver.Command{
+					ID: container.ID,
+				}
+				command.Process = &os.Process{Pid: existingPid}
+				runtime.execDriver.Kill(command, 9)
+			}
+			// ensure that the filesystem is also unmounted
+			unmountVolumesForContainer(container)
+			if err := container.Unmount(); err != nil {
+				utils.Debugf("ghost unmount error %s", err)
+			}
+		}
+		info := runtime.execDriver.Info(container.ID)
 		if !info.IsRunning() {
 			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
 			if runtime.config.AutoRestart {
 				utils.Debugf("Restarting")
+				unmountVolumesForContainer(container)
+				if err := container.Unmount(); err != nil {
+					utils.Debugf("restart unmount error %s", err)
+				}
+
 				container.State.SetGhost(false)
 				container.State.SetStopped(0)
 				if err := container.Start(); err != nil {
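Two details in this hunk deserve a note. First, containers created before this commit have no ExecDriver value persisted, so an empty string is treated as lxc, the only driver that existed when they were created. Second, for other drivers the recorded pid is re-wrapped in an os.Process so the generic Kill path can signal it. A condensed, annotated restatement of the dispatch (names as in the hunk above; illustrative):

	// Empty ExecDriver means the container predates the field; assume lxc.
	if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
		lxc.KillLxc(container.ID, 9) // kill by name; no Command needed
	} else {
		command := &execdriver.Command{ID: container.ID}
		command.Process = &os.Process{Pid: existingPid} // re-wrap recorded pid
		runtime.execDriver.Kill(command, 9) // SIGKILL via the generic driver path
	}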
@@ -172,15 +199,6 @@ func (runtime *Runtime) Register(container *Container) error {
 					return err
 				}
 			}
-		} else {
-			utils.Debugf("Reconnecting to container %v", container.ID)
-
-			if err := container.allocateNetwork(); err != nil {
-				return err
-			}
-
-			container.waitLock = make(chan struct{})
-			go container.monitor(nil)
 		}
 	} else {
 		// When the container is not running, we still initialize the waitLock
@@ -447,6 +465,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
 		NetworkSettings: &NetworkSettings{},
 		Name:            name,
 		Driver:          runtime.driver.String(),
+		ExecDriver:      runtime.execDriver.Name(),
 	}
 	container.root = runtime.containerRoot(container.ID)
 	// Step 1: create the container directory.
@@ -834,10 +853,6 @@ func (runtime *Runtime) Kill(c *Container, sig int) error {
 	return runtime.execDriver.Kill(c.command, sig)
 }
 
-func (runtime *Runtime) RestoreCommand(c *Container) error {
-	return runtime.execDriver.Restore(c.command)
-}
-
 // Nuke kills all containers then removes all content
 // from the content root, including images, volumes and
 // container filesystems.