Rework monitor and waitlock inside of driver

Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
Michael Crosby 2014-01-10 15:06:16 -08:00
Parent 5a3d9bd432
Commit 8e87835968
4 changed files with 29 additions and 54 deletions

View File

@@ -743,6 +743,7 @@ func (container *Container) Start() (err error) {
Network: en,
Tty: container.Config.Tty,
User: container.Config.User,
WaitLock: make(chan struct{}),
}
container.process.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
@@ -754,8 +755,6 @@
return err
}
// Init the lock
container.waitLock = make(chan struct{})
go container.monitor()
if container.Config.Tty {
@@ -1143,10 +1142,17 @@ func (container *Container) releaseNetwork() {
}
func (container *Container) monitor() {
time.Sleep(1 * time.Second)
// Wait for the program to exit
fmt.Printf("--->Before WAIT %s\n", container.ID)
if err := container.runtime.Wait(container, time.Duration(0)); err != nil {
if container.process == nil {
if err := container.runtime.Wait(container, 0); err != nil {
utils.Debugf("monitor: cmd.Wait reported exit status %s for container %s", err, container.ID)
}
} else {
<-container.process.WaitLock
}
if err := container.process.WaitError; err != nil {
// Since non-zero exit status and signal terminations will cause err to be non-nil,
// we have to actually discard it. Still, log it anyway, just in case.
utils.Debugf("monitor: cmd.Wait reported exit status %s for container %s", err, container.ID)
@@ -1167,9 +1173,6 @@ func (container *Container) monitor() {
exitCode := container.process.GetExitCode()
container.State.SetStopped(exitCode)
// Release the lock
close(container.waitLock)
if err := container.ToDisk(); err != nil {
// FIXME: there is a race condition here which causes this to fail during the unit tests.
// If another goroutine was waiting for Wait() to return before removing the container's root
@@ -1283,8 +1286,8 @@ func (container *Container) Restart(seconds int) error {
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.GetExitCode()
<-container.process.WaitLock
return container.process.GetExitCode()
}
func (container *Container) Resize(h, w int) error {
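
To make this first hunk easier to follow, here is a minimal, self-contained sketch of the wait flow it converges on: the exec driver owns the WaitLock channel on the process, monitor() blocks on it instead of a container-level lock, and Wait() reads the exit code from the process once the channel closes. The Container and Process types below are stripped-down stand-ins rather than the real docker structs, and the nil-process fallback to runtime.Wait is omitted.

```go
package main

import (
	"fmt"
	"time"
)

// Stand-in for execdriver.Process with only the fields this diff relies on.
type Process struct {
	WaitLock  chan struct{} // closed by the driver when the command exits
	WaitError error         // error returned by cmd.Wait, if any
	exitCode  int
}

func (p *Process) GetExitCode() int { return p.exitCode }

type Container struct {
	ID      string
	process *Process
}

// monitor blocks until the driver closes WaitLock, then records the exit state.
func (c *Container) monitor() {
	<-c.process.WaitLock
	if err := c.process.WaitError; err != nil {
		fmt.Printf("monitor: cmd.Wait reported %v for container %s\n", err, c.ID)
	}
	// the real monitor() would now call State.SetStopped and ToDisk
}

// Wait blocks until the container stops running, then returns its exit code.
func (c *Container) Wait() int {
	<-c.process.WaitLock
	return c.process.GetExitCode()
}

func main() {
	p := &Process{WaitLock: make(chan struct{})}
	c := &Container{ID: "demo", process: p}
	go c.monitor()
	// Stand-in for the driver goroutine that reaps the command.
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(p.WaitLock)
	}()
	fmt.Println("exit code:", c.Wait())
}
```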

View File

@@ -1,20 +1,15 @@
package execdriver
import (
"errors"
"os/exec"
"syscall"
"time"
)
var (
ErrCommandIsNil = errors.New("Process's cmd is nil")
)
type Driver interface {
Start(c *Process) error
Kill(c *Process, sig int) error
Wait(c *Process, duration time.Duration) error
Wait(id string, duration time.Duration) error // Wait on an out of process option - lxc ghosts
}
// Network settings of the container
@@ -40,6 +35,8 @@ type Process struct {
ConfigPath string
Tty bool
Network *Network // if network is nil then networking is disabled
WaitLock chan struct{}
WaitError error
}
func (c *Process) Pid() int {
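
The two new fields amount to a small contract between drivers and the container monitor. The hypothetical helper below sketches the pattern the lxc driver adopts in the next file: start the command, then reap it in a goroutine that records the error and closes the channel. The startAndReap name and the package scaffolding are illustrative, not part of the commit.

```go
package execdriver

import (
	"os/exec"
	"time"
)

// Driver matches the reworked interface: Wait now takes an id so a driver can
// wait on containers it did not start in-process (the lxc "ghost" case).
type Driver interface {
	Start(c *Process) error
	Kill(c *Process, sig int) error
	Wait(id string, duration time.Duration) error
}

// Process embeds exec.Cmd (as the real type does) plus the new wait fields.
type Process struct {
	exec.Cmd
	ID        string
	WaitLock  chan struct{} // closed by the driver once the command has exited
	WaitError error         // whatever cmd.Wait returned, recorded before the close
}

// startAndReap is a hypothetical helper showing the expected usage: callers of
// the driver can then simply block on <-c.WaitLock and inspect c.WaitError.
func startAndReap(c *Process) error {
	if err := c.Start(); err != nil {
		return err
	}
	go func() {
		if err := c.Wait(); err != nil {
			c.WaitError = err
		}
		close(c.WaitLock)
	}()
	return nil
}
```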

View File

@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"github.com/dotcloud/docker/execdriver"
"os"
"os/exec"
"strconv"
"strings"
@@ -78,11 +77,17 @@ func (d *driver) Start(c *execdriver.Process) error {
c.Path = aname
c.Args = append([]string{name}, arg...)
fmt.Printf("-->%s\n-->%v\n", name, arg)
if err := c.Start(); err != nil {
return err
}
go func() {
if err := c.Wait(); err != nil {
c.WaitError = err
}
close(c.WaitLock)
}()
// Poll for running
if err := d.waitForStart(c); err != nil {
return err
@@ -94,37 +99,22 @@ func (d *driver) Kill(c *execdriver.Process, sig int) error {
return d.kill(c, sig)
}
func (d *driver) Wait(c *execdriver.Process, duration time.Duration) error {
return d.wait(c, duration)
}
// If seconds < 0 then wait forever
func (d *driver) wait(c *execdriver.Process, duration time.Duration) error {
func (d *driver) Wait(id string, duration time.Duration) error {
var (
killer bool
done = d.waitCmd(c)
done = d.waitLxc(id, &killer)
)
begin:
if duration > 0 {
select {
case err := <-done:
if err != nil && err == execdriver.ErrCommandIsNil {
done = d.waitLxc(c, &killer)
goto begin
}
return err
case <-time.After(duration):
killer = true
return ErrWaitTimeoutReached
}
} else {
if err := <-done; err != nil {
if err == execdriver.ErrCommandIsNil {
done = d.waitLxc(c, &killer)
goto begin
}
return err
}
return <-done
}
return nil
}
@@ -160,29 +150,14 @@ func (d *driver) waitForStart(c *execdriver.Process) error {
}
time.Sleep(50 * time.Millisecond)
}
fmt.Printf("-->%s\n", string(output))
os.Exit(1)
return ErrNotRunning
}
func (d *driver) waitCmd(c *execdriver.Process) <-chan error {
done := make(chan error)
go func() {
if c == nil {
done <- execdriver.ErrCommandIsNil
return
}
done <- c.Wait()
}()
return done
}
func (d *driver) waitLxc(c *execdriver.Process, kill *bool) <-chan error {
func (d *driver) waitLxc(id string, kill *bool) <-chan error {
done := make(chan error)
go func() {
for *kill {
output, err := exec.Command("lxc-info", "-n", c.ID).CombinedOutput()
output, err := exec.Command("lxc-info", "-n", id).CombinedOutput()
if err != nil {
done <- err
return
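
For orientation, here is a compact sketch of the id-based wait path this hunk ends up with: Wait polls the container via lxc-info in a goroutine and either blocks on the result or races it against a timeout. Two caveats: the hunk is truncated before the stop condition, so checking for "RUNNING" in the lxc-info output is an assumption, and the loop condition shown in the diff (`for *kill`) is inverted here so the poller keeps running until the timeout flips the flag.

```go
package lxc

import (
	"errors"
	"os/exec"
	"strings"
	"time"
)

var ErrWaitTimeoutReached = errors.New("wait timeout reached")

type driver struct{}

// Wait blocks until the container identified by id stops. A duration <= 0
// means wait forever; otherwise the poller is raced against a timer.
func (d *driver) Wait(id string, duration time.Duration) error {
	var (
		killer bool
		done   = d.waitLxc(id, &killer)
	)
	if duration > 0 {
		select {
		case err := <-done:
			return err
		case <-time.After(duration):
			killer = true // signal the poller to stop
			return ErrWaitTimeoutReached
		}
	}
	return <-done
}

// waitLxc polls lxc-info until the container is no longer reported as running.
func (d *driver) waitLxc(id string, kill *bool) <-chan error {
	done := make(chan error)
	go func() {
		for !*kill {
			output, err := exec.Command("lxc-info", "-n", id).CombinedOutput()
			if err != nil {
				done <- err
				return
			}
			// Assumption: the hunk cuts off here; treating the absence of
			// "RUNNING" in the output as "stopped" is a guess.
			if !strings.Contains(string(output), "RUNNING") {
				done <- nil
				return
			}
			time.Sleep(500 * time.Millisecond)
		}
	}()
	return done
}
```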

View File

@@ -847,7 +847,7 @@ func (runtime *Runtime) Kill(c *Container, sig int) error {
}
func (runtime *Runtime) Wait(c *Container, duration time.Duration) error {
return runtime.execDriver.Wait(c.process, duration)
return runtime.execDriver.Wait(c.ID, duration)
}
// Nuke kills all containers then removes all content