Mirror of https://github.com/microsoft/docker.git
Merge pull request #473 from dotcloud/26-auto_restart_containers-feature
+ runtime: Add -r flag to dockerd in order to restart previously running container....
This commit is contained in:
Commit
8f81e175af
@@ -993,11 +993,11 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s
 	return nil
 }
 
-func NewServer() (*Server, error) {
+func NewServer(autoRestart bool) (*Server, error) {
 	if runtime.GOARCH != "amd64" {
 		log.Fatalf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
 	}
-	runtime, err := NewRuntime()
+	runtime, err := NewRuntime(autoRestart)
 	if err != nil {
 		return nil, err
 	}
@@ -28,6 +28,7 @@ func main() {
 	// FIXME: Switch d and D ? (to be more sshd like)
 	flDaemon := flag.Bool("d", false, "Daemon mode")
 	flDebug := flag.Bool("D", false, "Debug mode")
+	flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
 	bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge")
 	pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
 	flag.Parse()
@@ -45,7 +46,7 @@ func main() {
 			flag.Usage()
 			return
 		}
-		if err := daemon(*pidfile); err != nil {
+		if err := daemon(*pidfile, *flAutoRestart); err != nil {
 			log.Fatal(err)
 		}
 	} else {
@@ -82,7 +83,7 @@ func removePidFile(pidfile string) {
 	}
 }
 
-func daemon(pidfile string) error {
+func daemon(pidfile string, autoRestart bool) error {
 	if err := createPidFile(pidfile); err != nil {
 		log.Fatal(err)
 	}
@@ -97,7 +98,7 @@ func daemon(pidfile string) error {
 		os.Exit(0)
 	}()
 
-	service, err := docker.NewServer()
+	service, err := docker.NewServer(autoRestart)
 	if err != nil {
 		return err
 	}
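Up to this point the change is pure plumbing: the boolean parsed from -r in main() is passed through daemon() and NewServer() so it can eventually be stored on the Runtime. A minimal, self-contained sketch of that pattern, using stripped-down stand-ins for the real Server and Runtime types (only the names mirror the diff):

    package main

    import (
    	"flag"
    	"fmt"
    )

    // Illustrative stand-ins for the real Server and Runtime types.
    type Runtime struct{ autoRestart bool }
    type Server struct{ runtime *Runtime }

    func NewRuntime(autoRestart bool) *Runtime { return &Runtime{autoRestart: autoRestart} }
    func NewServer(autoRestart bool) *Server   { return &Server{runtime: NewRuntime(autoRestart)} }

    func main() {
    	// Parsed once in main, then handed down constructor by constructor.
    	flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
    	flag.Parse()

    	srv := NewServer(*flAutoRestart)
    	fmt.Println("auto-restart enabled:", srv.runtime.autoRestart)
    }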
runtime.go (59 lines changed)
@@ -31,6 +31,7 @@ type Runtime struct {
 	idIndex        *TruncIndex
 	capabilities   *Capabilities
 	kernelVersion  *KernelVersionInfo
+	autoRestart    bool
 }
 
 var sysInitPath string
@@ -167,23 +168,6 @@ func (runtime *Runtime) Register(container *Container) error {
 	// init the wait lock
 	container.waitLock = make(chan struct{})
 
-	// FIXME: if the container is supposed to be running but is not, auto restart it?
-	// if so, then we need to restart monitor and init a new lock
-	// If the container is supposed to be running, make sure of it
-	if container.State.Running {
-		if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
-			return err
-		} else {
-			if !strings.Contains(string(output), "RUNNING") {
-				Debugf("Container %s was supposed to be running be is not.", container.Id)
-				container.State.setStopped(-127)
-				if err := container.ToDisk(); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
 	// Even if not running, we init the lock (prevents races in start/stop/kill)
 	container.State.initLock()
 
@@ -202,11 +186,43 @@ func (runtime *Runtime) Register(container *Container) error {
 	runtime.containers.PushBack(container)
 	runtime.idIndex.Add(container.Id)
 
+	// When we actually restart, Start() do the monitoring.
+	// However, when we simply 'reattach', we have to restart a monitor
+	nomonitor := false
+
+	// FIXME: if the container is supposed to be running but is not, auto restart it?
+	// if so, then we need to restart monitor and init a new lock
+	// If the container is supposed to be running, make sure of it
+	if container.State.Running {
+		if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
+			return err
+		} else {
+			if !strings.Contains(string(output), "RUNNING") {
+				Debugf("Container %s was supposed to be running be is not.", container.Id)
+				if runtime.autoRestart {
+					Debugf("Restarting")
+					container.State.Ghost = false
+					container.State.setStopped(0)
+					if err := container.Start(); err != nil {
+						return err
+					}
+					nomonitor = true
+				} else {
+					Debugf("Marking as stopped")
+					container.State.setStopped(-127)
+					if err := container.ToDisk(); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+
 	// If the container is not running or just has been flagged not running
 	// then close the wait lock chan (will be reset upon start)
 	if !container.State.Running {
 		close(container.waitLock)
-	} else {
+	} else if !nomonitor {
 		container.allocateNetwork()
 		go container.monitor()
 	}
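The block above is the heart of the feature: on registration, a container whose persisted state claims it is running is checked against lxc-info; if it is not actually running, it is either restarted (when the daemon was started with -r) or recorded as stopped. A hedged sketch of that decision, assuming the Runtime and Container types from runtime.go; reconcileOnRegister and isActuallyRunning are hypothetical names, the latter standing in for the lxc-info check:

    // Sketch only: isActuallyRunning is a hypothetical wrapper around the
    // exec.Command("lxc-info", "-n", container.Id) check in the diff above.
    func reconcileOnRegister(runtime *Runtime, container *Container) error {
    	if container.State.Running && !isActuallyRunning(container) {
    		if runtime.autoRestart {
    			// Saved state is stale: relaunch. Start() spawns its own monitor,
    			// so Register() must not start a second one (hence nomonitor above).
    			container.State.Ghost = false
    			container.State.setStopped(0)
    			return container.Start()
    		}
    		// Without -r, just record reality on disk: the container is stopped.
    		container.State.setStopped(-127)
    		return container.ToDisk()
    	}
    	return nil
    }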
@@ -292,8 +308,8 @@ func (runtime *Runtime) restore() error {
 }
 
 // FIXME: harmonize with NewGraph()
-func NewRuntime() (*Runtime, error) {
-	runtime, err := NewRuntimeFromDirectory("/var/lib/docker")
+func NewRuntime(autoRestart bool) (*Runtime, error) {
+	runtime, err := NewRuntimeFromDirectory("/var/lib/docker", autoRestart)
 	if err != nil {
 		return nil, err
 	}
@@ -326,7 +342,7 @@ func NewRuntime() (*Runtime, error) {
 	return runtime, nil
 }
 
-func NewRuntimeFromDirectory(root string) (*Runtime, error) {
+func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
 	runtimeRepo := path.Join(root, "containers")
 
 	if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
@@ -363,6 +379,7 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
 		authConfig:   authConfig,
 		idIndex:      NewTruncIndex(),
 		capabilities: &Capabilities{},
+		autoRestart:  autoRestart,
 	}
 
 	if err := runtime.restore(); err != nil {
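With the constructor signatures updated, enabling the behaviour from code is just a matter of passing true; a small illustrative call (paths as in the diff, error handling abbreviated):

    // Illustrative only: a daemon started with -d -r ends up doing the equivalent of
    runtime, err := NewRuntime(true) // or NewRuntimeFromDirectory("/var/lib/docker", true)
    if err != nil {
    	log.Fatal(err)
    }
    _ = runtime // containers whose saved state says "running" are relaunched in Register()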
@@ -63,7 +63,7 @@ func init() {
 	NetworkBridgeIface = "testdockbr0"
 
 	// Make it our Store root
-	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase)
+	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
 	if err != nil {
 		panic(err)
 	}
@@ -89,7 +89,7 @@ func newTestRuntime() (*Runtime, error) {
 		return nil, err
 	}
 
-	runtime, err := NewRuntimeFromDirectory(root)
+	runtime, err := NewRuntimeFromDirectory(root, false)
 	if err != nil {
 		return nil, err
 	}
@@ -320,7 +320,7 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	runtime1, err := NewRuntimeFromDirectory(root)
+	runtime1, err := NewRuntimeFromDirectory(root, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -379,7 +379,7 @@ func TestRestore(t *testing.T) {
 
 	// Here are are simulating a docker restart - that is, reloading all containers
 	// from scratch
-	runtime2, err := NewRuntimeFromDirectory(root)
+	runtime2, err := NewRuntimeFromDirectory(root, false)
 	if err != nil {
 		t.Fatal(err)
 	}
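The tests pass false everywhere, so existing behaviour is unchanged. A test that wanted to exercise auto-restart would presumably build its runtime with true instead; a hypothetical snippet, not part of the commit, could look like:

    // Hypothetical: a runtime that relaunches previously-running containers on restore.
    runtime3, err := NewRuntimeFromDirectory(root, true)
    if err != nil {
    	t.Fatal(err)
    }
    _ = runtime3 // containers recorded as running but reported stopped by lxc would be started again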