package docker

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/dotcloud/docker/archive"
	"github.com/dotcloud/docker/auth"
	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/graphdb"
	"github.com/dotcloud/docker/registry"
	"github.com/dotcloud/docker/utils"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"
)
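
// Close shuts down the server by closing its underlying runtime.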
func (srv *Server) Close() error {
	return srv.runtime.Close()
}

func init() {
	engine.Register("initapi", jobInitApi)
}

// jobInitApi runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func jobInitApi(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	// FIXME: ImportEnv deprecates ConfigFromJob
	srv, err := NewServer(job.Eng, ConfigFromJob(job))
	if err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if srv.runtime.config.Pidfile != "" {
		job.Logf("Creating pidfile")
		if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
			// FIXME: do we need fatal here instead of returning a job error?
			log.Fatal(err)
		}
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		sig := <-c
		log.Printf("Received signal '%v', exiting\n", sig)
		utils.RemovePidFile(srv.runtime.config.Pidfile)
		srv.Close()
		os.Exit(0)
	}()
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
	// https://github.com/dotcloud/docker/issues/2768
	if srv.runtime.networkManager.bridgeNetwork != nil {
		job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
	}
	if err := job.Eng.Register("export", srv.ContainerExport); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("stop", srv.ContainerStop); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("kill", srv.ContainerKill); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("wait", srv.ContainerWait); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("tag", srv.ImageTag); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("resize", srv.ContainerResize); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("commit", srv.ContainerCommit); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	if err := job.Eng.Register("info", srv.DockerInfo); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	return engine.StatusOK
}
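
// ListenAndServe starts an HTTP API listener for each protocol://address pair
// given in job.Args and waits for them, returning an error status if any listener fails.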
func (srv *Server) ListenAndServe(job *engine.Job) engine.Status {
	protoAddrs := job.Args
	chErrors := make(chan error, len(protoAddrs))
	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		switch protoAddrParts[0] {
		case "unix":
			if err := syscall.Unlink(protoAddrParts[1]); err != nil && !os.IsNotExist(err) {
				log.Fatal(err)
			}
		case "tcp":
			if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") {
				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
			}
		default:
			job.Errorf("Invalid protocol format.")
			return engine.StatusErr
		}
		go func() {
			// FIXME: merge Server.ListenAndServe with ListenAndServe
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
		}()
	}
	for i := 0; i < len(protoAddrs); i += 1 {
		err := <-chErrors
		if err != nil {
			job.Error(err)
			return engine.StatusErr
		}
	}
	return engine.StatusOK
}

// simpleVersionInfo is a simple implementation of
// the interface VersionInfo, which is used
// to provide version information for some product,
// component, etc. It stores the product name and the version
// as strings and returns them on calls to Name() and Version().
type simpleVersionInfo struct {
	name    string
	version string
}

func (v *simpleVersionInfo) Name() string {
	return v.name
}

func (v *simpleVersionInfo) Version() string {
	return v.version
}

// ContainerKill sends a signal to the container.
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
	if n := len(job.Args); n < 1 || n > 2 {
		job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
		return engine.StatusErr
	}
	name := job.Args[0]
	var sig uint64
	if len(job.Args) == 2 && job.Args[1] != "" {
		var err error
		// The largest legal signal is 31, so let's parse on 5 bits
		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
		if err != nil {
			job.Errorf("Invalid signal: %s", job.Args[1])
			return engine.StatusErr
		}
	}
	if container := srv.runtime.Get(name); container != nil {
		// If no signal is passed, perform regular Kill (SIGKILL + wait())
		if sig == 0 {
			if err := container.Kill(); err != nil {
				job.Errorf("Cannot kill container %s: %s", name, err)
				return engine.StatusErr
			}
			srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
		} else {
			// Otherwise, just send the requested signal
			if err := container.kill(int(sig)); err != nil {
				job.Errorf("Cannot kill container %s: %s", name, err)
				return engine.StatusErr
			}
			// FIXME: Add event for signals
		}
	} else {
		job.Errorf("No such container: %s", name)
		return engine.StatusErr
	}
	return engine.StatusOK
}
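
// ContainerExport streams the contents of a container's filesystem to
// job.Stdout as an uncompressed tar archive.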
func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		job.Errorf("Usage: %s container_id", job.Name)
		return engine.StatusErr
	}
	name := job.Args[0]
	if container := srv.runtime.Get(name); container != nil {
		data, err := container.Export()
		if err != nil {
			job.Errorf("%s: %s", name, err)
			return engine.StatusErr
		}

		// Stream the entire contents of the container (basically a volatile snapshot)
		if _, err := io.Copy(job.Stdout, data); err != nil {
			job.Errorf("%s: %s", name, err)
			return engine.StatusErr
		}
		// FIXME: factor job-specific LogEvent to engine.Job.Run()
		srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
		return engine.StatusOK
	}
	job.Errorf("No such container: %s", name)
	return engine.StatusErr
}

// ImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tar ball.
// name is the set of tags to export.
// out is the writer where the images are written to.
func (srv *Server) ImageExport(name string, out io.Writer) error {
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempdir)

	utils.Debugf("Serializing %s", name)

	rootRepo, err := srv.runtime.repositories.Get(name)
	if err != nil {
		return err
	}
	if rootRepo != nil {
		for _, id := range rootRepo {
			image, err := srv.ImageInspect(id)
			if err != nil {
				return err
			}

			if err := srv.exportImage(image, tempdir); err != nil {
				return err
			}
		}

		// write repositories
		rootRepoMap := map[string]Repository{}
		rootRepoMap[name] = rootRepo
		rootRepoJson, _ := json.Marshal(rootRepoMap)

		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil {
			return err
		}
	} else {
		image, err := srv.ImageInspect(name)
		if err != nil {
			return err
		}
		if err := srv.exportImage(image, tempdir); err != nil {
			return err
		}
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return err
	}

	if _, err := io.Copy(out, fs); err != nil {
		return err
	}
	return nil
}
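
// exportImage writes the image and each of its parents into tempdir, one
// sub-directory per layer containing a VERSION file, the image json and a layer.tar.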
func (srv *Server) exportImage(image *Image, tempdir string) error {
	for i := image; i != nil; {
		// temporary directory
		tmpImageDir := path.Join(tempdir, i.ID)
		if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
			if os.IsExist(err) {
				return nil
			}
			return err
		}

		var version = "1.0"
		var versionBuf = []byte(version)

		if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil {
			return err
		}

		// serialize json
		b, err := json.Marshal(i)
		if err != nil {
			return err
		}
		if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil {
			return err
		}

		// serialize filesystem
		fs, err := i.TarLayer()
		if err != nil {
			return err
		}

		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
		if err != nil {
			return err
		}
		if _, err = io.Copy(fsTar, fs); err != nil {
			return err
		}
		fsTar.Close()

		// find parent
		if i.Parent != "" {
			i, err = srv.ImageInspect(i.Parent)
			if err != nil {
				return err
			}
		} else {
			i = nil
		}
	}
	return nil
}

// ImageLoad loads a set of images into the repository. This is the complement of ImageExport.
// The input stream is an uncompressed tar ball containing images and metadata.
func (srv *Server) ImageLoad(in io.Reader) error {
	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpImageDir)

	var (
		repoTarFile = path.Join(tmpImageDir, "repo.tar")
		repoDir     = path.Join(tmpImageDir, "repo")
	)

	tarFile, err := os.Create(repoTarFile)
	if err != nil {
		return err
	}
	if _, err := io.Copy(tarFile, in); err != nil {
		return err
	}
	tarFile.Close()

	repoFile, err := os.Open(repoTarFile)
	if err != nil {
		return err
	}
	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
		return err
	}
	if err := archive.Untar(repoFile, repoDir, nil); err != nil {
		return err
	}

	dirs, err := ioutil.ReadDir(repoDir)
	if err != nil {
		return err
	}

	for _, d := range dirs {
		if d.IsDir() {
			if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil {
				return err
			}
		}
	}

	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
	if err == nil {
		repositories := map[string]Repository{}
		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
			return err
		}

		for imageName, tagMap := range repositories {
			for tag, address := range tagMap {
				if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil {
					return err
				}
			}
		}
	} else if !os.IsNotExist(err) {
		return err
	}

	return nil
}
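
// recursiveLoad registers the image at address from the unpacked repo
// directory, loading its parent chain first so that every layer's
// dependencies exist in the graph before registration.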
func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
	if _, err := srv.ImageInspect(address); err != nil {
		utils.Debugf("Loading %s", address)

		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
		if err != nil {
			utils.Debugf("Error reading json: %s", err)
			return err
		}

		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
		if err != nil {
			utils.Debugf("Error reading embedded tar: %s", err)
			return err
		}
		img, err := NewImgJSON(imageJson)
		if err != nil {
			utils.Debugf("Error unmarshalling json: %s", err)
			return err
		}
		if img.Parent != "" {
			if !srv.runtime.graph.Exists(img.Parent) {
				if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
					return err
				}
			}
		}
		if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil {
			return err
		}
	}
	utils.Debugf("Completed processing %s", address)

	return nil
}
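
// ImagesSearch queries the public index for repositories matching term.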
func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
	r, err := registry.NewRegistry(nil, srv.HTTPRequestFactory(nil), auth.IndexServerAddress())
	if err != nil {
		return nil, err
	}
	results, err := r.SearchRepositories(term)
	if err != nil {
		return nil, err
	}
	return results.Results, nil
}
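
// ImageInsert downloads url into a temporary container created from the image
// name, injects it at path and commits the result as a new image.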
func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	img, err := srv.runtime.repositories.LookupImage(name)
	if err != nil {
		return err
	}

	file, err := utils.Download(url)
	if err != nil {
		return err
	}
	defer file.Body.Close()

	config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.capabilities)
	if err != nil {
		return err
	}

	c, _, err := srv.runtime.Create(config, "")
	if err != nil {
		return err
	}

	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, "", "Downloading"), path); err != nil {
		return err
	}
	// FIXME: Handle custom repo, tag comment, author
	img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus(img.ID, ""))
	return nil
}
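
// ImagesViz writes the image graph to out in graphviz dot format.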
func (srv *Server) ImagesViz(out io.Writer) error {
	images, _ := srv.runtime.graph.Map()
	if images == nil {
		return nil
	}
	out.Write([]byte("digraph docker {\n"))

	var (
		parentImage *Image
		err         error
	)
	for _, image := range images {
		parentImage, err = image.GetParent()
		if err != nil {
			return fmt.Errorf("Error while getting parent image: %v", err)
		}
		if parentImage != nil {
			out.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
		} else {
			out.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
		}
	}

	reporefs := make(map[string][]string)

	for name, repository := range srv.runtime.repositories.Repositories {
		for tag, id := range repository {
			reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
		}
	}

	for id, repos := range reporefs {
		out.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
	}
	out.Write([]byte(" base [style=invisible]\n}\n"))
	return nil
}
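
// Images returns the list of images, grouping repo:tag references by image ID;
// all includes intermediate images and filter restricts the result to matching
// repository names.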
func (srv *Server) Images(all bool, filter string) ([]APIImages, error) {
	var (
		allImages map[string]*Image
		err       error
	)
	if all {
		allImages, err = srv.runtime.graph.Map()
	} else {
		allImages, err = srv.runtime.graph.Heads()
	}
	if err != nil {
		return nil, err
	}
	lookup := make(map[string]APIImages)
	for name, repository := range srv.runtime.repositories.Repositories {
		if filter != "" {
			if match, _ := path.Match(filter, name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := srv.runtime.graph.Get(id)
			if err != nil {
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}

			if out, exists := lookup[id]; exists {
				out.RepoTags = append(out.RepoTags, fmt.Sprintf("%s:%s", name, tag))

				lookup[id] = out
			} else {
				var out APIImages

				delete(allImages, id)

				out.ParentId = image.Parent
				out.RepoTags = []string{fmt.Sprintf("%s:%s", name, tag)}
				out.ID = image.ID
				out.Created = image.Created.Unix()
				out.Size = image.Size
				out.VirtualSize = image.getParentsSize(0) + image.Size

				lookup[id] = out
			}
		}
	}

	outs := make([]APIImages, 0, len(lookup))
	for _, value := range lookup {
		outs = append(outs, value)
	}

	// Display images which aren't part of a repository/tag
	if filter == "" {
		for _, image := range allImages {
			var out APIImages
			out.ID = image.ID
			out.ParentId = image.Parent
			out.RepoTags = []string{"<none>:<none>"}
			out.Created = image.Created.Unix()
			out.Size = image.Size
			out.VirtualSize = image.getParentsSize(0) + image.Size
			outs = append(outs, out)
		}
	}

	sortImagesByCreationAndTag(outs)
	return outs, nil
}
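
// DockerInfo writes system-wide information (container and image counts,
// driver, kernel and LXC versions, capabilities) to job.Stdout.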
func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
	images, _ := srv.runtime.graph.Map()
	var imgcount int64
	if images == nil {
		imgcount = 0
	} else {
		imgcount = int64(len(images))
	}
	lxcVersion := ""
	if output, err := exec.Command("lxc-version").CombinedOutput(); err == nil {
		outputStr := string(output)
		if len(strings.SplitN(outputStr, ":", 2)) == 2 {
			lxcVersion = strings.TrimSpace(strings.SplitN(string(output), ":", 2)[1])
		}
	}
	kernelVersion := "<unknown>"
	if kv, err := utils.GetKernelVersion(); err == nil {
		kernelVersion = kv.String()
	}

	v := &engine.Env{}
	v.SetInt("Containers", int64(len(srv.runtime.List())))
	v.SetInt("Images", imgcount)
	v.Set("Driver", srv.runtime.driver.String())
	v.SetJson("DriverStatus", srv.runtime.driver.Status())
	v.SetBool("MemoryLimit", srv.runtime.capabilities.MemoryLimit)
	v.SetBool("SwapLimit", srv.runtime.capabilities.SwapLimit)
	v.SetBool("IPv4Forwarding", !srv.runtime.capabilities.IPv4ForwardingDisabled)
	v.SetBool("Debug", os.Getenv("DEBUG") != "")
	v.SetInt("NFd", int64(utils.GetTotalUsedFds()))
	v.SetInt("NGoroutines", int64(runtime.NumGoroutine()))
	v.Set("LXCVersion", lxcVersion)
	v.SetInt("NEventsListener", int64(len(srv.events)))
	v.Set("KernelVersion", kernelVersion)
	v.Set("IndexServerAddress", auth.IndexServerAddress())
	if _, err := v.WriteTo(job.Stdout); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	return engine.StatusOK
}
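
// ImageHistory walks the parent chain of the named image and returns one
// APIHistory entry per layer.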
func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
	image, err := srv.runtime.repositories.LookupImage(name)
	if err != nil {
		return nil, err
	}

	lookupMap := make(map[string][]string)
	for name, repository := range srv.runtime.repositories.Repositories {
		for tag, id := range repository {
			// If the ID already has a reverse lookup, do not update it unless for "latest"
			if _, exists := lookupMap[id]; !exists {
				lookupMap[id] = []string{}
			}
			lookupMap[id] = append(lookupMap[id], name+":"+tag)
		}
	}

	outs := []APIHistory{} //produce [] when empty instead of 'null'
	err = image.WalkHistory(func(img *Image) error {
		var out APIHistory
		out.ID = img.ID
		out.Created = img.Created.Unix()
		out.CreatedBy = strings.Join(img.ContainerConfig.Cmd, " ")
		out.Tags = lookupMap[img.ID]
		out.Size = img.Size
		outs = append(outs, out)
		return nil
	})
	return outs, nil
}
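
// ContainerTop returns the processes running inside a container, as reported
// by lxc-ps with the given ps arguments.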
func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
	if container := srv.runtime.Get(name); container != nil {
		output, err := exec.Command("lxc-ps", "--name", container.ID, "--", psArgs).CombinedOutput()
		if err != nil {
			return nil, fmt.Errorf("lxc-ps: %s (%s)", err, output)
		}
		procs := APITop{}
		for i, line := range strings.Split(string(output), "\n") {
			if len(line) == 0 {
				continue
			}
			words := []string{}
			scanner := bufio.NewScanner(strings.NewReader(line))
			scanner.Split(bufio.ScanWords)
			if !scanner.Scan() {
				return nil, fmt.Errorf("Wrong output using lxc-ps")
			}
			// no scanner.Text because we skip container id
			for scanner.Scan() {
				if i != 0 && len(words) == len(procs.Titles) {
					words[len(words)-1] = fmt.Sprintf("%s %s", words[len(words)-1], scanner.Text())
				} else {
					words = append(words, scanner.Text())
				}
			}
			if i == 0 {
				procs.Titles = words
			} else {
				procs.Processes = append(procs.Processes, words)
			}
		}
		return &procs, nil
	}
	return nil, fmt.Errorf("No such container: %s", name)
}
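
// ContainerChanges returns the filesystem changes of a container relative to its image.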
func (srv *Server) ContainerChanges(name string) ([]archive.Change, error) {
	if container := srv.runtime.Get(name); container != nil {
		return container.Changes()
	}
	return nil, fmt.Errorf("No such container: %s", name)
}
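
// Containers returns the list of containers, honoring the all, size, n,
// since and before filters.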
func (srv *Server) Containers(all, size bool, n int, since, before string) []APIContainers {
	var foundBefore bool
	var displayed int
	out := []APIContainers{}

	names := map[string][]string{}
	srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)

	for _, container := range srv.runtime.List() {
		if !container.State.IsRunning() && !all && n == -1 && since == "" && before == "" {
			continue
		}
		if before != "" && !foundBefore {
			if container.ID == before || utils.TruncateID(container.ID) == before {
				foundBefore = true
			}
			continue
		}
		if displayed == n {
			break
		}
		if container.ID == since || utils.TruncateID(container.ID) == since {
			break
		}
		displayed++
		c := createAPIContainer(names[container.ID], container, size, srv.runtime)
		out = append(out, c)
	}
	return out
}
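
// createAPIContainer converts a Container into the APIContainers structure
// returned by the remote API.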
func createAPIContainer(names []string, container *Container, size bool, runtime *Runtime) APIContainers {
	c := APIContainers{
		ID: container.ID,
	}
	c.Names = names
	c.Image = runtime.repositories.ImageName(container.Image)
	c.Command = fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
	c.Created = container.Created.Unix()
	c.Status = container.State.String()
	c.Ports = container.NetworkSettings.PortMappingAPI()
	if size {
		c.SizeRw, c.SizeRootFs = container.GetSize()
	}
	return c
}
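
// ContainerCommit creates a new image from a container's changes.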
func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
		return engine.StatusErr
	}
	name := job.Args[0]

	container := srv.runtime.Get(name)
	if container == nil {
		job.Errorf("No such container: %s", name)
		return engine.StatusErr
	}
	var config Config
	if err := job.GetenvJson("config", &config); err != nil {
		job.Error(err)
		return engine.StatusErr
	}

	img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config)
	if err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}
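
// ImageTag applies a repository name and optional tag to an image.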
func (srv *Server) ImageTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
		return engine.StatusErr
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		job.Error(err)
		return engine.StatusErr
	}
	return engine.StatusOK
}
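
// pullImage downloads an image and all the layers it depends on from a
// registry endpoint, registering each missing layer in the local graph.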
func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
	history, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
			utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer srv.poolRemove("pull", "layer:"+id)

		if !srv.runtime.graph.Exists(id) {
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				// FIXME: Keep going in case of error?
				return err
			}
			img, err := NewImgJSON(imgJSON)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return fmt.Errorf("Failed to parse json: %s", err)
			}

			// Get the layer
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil))
			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return err
			}
			defer layer.Close()
			if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
				return err
			}
		}
		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
	}
	return nil
}
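
// pullRepository fetches the tag list and every matching image of a remote
// repository, optionally downloading the images in parallel.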
func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))

	repoData, err := r.GetRepositoryData(remoteName)
	if err != nil {
		return err
	}

	utils.Debugf("Retrieving the tag list")
	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
	if err != nil {
		utils.Errorf("%v", err)
		return err
	}

	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	utils.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
		}
		repoData.ImgList[id].Tag = askedTag
	}

	errors := make(chan error)
	for _, image := range repoData.ImgList {
		downloadImage := func(img *registry.ImgData) {
			if askedTag != "" && img.Tag != askedTag {
				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
				if parallel {
					errors <- nil
				}
				return
			}

			if img.Tag == "" {
				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				if parallel {
					errors <- nil
				}
				return
			}

			// ensure no two downloads of the same image happen at the same time
			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
				if parallel {
					errors <- nil
				}
				return
			}
			defer srv.poolRemove("pull", "img:"+img.ID)

			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
			success := false
			var lastErr error
			for _, ep := range repoData.Endpoints {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
					// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
					// As the error is also given to the output stream the user will see the error.
					lastErr = err
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
					continue
				}
				success = true
				break
			}
			if !success {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
				if parallel {
					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
					return
				}
			}
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

			if parallel {
				errors <- nil
			}
		}

		if parallel {
			go downloadImage(image)
		} else {
			downloadImage(image)
		}
	}
	if parallel {
		var lastError error
		for i := 0; i < len(repoData.ImgList); i++ {
			if err := <-errors; err != nil {
				lastError = err
			}
		}
		if lastError != nil {
			return lastError
		}
	}
	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil {
			return err
		}
	}
	if err := srv.runtime.repositories.Save(); err != nil {
		return err
	}

	return nil
}
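
// poolAdd registers an in-progress pull or push for key and returns the
// channel that waiters can receive on; poolRemove closes it when the
// operation finishes. An error is returned if an operation on the same key
// is already in progress.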
func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
	srv.Lock()
	defer srv.Unlock()

	if c, exists := srv.pullingPool[key]; exists {
		return c, fmt.Errorf("pull %s is already in progress", key)
	}
	if c, exists := srv.pushingPool[key]; exists {
		return c, fmt.Errorf("push %s is already in progress", key)
	}

	c := make(chan struct{})
	switch kind {
	case "pull":
		srv.pullingPool[key] = c
	case "push":
		srv.pushingPool[key] = c
	default:
		return nil, fmt.Errorf("Unknown pool type")
	}
	return c, nil
}
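
// poolRemove closes and removes the pool entry for key, waking up any waiters.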
func (srv *Server) poolRemove(kind, key string) error {
	srv.Lock()
	defer srv.Unlock()
	switch kind {
	case "pull":
		if c, exists := srv.pullingPool[key]; exists {
			close(c)
			delete(srv.pullingPool, key)
		}
	case "push":
		if c, exists := srv.pushingPool[key]; exists {
			close(c)
			delete(srv.pushingPool, key)
		}
	default:
		return fmt.Errorf("Unknown pool type")
	}
	return nil
}
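
// ImagePull pulls a repository (or a single tag of it) from its registry,
// waiting instead if the same pull is already in progress.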
func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
	out = utils.NewWriteFlusher(out)

	c, err := srv.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			out.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return nil
		}
		return err
	}
	defer srv.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return err
	}

	r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
	if err != nil {
		return err
	}

	if endpoint == auth.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = srv.pullRepository(r, out, localName, remoteName, tag, sf, parallel); err != nil {
		return err
	}

	return nil
}

// getImageList retrieves all the images to be uploaded in the correct order.
// Note: we can't use a map as it is not ordered
func (srv *Server) getImageList(localRepo map[string]string) ([][]*registry.ImgData, error) {
	imgList := map[string]*registry.ImgData{}
	depGraph := utils.NewDependencyGraph()

	for tag, id := range localRepo {
		img, err := srv.runtime.graph.Get(id)
		if err != nil {
			return nil, err
		}
		depGraph.NewNode(img.ID)
		img.WalkHistory(func(current *Image) error {
			imgList[current.ID] = &registry.ImgData{
				ID:  current.ID,
				Tag: tag,
			}
			parent, err := current.GetParent()
			if err != nil {
				return err
			}
			if parent == nil {
				return nil
			}
			depGraph.NewNode(parent.ID)
			depGraph.AddDependency(current.ID, parent.ID)
			return nil
		})
	}

	traversalMap, err := depGraph.GenerateTraversalMap()
	if err != nil {
		return nil, err
	}

	utils.Debugf("Traversal map: %v", traversalMap)
	result := [][]*registry.ImgData{}
	for _, round := range traversalMap {
		dataRound := []*registry.ImgData{}
		for _, imgID := range round {
			dataRound = append(dataRound, imgList[imgID])
		}
		result = append(result, dataRound)
	}
	return result, nil
}
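
// flatten collapses the per-round image lists into a single slice.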
func flatten(slc [][]*registry.ImgData) []*registry.ImgData {
	result := []*registry.ImgData{}
	for _, x := range slc {
		result = append(result, x...)
	}
	return result
}
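
// pushRepository uploads the images of a local repository to the registry,
// one dependency round at a time, and registers the tags.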
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	imgList, err := srv.getImageList(localRepo)
	if err != nil {
		return err
	}
	flattenedImgList := flatten(imgList)
	out.Write(sf.FormatStatus("", "Sending image list"))

	var repoData *registry.RepositoryData
	repoData, err = r.PushImageJSONIndex(remoteName, flattenedImgList, false, nil)
	if err != nil {
		return err
	}

	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
		// This section can not be parallelized (each round depends on the previous one)
		for i, round := range imgList {
			// FIXME: This section can be parallelized
			for _, elem := range round {
				var pushTags func() error
				pushTags = func() error {
					if i < (len(imgList) - 1) {
						// Only tag the top layer in the repository
						return nil
					}

					out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", utils.TruncateID(elem.ID), ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
					if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
						return err
					}
					return nil
				}
				if _, exists := repoData.ImgList[elem.ID]; exists {
					if err := pushTags(); err != nil {
						return err
					}
					out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil))
					continue
				} else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
					if err := pushTags(); err != nil {
						return err
					}
					out.Write(sf.FormatProgress(utils.TruncateID(elem.ID), "Image already pushed, skipping", nil))
					continue
				}
				checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf)
				if err != nil {
					// FIXME: Continue on error?
					return err
				}
				elem.Checksum = checksum

				if err := pushTags(); err != nil {
					return err
				}
			}
		}
	}

	if _, err := r.PushImageJSONIndex(remoteName, flattenedImgList, true, repoData.Endpoints); err != nil {
		return err
	}

	return nil
}
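
// pushImage uploads a single image's json and layer to a registry endpoint
// and returns the layer checksum.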
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	return imgData.Checksum, nil
}
|
|
|
|
|
2013-08-12 21:53:06 +04:00
|
|
|
// FIXME: Allow to interrupt current push when new push of same image is done.
|
2013-08-22 23:15:31 +04:00
|
|
|
func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string) error {
|
2013-11-21 01:51:05 +04:00
|
|
|
if _, err := srv.poolAdd("push", localName); err != nil {
|
2013-06-18 03:10:00 +04:00
|
|
|
return err
|
|
|
|
}
|
2013-07-09 04:26:50 +04:00
|
|
|
defer srv.poolRemove("push", localName)
|
2013-06-18 03:10:00 +04:00
|
|
|
|
2013-07-05 23:20:58 +04:00
|
|
|
// Resolve the repository name from FQN to endpoint + name
|
2013-07-09 04:26:50 +04:00
|
|
|
endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
|
2013-07-06 02:26:08 +04:00
|
|
|
if err != nil {
|
2013-06-18 03:10:00 +04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2013-05-20 21:58:35 +04:00
|
|
|
out = utils.NewWriteFlusher(out)
|
2013-07-09 04:26:50 +04:00
|
|
|
img, err := srv.runtime.graph.Get(localName)
|
2013-10-22 22:49:13 +04:00
|
|
|
r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
|
2013-06-17 22:13:40 +04:00
|
|
|
if err2 != nil {
|
|
|
|
return err2
|
|
|
|
}
|
2013-06-06 02:12:50 +04:00
|
|
|
|
2013-05-06 15:34:31 +04:00
|
|
|
if err != nil {
|
2013-07-09 04:26:50 +04:00
|
|
|
reposLen := len(srv.runtime.repositories.Repositories[localName])
|
2013-07-24 21:10:59 +04:00
|
|
|
out.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
|
2013-05-08 03:33:12 +04:00
|
|
|
// The name did not resolve to a single image, so try to push it as a repository
|
2013-07-09 04:26:50 +04:00
|
|
|
if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
|
2013-10-22 22:49:13 +04:00
|
|
|
if err := srv.pushRepository(r, out, localName, remoteName, localRepo, sf); err != nil {
|
2013-05-06 15:34:31 +04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2013-07-05 23:20:58 +04:00
|
|
|
|
|
|
|
var token []string
|
2013-07-24 21:10:59 +04:00
|
|
|
out.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
|
2013-07-23 03:44:34 +04:00
|
|
|
if _, err := srv.pushImage(r, out, remoteName, img.ID, endpoint, token, sf); err != nil {
|
2013-05-06 15:34:31 +04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
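// NOTE: illustrative sketch, not the actual poolAdd/poolRemove implementation
// used at the top of ImagePush (that code lives elsewhere in this file). It
// shows the general pattern suggested by the pullingPool/pushingPool fields of
// Server: one entry per in-flight name, guarded by a mutex, so a second push
// of the same repository is refused instead of running twice.
type examplePushPool struct {
	sync.Mutex
	inflight map[string]chan struct{}
}

func (p *examplePushPool) add(name string) error {
	p.Lock()
	defer p.Unlock()
	if p.inflight == nil {
		p.inflight = make(map[string]chan struct{})
	}
	if _, exists := p.inflight[name]; exists {
		return fmt.Errorf("push of %s is already in progress", name)
	}
	p.inflight[name] = make(chan struct{})
	return nil
}

func (p *examplePushPool) remove(name string) {
	p.Lock()
	defer p.Unlock()
	if c, exists := p.inflight[name]; exists {
		close(c)
		delete(p.inflight, name)
	}
}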
|
|
|
|
|
2013-05-25 19:09:46 +04:00
|
|
|
func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Writer, sf *utils.StreamFormatter) error {
|
2013-05-06 13:31:22 +04:00
|
|
|
var archive io.Reader
|
|
|
|
var resp *http.Response
|
|
|
|
|
|
|
|
if src == "-" {
|
2013-05-08 05:06:49 +04:00
|
|
|
archive = in
|
2013-05-06 13:31:22 +04:00
|
|
|
} else {
|
|
|
|
u, err := url.Parse(src)
|
|
|
|
if err != nil {
|
2013-05-25 19:09:46 +04:00
|
|
|
return err
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
if u.Scheme == "" {
|
|
|
|
u.Scheme = "http"
|
|
|
|
u.Host = src
|
|
|
|
u.Path = ""
|
|
|
|
}
|
2013-07-24 21:10:59 +04:00
|
|
|
out.Write(sf.FormatStatus("", "Downloading from %s", u))
|
2013-05-08 03:33:12 +04:00
|
|
|
// Download with curl (pretty progress bar)
|
|
|
|
// If curl is not available, fall back to http.Get()
|
2013-12-04 23:54:11 +04:00
|
|
|
resp, err = utils.Download(u.String())
|
2013-05-06 13:31:22 +04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-11-29 00:16:57 +04:00
|
|
|
archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf, true, "", "Importing")
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-05-08 03:33:12 +04:00
|
|
|
// Optionally register the image at REPO/TAG
|
2013-05-06 13:31:22 +04:00
|
|
|
if repo != "" {
|
2013-06-04 22:00:22 +04:00
|
|
|
if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil {
|
2013-05-06 13:31:22 +04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
out.Write(sf.FormatStatus("", img.ID))
|
2013-05-06 13:31:22 +04:00
|
|
|
return nil
|
|
|
|
}
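// NOTE: illustrative sketch of the source handling in ImageImport above:
// "-" means "read the tarball from the request body", and a source without a
// URL scheme is treated as a bare host and fetched over plain http. The
// helper name is hypothetical and not part of server.go.
func exampleNormalizeImportSrc(src string) (useStdin bool, urlStr string, err error) {
	if src == "-" {
		return true, "", nil
	}
	u, err := url.Parse(src)
	if err != nil {
		return false, "", err
	}
	if u.Scheme == "" {
		u.Scheme = "http"
		u.Host = src
		u.Path = ""
	}
	return false, u.String(), nil
}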
|
|
|
|
|
2013-11-20 11:37:03 +04:00
|
|
|
func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
|
2013-10-28 06:20:00 +04:00
|
|
|
var name string
|
|
|
|
if len(job.Args) == 1 {
|
|
|
|
name = job.Args[0]
|
|
|
|
} else if len(job.Args) > 1 {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Printf("Usage: %s", job.Name)
|
|
|
|
return engine.StatusErr
|
2013-10-28 06:20:00 +04:00
|
|
|
}
|
|
|
|
var config Config
|
|
|
|
if err := job.ExportEnv(&config); err != nil {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
2013-10-28 06:20:00 +04:00
|
|
|
}
|
2013-06-14 20:46:04 +04:00
|
|
|
if config.Memory != 0 && config.Memory < 524288 {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("Minimum memory limit allowed is 512k")
|
|
|
|
return engine.StatusErr
|
2013-06-14 20:46:04 +04:00
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
|
|
|
|
config.Memory = 0
|
|
|
|
}
|
|
|
|
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
|
|
|
|
config.MemorySwap = -1
|
|
|
|
}
|
2013-10-28 06:20:00 +04:00
|
|
|
container, buildWarnings, err := srv.runtime.Create(&config, name)
|
2013-05-06 13:31:22 +04:00
|
|
|
if err != nil {
|
|
|
|
if srv.runtime.graph.IsNotExist(err) {
|
2013-08-18 07:03:54 +04:00
|
|
|
_, tag := utils.ParseRepositoryTag(config.Image)
|
|
|
|
if tag == "" {
|
|
|
|
tag = DEFAULTTAG
|
|
|
|
}
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
|
|
|
|
return engine.StatusErr
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
|
2013-11-14 10:08:08 +04:00
|
|
|
// FIXME: this is necessary because runtime.Create might return a nil container
|
|
|
|
// with a non-nil error. This should not happen! Once it's fixed we
|
|
|
|
// can remove this workaround.
|
|
|
|
if container != nil {
|
|
|
|
job.Printf("%s\n", container.ID)
|
|
|
|
}
|
2013-10-28 06:20:00 +04:00
|
|
|
for _, warning := range buildWarnings {
|
|
|
|
job.Errorf("%s\n", warning)
|
|
|
|
}
|
2013-11-20 11:37:03 +04:00
|
|
|
return engine.StatusOK
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
func (srv *Server) ContainerRestart(name string, t int) error {
|
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
|
|
|
if err := container.Restart(t); err != nil {
|
2013-10-30 22:45:11 +04:00
|
|
|
return fmt.Errorf("Cannot restart container %s: %s", name, err)
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
|
2013-05-06 13:31:22 +04:00
|
|
|
} else {
|
|
|
|
return fmt.Errorf("No such container: %s", name)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-10-05 06:25:15 +04:00
|
|
|
func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool) error {
|
2013-10-26 03:49:49 +04:00
|
|
|
container := srv.runtime.Get(name)
|
|
|
|
|
2013-10-05 06:25:15 +04:00
|
|
|
if removeLink {
|
2013-10-26 03:49:49 +04:00
|
|
|
if container == nil {
|
|
|
|
return fmt.Errorf("No such link: %s", name)
|
2013-10-05 06:25:15 +04:00
|
|
|
}
|
2013-11-04 21:28:40 +04:00
|
|
|
name, err := srv.runtime.getFullName(name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-10-26 03:49:49 +04:00
|
|
|
parent, n := path.Split(name)
|
2013-10-30 22:45:11 +04:00
|
|
|
if parent == "/" {
|
|
|
|
return fmt.Errorf("Conflict, cannot remove the default name of the container")
|
|
|
|
}
|
2013-10-05 06:25:15 +04:00
|
|
|
pe := srv.runtime.containerGraph.Get(parent)
|
2013-10-29 06:19:31 +04:00
|
|
|
if pe == nil {
|
|
|
|
return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
|
2013-10-05 06:25:15 +04:00
|
|
|
}
|
2013-10-29 06:19:31 +04:00
|
|
|
parentContainer := srv.runtime.Get(pe.ID())
|
2013-10-05 06:25:15 +04:00
|
|
|
|
2013-10-29 06:19:31 +04:00
|
|
|
if parentContainer != nil && parentContainer.activeLinks != nil {
|
|
|
|
if link, exists := parentContainer.activeLinks[n]; exists {
|
|
|
|
link.Disable()
|
|
|
|
} else {
|
|
|
|
utils.Debugf("Could not find active link for %s", name)
|
|
|
|
}
|
2013-10-26 03:49:49 +04:00
|
|
|
}
|
|
|
|
|
2013-10-05 06:25:15 +04:00
|
|
|
if err := srv.runtime.containerGraph.Delete(name); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2013-10-29 06:19:31 +04:00
|
|
|
|
2013-10-26 03:49:49 +04:00
|
|
|
if container != nil {
|
2013-11-22 00:21:03 +04:00
|
|
|
if container.State.IsRunning() {
|
2013-06-20 19:45:30 +04:00
|
|
|
return fmt.Errorf("Impossible to remove a running container, please stop it first")
|
|
|
|
}
|
2013-05-06 13:52:15 +04:00
|
|
|
volumes := make(map[string]struct{})
|
2013-11-13 10:59:24 +04:00
|
|
|
|
|
|
|
binds := make(map[string]struct{})
|
|
|
|
|
|
|
|
for _, bind := range container.hostConfig.Binds {
|
|
|
|
splitBind := strings.Split(bind, ":")
|
|
|
|
source := splitBind[0]
|
|
|
|
binds[source] = struct{}{}
|
|
|
|
}
|
|
|
|
|
2013-05-06 13:52:15 +04:00
|
|
|
// Store all the deleted containers volumes
|
|
|
|
for _, volumeId := range container.Volumes {
|
2013-11-13 10:59:24 +04:00
|
|
|
|
|
|
|
// Skip volumes that were bind-mounted from the host
|
|
|
|
if _, exists := binds[volumeId]; exists {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
volumeId = strings.TrimSuffix(volumeId, "/layer")
|
2013-10-08 20:35:47 +04:00
|
|
|
volumeId = filepath.Base(volumeId)
|
2013-05-06 13:52:15 +04:00
|
|
|
volumes[volumeId] = struct{}{}
|
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
if err := srv.runtime.Destroy(container); err != nil {
|
2013-10-30 22:45:11 +04:00
|
|
|
return fmt.Errorf("Cannot destroy container %s: %s", name, err)
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))
|
2013-05-06 13:52:15 +04:00
|
|
|
|
2013-05-10 06:19:55 +04:00
|
|
|
if removeVolume {
|
2013-05-06 13:52:15 +04:00
|
|
|
// Retrieve all volumes from all remaining containers
|
|
|
|
usedVolumes := make(map[string]*Container)
|
|
|
|
for _, container := range srv.runtime.List() {
|
|
|
|
for _, containerVolumeId := range container.Volumes {
|
|
|
|
usedVolumes[containerVolumeId] = container
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for volumeId := range volumes {
|
|
|
|
// If the requested volume is still used by another container, skip it (see the sketch after this function)
|
|
|
|
if c, exists := usedVolumes[volumeId]; exists {
|
2013-06-04 22:00:22 +04:00
|
|
|
log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
|
2013-05-06 13:52:15 +04:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
if err := srv.runtime.volumes.Delete(volumeId); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
} else {
|
|
|
|
return fmt.Errorf("No such container: %s", name)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
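// NOTE: illustrative sketch of the volume bookkeeping in ContainerDestroy
// above: paths that were bind-mounted from the host are excluded up front,
// and a volume is only removable when no remaining container still uses it.
// The helper name and its inputs are hypothetical; the real code builds the
// two maps from container.hostConfig.Binds and srv.runtime.List().
func exampleRemovableVolumes(candidates []string, binds, used map[string]struct{}) []string {
	removable := []string{}
	for _, volumeId := range candidates {
		// Skip volumes that came from host bind mounts
		if _, isBind := binds[volumeId]; isBind {
			continue
		}
		volumeId = strings.TrimSuffix(volumeId, "/layer")
		volumeId = filepath.Base(volumeId)
		// Skip volumes still referenced by another container
		if _, inUse := used[volumeId]; inUse {
			continue
		}
		removable = append(removable, volumeId)
	}
	return removable
}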
|
|
|
|
|
2013-05-30 23:30:21 +04:00
|
|
|
var ErrImageReferenced = errors.New("Image referenced by a repository")
|
|
|
|
|
2013-11-27 21:55:15 +04:00
|
|
|
func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi, byParents map[string][]*Image) error {
|
2013-05-30 23:30:21 +04:00
|
|
|
// If the image is referenced by a repo, do not delete
|
2013-06-11 01:05:54 +04:00
|
|
|
if len(srv.runtime.repositories.ByID()[id]) != 0 {
|
2013-05-30 23:30:21 +04:00
|
|
|
return ErrImageReferenced
|
|
|
|
}
|
|
|
|
// If the image is not referenced but has children, recurse into them (a traversal sketch follows this function)
|
|
|
|
referenced := false
|
|
|
|
for _, img := range byParents[id] {
|
2013-11-27 21:55:15 +04:00
|
|
|
if err := srv.deleteImageAndChildren(img.ID, imgs, byParents); err != nil {
|
2013-05-30 23:30:21 +04:00
|
|
|
if err != ErrImageReferenced {
|
|
|
|
return err
|
|
|
|
}
|
2013-06-11 01:05:54 +04:00
|
|
|
referenced = true
|
2013-05-30 23:30:21 +04:00
|
|
|
}
|
2013-06-04 17:51:12 +04:00
|
|
|
}
|
2013-05-30 23:30:21 +04:00
|
|
|
if referenced {
|
|
|
|
return ErrImageReferenced
|
|
|
|
}
|
2013-05-31 02:53:45 +04:00
|
|
|
|
|
|
|
// If the image is not referenced and has no children, remove it
|
2013-11-27 21:55:15 +04:00
|
|
|
byParents, err := srv.runtime.graph.ByParent()
|
2013-05-31 02:53:45 +04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if len(byParents[id]) == 0 {
|
|
|
|
if err := srv.runtime.repositories.DeleteAll(id); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-05-31 18:37:02 +04:00
|
|
|
err := srv.runtime.graph.Delete(id)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-11-19 06:39:02 +04:00
|
|
|
*imgs = append(*imgs, APIRmi{Deleted: id})
|
|
|
|
srv.LogEvent("delete", id, "")
|
2013-05-31 18:37:02 +04:00
|
|
|
return nil
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
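// NOTE: illustrative sketch of the traversal performed by
// deleteImageAndChildren above: given a parent -> children index such as the
// one returned by graph.ByParent(), collect every descendant of an image
// depth-first. The helper name is hypothetical and nothing is deleted here.
func exampleCollectDescendants(id string, byParent map[string][]*Image, acc *[]string) {
	for _, child := range byParent[id] {
		exampleCollectDescendants(child.ID, byParent, acc)
		*acc = append(*acc, child.ID)
	}
}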
|
|
|
|
|
2013-06-11 01:05:54 +04:00
|
|
|
func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
|
2013-05-30 23:30:21 +04:00
|
|
|
if img.Parent != "" {
|
|
|
|
parent, err := srv.runtime.graph.Get(img.Parent)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-11-27 21:55:15 +04:00
|
|
|
byParents, err := srv.runtime.graph.ByParent()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-05-30 23:30:21 +04:00
|
|
|
// Remove all children images
|
2013-11-27 21:55:15 +04:00
|
|
|
if err := srv.deleteImageAndChildren(img.Parent, imgs, byParents); err != nil {
|
2013-05-30 23:30:21 +04:00
|
|
|
return err
|
|
|
|
}
|
2013-05-31 18:37:02 +04:00
|
|
|
return srv.deleteImageParents(parent, imgs)
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-06-28 23:41:09 +04:00
|
|
|
func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
|
2013-07-05 20:58:39 +04:00
|
|
|
imgs := []APIRmi{}
|
2013-10-22 03:54:02 +04:00
|
|
|
tags := []string{}
|
2013-07-17 19:48:53 +04:00
|
|
|
|
|
|
|
// If deleting by ID, check whether the ID belongs to only one repository
|
2013-10-22 03:54:02 +04:00
|
|
|
if repoName == "" {
|
2013-07-17 19:48:53 +04:00
|
|
|
for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
|
2013-08-14 20:59:21 +04:00
|
|
|
parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
|
2013-10-22 03:54:02 +04:00
|
|
|
if repoName == "" || repoName == parsedRepo {
|
2013-07-17 19:48:53 +04:00
|
|
|
repoName = parsedRepo
|
2013-10-22 03:54:02 +04:00
|
|
|
if parsedTag != "" {
|
|
|
|
tags = append(tags, parsedTag)
|
2013-07-26 13:19:26 +04:00
|
|
|
}
|
2013-07-17 19:48:53 +04:00
|
|
|
} else if repoName != parsedRepo {
|
|
|
|
// the ID belongs to multiple repositories, like base:latest and user:test;
|
|
|
|
// in that case return a conflict
|
|
|
|
return imgs, nil
|
|
|
|
}
|
|
|
|
}
|
2013-10-22 03:54:02 +04:00
|
|
|
} else {
|
|
|
|
tags = append(tags, tag)
|
2013-07-17 19:48:53 +04:00
|
|
|
}
|
|
|
|
// Untag the current image
|
2013-10-22 03:54:02 +04:00
|
|
|
for _, tag := range tags {
|
|
|
|
tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if tagDeleted {
|
2013-10-25 05:59:59 +04:00
|
|
|
imgs = append(imgs, APIRmi{Untagged: img.ID})
|
|
|
|
srv.LogEvent("untag", img.ID, "")
|
2013-10-22 03:54:02 +04:00
|
|
|
}
|
2013-05-30 23:30:21 +04:00
|
|
|
}
|
2013-06-11 01:05:54 +04:00
|
|
|
if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
|
2013-11-27 21:55:15 +04:00
|
|
|
if err := srv.deleteImageAndChildren(img.ID, &imgs, nil); err != nil {
|
2013-05-31 02:53:45 +04:00
|
|
|
if err != ErrImageReferenced {
|
2013-06-28 23:41:09 +04:00
|
|
|
return imgs, err
|
2013-05-31 02:53:45 +04:00
|
|
|
}
|
2013-05-31 18:37:02 +04:00
|
|
|
} else if err := srv.deleteImageParents(img, &imgs); err != nil {
|
2013-05-30 23:30:21 +04:00
|
|
|
if err != ErrImageReferenced {
|
2013-06-28 23:41:09 +04:00
|
|
|
return imgs, err
|
2013-05-30 23:30:21 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-06-28 23:41:09 +04:00
|
|
|
return imgs, nil
|
2013-05-30 23:30:21 +04:00
|
|
|
}
|
|
|
|
|
2013-06-28 23:41:09 +04:00
|
|
|
func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
|
2013-05-31 02:53:45 +04:00
|
|
|
img, err := srv.runtime.repositories.LookupImage(name)
|
|
|
|
if err != nil {
|
2013-05-31 18:37:02 +04:00
|
|
|
return nil, fmt.Errorf("No such image: %s", name)
|
|
|
|
}
|
|
|
|
if !autoPrune {
|
2013-06-11 01:05:54 +04:00
|
|
|
if err := srv.runtime.graph.Delete(img.ID); err != nil {
|
2013-10-30 22:45:11 +04:00
|
|
|
return nil, fmt.Errorf("Cannot delete image %s: %s", name, err)
|
2013-05-31 18:37:02 +04:00
|
|
|
}
|
|
|
|
return nil, nil
|
2013-05-31 02:53:45 +04:00
|
|
|
}
|
2013-11-09 03:01:01 +04:00
|
|
|
|
2013-12-10 00:46:21 +04:00
|
|
|
// Prevent deletion if image is used by a container
|
2013-11-09 03:01:01 +04:00
|
|
|
for _, container := range srv.runtime.List() {
|
2013-12-10 00:46:21 +04:00
|
|
|
parent, err := srv.runtime.repositories.LookupImage(container.Image)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2013-11-09 03:01:01 +04:00
|
|
|
|
2013-12-10 00:46:21 +04:00
|
|
|
if err := parent.WalkHistory(func(p *Image) error {
|
|
|
|
if img.ID == p.ID {
|
|
|
|
return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", name, container.ID)
|
2013-11-09 03:01:01 +04:00
|
|
|
}
|
2013-12-10 00:46:21 +04:00
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
2013-11-09 03:01:01 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-22 03:54:02 +04:00
|
|
|
if strings.Contains(img.ID, name) {
|
|
|
|
// delete via ID
|
|
|
|
return srv.deleteImage(img, "", "")
|
|
|
|
}
|
2013-07-29 16:15:27 +04:00
|
|
|
name, tag := utils.ParseRepositoryTag(name)
|
2013-05-31 02:53:45 +04:00
|
|
|
return srv.deleteImage(img, name, tag)
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:22 +04:00
|
|
|
func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {
|
2013-05-19 21:46:24 +04:00
|
|
|
|
|
|
|
// Retrieve all images
|
2013-09-01 07:31:21 +04:00
|
|
|
images, err := srv.runtime.graph.Map()
|
2013-05-19 21:46:24 +04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the tree in a map of maps (map[parentId][childId])
|
|
|
|
imageMap := make(map[string]map[string]struct{})
|
|
|
|
for _, img := range images {
|
|
|
|
if _, exists := imageMap[img.Parent]; !exists {
|
|
|
|
imageMap[img.Parent] = make(map[string]struct{})
|
|
|
|
}
|
2013-06-04 22:00:22 +04:00
|
|
|
imageMap[img.Parent][img.ID] = struct{}{}
|
2013-05-19 21:46:24 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Loop over the children of the given image and compare their config
|
2013-07-03 02:27:22 +04:00
|
|
|
for elem := range imageMap[imgID] {
|
2013-05-19 21:46:24 +04:00
|
|
|
img, err := srv.runtime.graph.Get(elem)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if CompareConfig(&img.ContainerConfig, config) {
|
|
|
|
return img, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2013-10-29 03:58:59 +04:00
|
|
|
func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
|
2013-10-05 06:25:15 +04:00
|
|
|
runtime := srv.runtime
|
|
|
|
container := runtime.Get(name)
|
|
|
|
if container == nil {
|
2013-05-06 13:31:22 +04:00
|
|
|
return fmt.Errorf("No such container: %s", name)
|
|
|
|
}
|
2013-10-29 06:19:31 +04:00
|
|
|
|
2013-10-05 06:25:15 +04:00
|
|
|
if hostConfig != nil && hostConfig.Links != nil {
|
|
|
|
for _, l := range hostConfig.Links {
|
|
|
|
parts, err := parseLink(l)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-10-29 03:58:59 +04:00
|
|
|
child, err := srv.runtime.GetByName(parts["name"])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if child == nil {
|
|
|
|
return fmt.Errorf("Could not get container for %s", parts["name"])
|
2013-10-19 01:15:24 +04:00
|
|
|
}
|
2013-10-29 03:58:59 +04:00
|
|
|
if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
|
2013-10-05 06:25:15 +04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2013-10-29 06:19:31 +04:00
|
|
|
|
|
|
|
// After we load all the links into the runtime
|
|
|
|
// set them to nil on the hostconfig
|
|
|
|
hostConfig.Links = nil
|
2013-11-01 01:58:43 +04:00
|
|
|
if err := container.writeHostConfig(); err != nil {
|
2013-10-29 06:19:31 +04:00
|
|
|
return err
|
|
|
|
}
|
2013-10-05 06:25:15 +04:00
|
|
|
}
|
2013-10-29 03:58:59 +04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-11-20 11:37:03 +04:00
|
|
|
func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
|
2013-10-27 06:24:01 +04:00
|
|
|
if len(job.Args) < 1 {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("Usage: %s container_id", job.Name)
|
|
|
|
return engine.StatusErr
|
2013-10-27 06:24:01 +04:00
|
|
|
}
|
|
|
|
name := job.Args[0]
|
2013-10-29 03:58:59 +04:00
|
|
|
runtime := srv.runtime
|
|
|
|
container := runtime.Get(name)
|
2013-10-19 02:56:52 +04:00
|
|
|
|
2013-10-29 03:58:59 +04:00
|
|
|
if container == nil {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("No such container: %s", name)
|
|
|
|
return engine.StatusErr
|
2013-10-29 03:58:59 +04:00
|
|
|
}
|
2013-10-27 06:24:01 +04:00
|
|
|
// If no environment was set, then no hostconfig was passed.
|
|
|
|
if len(job.Environ()) > 0 {
|
|
|
|
var hostConfig HostConfig
|
|
|
|
if err := job.ExportEnv(&hostConfig); err != nil {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
2013-10-27 06:24:01 +04:00
|
|
|
}
|
2013-11-13 04:36:20 +04:00
|
|
|
// Validate the HostConfig binds. Make sure that:
|
2013-11-13 23:25:55 +04:00
|
|
|
// 1) the source of a bind mount isn't /
|
|
|
|
// The bind mount "/:/foo" isn't allowed.
|
|
|
|
// 2) Check that the source exists
|
|
|
|
// The source to be bind mounted must exist (a standalone sketch of these checks follows this function).
|
|
|
|
for _, bind := range hostConfig.Binds {
|
|
|
|
splitBind := strings.Split(bind, ":")
|
|
|
|
source := splitBind[0]
|
|
|
|
|
|
|
|
// refuse to bind mount "/" to the container
|
|
|
|
if source == "/" {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
|
|
|
|
return engine.StatusErr
|
2013-11-13 23:25:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// ensure the source exists on the host
|
|
|
|
_, err := os.Stat(source)
|
|
|
|
if err != nil && os.IsNotExist(err) {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
|
|
|
|
return engine.StatusErr
|
2013-11-13 23:25:55 +04:00
|
|
|
}
|
|
|
|
}
|
2013-10-27 06:24:01 +04:00
|
|
|
// Register any links from the host config before starting the container
|
|
|
|
// FIXME: we could just pass the container here, no need to lookup by name again.
|
|
|
|
if err := srv.RegisterLinks(name, &hostConfig); err != nil {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
2013-10-27 06:24:01 +04:00
|
|
|
}
|
|
|
|
container.hostConfig = &hostConfig
|
2013-11-01 01:58:43 +04:00
|
|
|
container.ToDisk()
|
|
|
|
}
|
|
|
|
if err := container.Start(); err != nil {
|
2013-11-20 11:37:03 +04:00
|
|
|
job.Errorf("Cannot start container %s: %s", name, err)
|
|
|
|
return engine.StatusErr
|
2013-10-05 06:25:15 +04:00
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
|
2013-10-05 06:25:15 +04:00
|
|
|
|
2013-11-20 11:37:03 +04:00
|
|
|
return engine.StatusOK
|
2013-05-06 13:31:22 +04:00
|
|
|
}
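// NOTE: illustrative sketch of the bind-mount checks done in ContainerStart
// above: the source half of "src:dst" may not be "/" and must already exist
// on the host. The helper name is hypothetical, not part of server.go.
func exampleValidateBind(bind string) error {
	source := strings.Split(bind, ":")[0]
	if source == "/" {
		return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
	}
	if _, err := os.Stat(source); err != nil && os.IsNotExist(err) {
		return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
	}
	return nil
}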
|
|
|
|
|
2013-11-17 07:00:16 +04:00
|
|
|
func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
|
2013-12-12 03:36:50 +04:00
|
|
|
if len(job.Args) != 1 {
|
|
|
|
job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
2013-11-17 07:00:16 +04:00
|
|
|
return engine.StatusErr
|
|
|
|
}
|
|
|
|
name := job.Args[0]
|
2013-12-12 03:36:50 +04:00
|
|
|
t := job.GetenvInt("t")
|
|
|
|
if t == -1 {
|
2013-11-17 07:00:16 +04:00
|
|
|
t = 10
|
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
2013-11-17 07:00:16 +04:00
|
|
|
if err := container.Stop(int(t)); err != nil {
|
|
|
|
job.Errorf("Cannot stop container %s: %s\n", name, err)
|
|
|
|
return engine.StatusErr
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-10-25 05:59:59 +04:00
|
|
|
srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
|
2013-05-06 13:31:22 +04:00
|
|
|
} else {
|
2013-11-17 07:00:16 +04:00
|
|
|
job.Errorf("No such container: %s\n", name)
|
|
|
|
return engine.StatusErr
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-11-17 07:00:16 +04:00
|
|
|
return engine.StatusOK
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
|
2013-11-25 05:05:59 +04:00
|
|
|
func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
|
|
|
|
if len(job.Args) != 1 {
|
|
|
|
job.Errorf("Usage: %s", job.Name)
|
|
|
|
return engine.StatusErr
|
|
|
|
}
|
|
|
|
name := job.Args[0]
|
2013-05-06 13:31:22 +04:00
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
2013-11-25 05:05:59 +04:00
|
|
|
status := container.Wait()
|
|
|
|
job.Printf("%d\n", status)
|
|
|
|
return engine.StatusOK
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-11-25 05:05:59 +04:00
|
|
|
job.Errorf("%s: no such container: %s", job.Name, name)
|
|
|
|
return engine.StatusErr
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
|
2013-12-12 06:25:30 +04:00
|
|
|
func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
|
|
|
|
if len(job.Args) != 3 {
|
|
|
|
job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
|
|
|
|
return engine.StatusErr
|
|
|
|
}
|
|
|
|
name := job.Args[0]
|
|
|
|
height, err := strconv.Atoi(job.Args[1])
|
|
|
|
if err != nil {
|
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
|
|
|
}
|
|
|
|
width, err := strconv.Atoi(job.Args[2])
|
|
|
|
if err != nil {
|
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
|
|
|
}
|
2013-05-24 06:33:28 +04:00
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
2013-12-12 06:25:30 +04:00
|
|
|
if err := container.Resize(height, width); err != nil {
|
|
|
|
job.Error(err)
|
|
|
|
return engine.StatusErr
|
|
|
|
}
|
|
|
|
return engine.StatusOK
|
2013-05-24 06:33:28 +04:00
|
|
|
}
|
2013-12-12 06:25:30 +04:00
|
|
|
job.Errorf("No such container: %s", name)
|
|
|
|
return engine.StatusErr
|
2013-05-24 06:33:28 +04:00
|
|
|
}
|
|
|
|
|
2013-09-11 22:35:09 +04:00
|
|
|
func (srv *Server) ContainerAttach(name string, logs, stream, stdin, stdout, stderr bool, inStream io.ReadCloser, outStream, errStream io.Writer) error {
|
2013-05-08 10:32:17 +04:00
|
|
|
container := srv.runtime.Get(name)
|
|
|
|
if container == nil {
|
|
|
|
return fmt.Errorf("No such container: %s", name)
|
|
|
|
}
|
2013-09-11 22:35:09 +04:00
|
|
|
|
2013-05-08 10:32:17 +04:00
|
|
|
// logs: replay the container's log files (the JSON decode loop is sketched after this function)
|
|
|
|
if logs {
|
2013-07-15 20:17:58 +04:00
|
|
|
cLog, err := container.ReadLog("json")
|
2013-07-18 17:25:47 +04:00
|
|
|
if err != nil && os.IsNotExist(err) {
|
|
|
|
// Legacy logs
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Old logs format")
|
2013-07-18 17:25:47 +04:00
|
|
|
if stdout {
|
|
|
|
cLog, err := container.ReadLog("stdout")
|
|
|
|
if err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error reading logs (stdout): %s", err)
|
2013-09-11 22:35:09 +04:00
|
|
|
} else if _, err := io.Copy(outStream, cLog); err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error streaming logs (stdout): %s", err)
|
2013-07-18 17:25:47 +04:00
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-07-18 17:25:47 +04:00
|
|
|
if stderr {
|
|
|
|
cLog, err := container.ReadLog("stderr")
|
|
|
|
if err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error reading logs (stderr): %s", err)
|
2013-09-11 22:35:09 +04:00
|
|
|
} else if _, err := io.Copy(errStream, cLog); err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error streaming logs (stderr): %s", err)
|
2013-07-18 17:25:47 +04:00
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-07-18 17:25:47 +04:00
|
|
|
} else if err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error reading logs (json): %s", err)
|
2013-07-18 17:25:47 +04:00
|
|
|
} else {
|
|
|
|
dec := json.NewDecoder(cLog)
|
|
|
|
for {
|
2013-09-27 02:59:02 +04:00
|
|
|
l := &utils.JSONLog{}
|
|
|
|
|
|
|
|
if err := dec.Decode(l); err == io.EOF {
|
2013-07-18 17:25:47 +04:00
|
|
|
break
|
|
|
|
} else if err != nil {
|
2013-10-08 11:54:47 +04:00
|
|
|
utils.Errorf("Error streaming logs: %s", err)
|
2013-07-18 17:25:47 +04:00
|
|
|
break
|
|
|
|
}
|
2013-09-27 02:59:02 +04:00
|
|
|
if l.Stream == "stdout" && stdout {
|
2013-09-11 22:35:09 +04:00
|
|
|
fmt.Fprintf(outStream, "%s", l.Log)
|
2013-07-18 17:25:47 +04:00
|
|
|
}
|
2013-09-27 02:59:02 +04:00
|
|
|
if l.Stream == "stderr" && stderr {
|
|
|
|
fmt.Fprintf(errStream, "%s", l.Log)
|
2013-07-18 17:25:47 +04:00
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
}
|
2013-05-08 10:32:17 +04:00
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
|
2013-05-08 10:32:17 +04:00
|
|
|
// stream
|
|
|
|
if stream {
|
2013-11-22 00:21:03 +04:00
|
|
|
if container.State.IsGhost() {
|
2013-05-08 10:32:17 +04:00
|
|
|
return fmt.Errorf("Impossible to attach to a ghost container")
|
|
|
|
}
|
2013-05-06 13:31:22 +04:00
|
|
|
|
2013-05-08 10:32:17 +04:00
|
|
|
var (
|
|
|
|
cStdin io.ReadCloser
|
|
|
|
cStdout, cStderr io.Writer
|
|
|
|
cStdinCloser io.Closer
|
|
|
|
)
|
2013-05-06 13:31:22 +04:00
|
|
|
|
2013-05-08 10:32:17 +04:00
|
|
|
if stdin {
|
|
|
|
r, w := io.Pipe()
|
|
|
|
go func() {
|
|
|
|
defer w.Close()
|
2013-05-15 02:37:35 +04:00
|
|
|
defer utils.Debugf("Closing buffered stdin pipe")
|
2013-09-11 22:35:09 +04:00
|
|
|
io.Copy(w, inStream)
|
2013-05-08 10:32:17 +04:00
|
|
|
}()
|
|
|
|
cStdin = r
|
2013-09-11 22:35:09 +04:00
|
|
|
cStdinCloser = inStream
|
2013-05-08 10:32:17 +04:00
|
|
|
}
|
|
|
|
if stdout {
|
2013-09-11 22:35:09 +04:00
|
|
|
cStdout = outStream
|
2013-05-08 10:32:17 +04:00
|
|
|
}
|
|
|
|
if stderr {
|
2013-09-11 22:35:09 +04:00
|
|
|
cStderr = errStream
|
2013-05-08 10:32:17 +04:00
|
|
|
}
|
2013-05-08 01:15:42 +04:00
|
|
|
|
2013-05-08 10:32:17 +04:00
|
|
|
<-container.Attach(cStdin, cStdinCloser, cStdout, cStderr)
|
|
|
|
|
|
|
|
// If we are in stdinonce mode, wait for the process to end
|
|
|
|
// otherwise, simply return
|
|
|
|
if container.Config.StdinOnce && !container.Config.Tty {
|
|
|
|
container.Wait()
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
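// NOTE: illustrative sketch of the JSON log replay loop in ContainerAttach
// above: decode utils.JSONLog records until EOF and route each entry to
// stdout or stderr depending on its Stream field. The helper name is
// hypothetical and error handling is reduced to returning the first decode
// failure.
func exampleStreamJSONLogs(logFile io.Reader, stdout, stderr io.Writer) error {
	dec := json.NewDecoder(logFile)
	for {
		l := &utils.JSONLog{}
		if err := dec.Decode(l); err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}
		switch l.Stream {
		case "stdout":
			fmt.Fprintf(stdout, "%s", l.Log)
		case "stderr":
			fmt.Fprintf(stderr, "%s", l.Log)
		}
	}
}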
|
|
|
|
|
|
|
|
func (srv *Server) ContainerInspect(name string) (*Container, error) {
|
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
|
|
|
return container, nil
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("No such container: %s", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (srv *Server) ImageInspect(name string) (*Image, error) {
|
|
|
|
if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil {
|
|
|
|
return image, nil
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("No such image: %s", name)
|
|
|
|
}
|
|
|
|
|
2013-07-17 08:07:41 +04:00
|
|
|
func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) error {
|
|
|
|
if container := srv.runtime.Get(name); container != nil {
|
|
|
|
|
|
|
|
data, err := container.Copy(resource)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := io.Copy(out, data); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return fmt.Errorf("No such container: %s", name)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2013-10-27 06:24:01 +04:00
|
|
|
func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
|
2013-10-05 06:25:15 +04:00
|
|
|
runtime, err := NewRuntime(config)
|
2013-05-06 13:31:22 +04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
srv := &Server{
|
2013-10-27 06:24:01 +04:00
|
|
|
Eng: eng,
|
2013-06-18 03:10:00 +04:00
|
|
|
runtime: runtime,
|
2013-11-21 01:51:05 +04:00
|
|
|
pullingPool: make(map[string]chan struct{}),
|
|
|
|
pushingPool: make(map[string]chan struct{}),
|
2013-07-12 20:29:23 +04:00
|
|
|
events: make([]utils.JSONMessage, 0, 64), // only keep the last 64 events
|
|
|
|
listeners: make(map[string]chan utils.JSONMessage),
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|
2013-05-16 04:17:33 +04:00
|
|
|
runtime.srv = srv
|
2013-05-06 13:31:22 +04:00
|
|
|
return srv, nil
|
|
|
|
}
|
|
|
|
|
2013-08-22 23:15:31 +04:00
|
|
|
func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
|
2013-11-25 22:58:17 +04:00
|
|
|
srv.Lock()
|
|
|
|
defer srv.Unlock()
|
2013-12-08 11:35:24 +04:00
|
|
|
v := dockerVersion()
|
|
|
|
httpVersion := make([]utils.VersionInfo, 0, 4)
|
|
|
|
httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")})
|
|
|
|
httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")})
|
|
|
|
httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")})
|
|
|
|
httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", v.Get("KernelVersion")})
|
|
|
|
ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
|
2013-10-22 22:49:13 +04:00
|
|
|
md := &utils.HTTPMetaHeadersDecorator{
|
|
|
|
Headers: metaHeaders,
|
2013-08-02 11:30:45 +04:00
|
|
|
}
|
2013-10-22 22:49:13 +04:00
|
|
|
factory := utils.NewHTTPRequestFactory(ud, md)
|
|
|
|
return factory
|
2013-08-02 11:30:45 +04:00
|
|
|
}
|
|
|
|
|
2013-11-14 10:08:08 +04:00
|
|
|
func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
|
2013-11-22 04:41:41 +04:00
|
|
|
now := time.Now().UTC().Unix()
|
2013-08-12 15:50:03 +04:00
|
|
|
jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
|
2013-11-25 22:58:17 +04:00
|
|
|
srv.AddEvent(jm)
|
2013-07-12 20:29:23 +04:00
|
|
|
for _, c := range srv.listeners {
|
2013-07-18 18:35:14 +04:00
|
|
|
select { // non-blocking send (sketched after this function)
|
|
|
|
case c <- jm:
|
|
|
|
default:
|
|
|
|
}
|
2013-07-10 16:55:05 +04:00
|
|
|
}
|
2013-11-14 10:08:08 +04:00
|
|
|
return &jm
|
2013-07-10 16:55:05 +04:00
|
|
|
}
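// NOTE: illustrative sketch of the non-blocking fan-out used by LogEvent
// above: a select with a default case drops the event for any listener whose
// channel is full, so one slow consumer can never block the server. The
// helper name is hypothetical.
func exampleBroadcast(listeners map[string]chan utils.JSONMessage, jm utils.JSONMessage) {
	for _, c := range listeners {
		select {
		case c <- jm: // delivered
		default: // listener not draining; drop the event rather than block
		}
	}
}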
|
|
|
|
|
2013-11-25 22:58:17 +04:00
|
|
|
func (srv *Server) AddEvent(jm utils.JSONMessage) {
|
|
|
|
srv.Lock()
|
|
|
|
defer srv.Unlock()
|
|
|
|
srv.events = append(srv.events, jm)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (srv *Server) GetEvents() []utils.JSONMessage {
|
|
|
|
srv.RLock()
|
|
|
|
defer srv.RUnlock()
|
|
|
|
return srv.events
|
|
|
|
}
|
|
|
|
|
2013-05-06 13:31:22 +04:00
|
|
|
type Server struct {
|
2013-11-25 22:58:17 +04:00
|
|
|
sync.RWMutex
|
2013-06-18 03:10:00 +04:00
|
|
|
runtime *Runtime
|
2013-11-21 01:51:05 +04:00
|
|
|
pullingPool map[string]chan struct{}
|
|
|
|
pushingPool map[string]chan struct{}
|
2013-07-12 20:29:23 +04:00
|
|
|
events []utils.JSONMessage
|
|
|
|
listeners map[string]chan utils.JSONMessage
|
2013-10-27 06:24:01 +04:00
|
|
|
Eng *engine.Engine
|
2013-05-06 13:31:22 +04:00
|
|
|
}
|