Mirror of https://github.com/microsoft/docker.git
Move integration-cli daemon package to internal/test…
… and do not use the `docker` CLI in it. One of the reasons for this move is to keep the `integration` package from depending on the legacy `integration-cli` package. The next step will be to support swarm within this package *and* to provide helper functions that use the API (as opposed to the CLI-based ones in the `integration-cli/daemon` package). Signed-off-by: Vincent Demeester <vincent@sbr.pm>
This commit is contained in:
Parent
fb08a5c6c3
Commit
f0d277fe84
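As a rough illustration of the API change (the option names and calls below are taken from the diff on this page; the test itself is a hypothetical sketch, not part of the commit), the old positional constructor daemon.New(t, dockerBinary, dockerdBinary, daemon.Config{...}) becomes a functional-options call:

package example_test

import (
	"testing"

	"github.com/docker/docker/internal/test/daemon"
)

// TestDaemonLifecycle sketches how a test drives the relocated daemon helper:
// functional options replace the old positional arguments and Config struct.
func TestDaemonLifecycle(t *testing.T) {
	d := daemon.New(t, daemon.WithDockerdBinary("dockerd"), daemon.WithExperimental)
	d.Start(t, "--iptables=false")
	defer d.Stop(t)
}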
@@ -21,6 +21,7 @@ import (
	"github.com/docker/docker/integration-cli/environment"
	"github.com/docker/docker/integration-cli/fixtures/plugin"
	"github.com/docker/docker/integration-cli/registry"
	testdaemon "github.com/docker/docker/internal/test/daemon"
	ienv "github.com/docker/docker/internal/test/environment"
	"github.com/docker/docker/pkg/reexec"
	"github.com/go-check/check"

@@ -100,7 +101,7 @@ func (s *DockerSuite) OnTimeout(c *check.C) {
	daemonPid := int(rawPid)
	if daemonPid > 0 {
		daemon.SignalDaemonDump(daemonPid)
		testdaemon.SignalDaemonDump(daemonPid)
	}
}

@@ -285,7 +286,7 @@ func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
}

func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
	filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
	filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			// ignore errors here
			// not cleaning up sockets is not really an error

@@ -296,7 +297,7 @@ func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
		}
		return nil
	})
	os.RemoveAll(daemon.SockRoot)
	os.RemoveAll(testdaemon.SockRoot)
}

const defaultSwarmPort = 2477
|
@ -1,33 +1,17 @@
|
|||
package daemon // import "github.com/docker/docker/integration-cli/daemon"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/request"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/docker/docker/internal/test/daemon"
|
||||
"github.com/go-check/check"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
"github.com/gotestyourself/gotestyourself/icmd"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type testingT interface {
|
||||
|
@ -40,32 +24,10 @@ type logT interface {
|
|||
Logf(string, ...interface{})
|
||||
}
|
||||
|
||||
// SockRoot holds the path of the default docker integration daemon socket
|
||||
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
|
||||
|
||||
var errDaemonNotStarted = errors.New("daemon not started")
|
||||
|
||||
// Daemon represents a Docker daemon for the testing framework.
|
||||
type Daemon struct {
|
||||
GlobalFlags []string
|
||||
Root string
|
||||
Folder string
|
||||
Wait chan error
|
||||
UseDefaultHost bool
|
||||
UseDefaultTLSHost bool
|
||||
|
||||
id string
|
||||
logFile *os.File
|
||||
stdin io.WriteCloser
|
||||
stdout, stderr io.ReadCloser
|
||||
cmd *exec.Cmd
|
||||
storageDriver string
|
||||
userlandProxy bool
|
||||
execRoot string
|
||||
experimental bool
|
||||
dockerBinary string
|
||||
dockerdBinary string
|
||||
log logT
|
||||
*daemon.Daemon
|
||||
dockerBinary string
|
||||
}
|
||||
|
||||
// Config holds docker daemon integration configuration
|
||||
|
@ -73,504 +35,24 @@ type Config struct {
|
|||
Experimental bool
|
||||
}
|
||||
|
||||
type clientConfig struct {
|
||||
transport *http.Transport
|
||||
scheme string
|
||||
addr string
|
||||
}
|
||||
|
||||
// New returns a Daemon instance to be used for testing.
|
||||
// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
|
||||
// The daemon will not automatically start.
|
||||
func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
|
||||
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
|
||||
if dest == "" {
|
||||
dest = os.Getenv("DEST")
|
||||
ops := []func(*daemon.Daemon){
|
||||
daemon.WithDockerdBinary(dockerdBinary),
|
||||
}
|
||||
if dest == "" {
|
||||
t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(SockRoot, 0700); err != nil {
|
||||
t.Fatalf("could not create daemon socket root")
|
||||
}
|
||||
|
||||
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
|
||||
dir := filepath.Join(dest, id)
|
||||
daemonFolder, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not make %q an absolute path", dir)
|
||||
}
|
||||
daemonRoot := filepath.Join(daemonFolder, "root")
|
||||
|
||||
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
|
||||
t.Fatalf("Could not create daemon root %q", dir)
|
||||
}
|
||||
|
||||
userlandProxy := true
|
||||
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
|
||||
if val, err := strconv.ParseBool(env); err != nil {
|
||||
userlandProxy = val
|
||||
}
|
||||
if config.Experimental {
|
||||
ops = append(ops, daemon.WithExperimental)
|
||||
}
|
||||
d := daemon.New(t, ops...)
|
||||
|
||||
return &Daemon{
|
||||
id: id,
|
||||
Folder: daemonFolder,
|
||||
Root: daemonRoot,
|
||||
storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
|
||||
userlandProxy: userlandProxy,
|
||||
execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
|
||||
dockerBinary: dockerBinary,
|
||||
dockerdBinary: dockerdBinary,
|
||||
experimental: config.Experimental,
|
||||
log: t,
|
||||
Daemon: d,
|
||||
dockerBinary: dockerBinary,
|
||||
}
|
||||
}
|
||||
|
||||
// RootDir returns the root directory of the daemon.
|
||||
func (d *Daemon) RootDir() string {
|
||||
return d.Root
|
||||
}
|
||||
|
||||
// ID returns the generated id of the daemon
|
||||
func (d *Daemon) ID() string {
|
||||
return d.id
|
||||
}
|
||||
|
||||
// StorageDriver returns the configured storage driver of the daemon
|
||||
func (d *Daemon) StorageDriver() string {
|
||||
return d.storageDriver
|
||||
}
|
||||
|
||||
// CleanupExecRoot cleans the daemon exec root (network namespaces, ...)
|
||||
func (d *Daemon) CleanupExecRoot(c *check.C) {
|
||||
cleanupExecRoot(c, d.execRoot)
|
||||
}
|
||||
|
||||
func (d *Daemon) getClientConfig() (*clientConfig, error) {
|
||||
var (
|
||||
transport *http.Transport
|
||||
scheme string
|
||||
addr string
|
||||
proto string
|
||||
)
|
||||
if d.UseDefaultTLSHost {
|
||||
option := &tlsconfig.Options{
|
||||
CAFile: "fixtures/https/ca.pem",
|
||||
CertFile: "fixtures/https/client-cert.pem",
|
||||
KeyFile: "fixtures/https/client-key.pem",
|
||||
}
|
||||
tlsConfig, err := tlsconfig.Client(*option)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transport = &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
|
||||
scheme = "https"
|
||||
proto = "tcp"
|
||||
} else if d.UseDefaultHost {
|
||||
addr = opts.DefaultUnixSocket
|
||||
proto = "unix"
|
||||
scheme = "http"
|
||||
transport = &http.Transport{}
|
||||
} else {
|
||||
addr = d.sockPath()
|
||||
proto = "unix"
|
||||
scheme = "http"
|
||||
transport = &http.Transport{}
|
||||
}
|
||||
|
||||
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transport.DisableKeepAlives = true
|
||||
|
||||
return &clientConfig{
|
||||
transport: transport,
|
||||
scheme: scheme,
|
||||
addr: addr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start starts the daemon and return once it is ready to receive requests.
|
||||
func (d *Daemon) Start(t testingT, args ...string) {
|
||||
if err := d.StartWithError(args...); err != nil {
|
||||
t.Fatalf("Error starting daemon with arguments: %v", args)
|
||||
}
|
||||
}
|
||||
|
||||
// StartWithError starts the daemon and return once it is ready to receive requests.
|
||||
// It returns an error in case it couldn't start.
|
||||
func (d *Daemon) StartWithError(args ...string) error {
|
||||
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
|
||||
}
|
||||
|
||||
return d.StartWithLogFile(logFile, args...)
|
||||
}
|
||||
|
||||
// StartWithLogFile will start the daemon and attach its streams to a given file.
|
||||
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
|
||||
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
|
||||
}
|
||||
args := append(d.GlobalFlags,
|
||||
"--containerd", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--data-root", d.Root,
|
||||
"--exec-root", d.execRoot,
|
||||
"--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
|
||||
fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
|
||||
)
|
||||
if d.experimental {
|
||||
args = append(args, "--experimental", "--init")
|
||||
}
|
||||
if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
|
||||
args = append(args, []string{"--host", d.Sock()}...)
|
||||
}
|
||||
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
||||
args = append(args, []string{"--userns-remap", root}...)
|
||||
}
|
||||
|
||||
// If we don't explicitly set the log-level or debug flag(-D) then
|
||||
// turn on debug mode
|
||||
foundLog := false
|
||||
foundSd := false
|
||||
for _, a := range providedArgs {
|
||||
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
|
||||
foundLog = true
|
||||
}
|
||||
if strings.Contains(a, "--storage-driver") {
|
||||
foundSd = true
|
||||
}
|
||||
}
|
||||
if !foundLog {
|
||||
args = append(args, "--debug")
|
||||
}
|
||||
if d.storageDriver != "" && !foundSd {
|
||||
args = append(args, "--storage-driver", d.storageDriver)
|
||||
}
|
||||
|
||||
args = append(args, providedArgs...)
|
||||
d.cmd = exec.Command(dockerdBinary, args...)
|
||||
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
|
||||
d.cmd.Stdout = out
|
||||
d.cmd.Stderr = out
|
||||
d.logFile = out
|
||||
|
||||
if err := d.cmd.Start(); err != nil {
|
||||
return errors.Errorf("[%s] could not start daemon container: %v", d.id, err)
|
||||
}
|
||||
|
||||
wait := make(chan error)
|
||||
|
||||
go func() {
|
||||
wait <- d.cmd.Wait()
|
||||
d.log.Logf("[%s] exiting daemon", d.id)
|
||||
close(wait)
|
||||
}()
|
||||
|
||||
d.Wait = wait
|
||||
|
||||
tick := time.Tick(500 * time.Millisecond)
|
||||
// make sure daemon is ready to receive requests
|
||||
startTime := time.Now().Unix()
|
||||
for {
|
||||
d.log.Logf("[%s] waiting for daemon to start", d.id)
|
||||
if time.Now().Unix()-startTime > 5 {
|
||||
// After 5 seconds, give up
|
||||
return errors.Errorf("[%s] Daemon exited and never started", d.id)
|
||||
}
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
return errors.Errorf("[%s] timeout: daemon does not respond", d.id)
|
||||
case <-tick:
|
||||
clientConfig, err := d.getClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: clientConfig.transport,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", "/_ping", nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] could not create new request", d.id)
|
||||
}
|
||||
req.URL.Host = clientConfig.addr
|
||||
req.URL.Scheme = clientConfig.scheme
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
|
||||
}
|
||||
d.log.Logf("[%s] daemon started\n", d.id)
|
||||
d.Root, err = d.queryRootDir()
|
||||
if err != nil {
|
||||
return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
|
||||
}
|
||||
return nil
|
||||
case <-d.Wait:
|
||||
return errors.Errorf("[%s] Daemon exited during startup", d.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartWithBusybox will first start the daemon with Daemon.Start()
|
||||
// then save the busybox image from the main daemon and load it into this Daemon instance.
|
||||
func (d *Daemon) StartWithBusybox(t testingT, arg ...string) {
|
||||
d.Start(t, arg...)
|
||||
d.LoadBusybox(t)
|
||||
}
|
||||
|
||||
// Kill will send a SIGKILL to the daemon
|
||||
func (d *Daemon) Kill() error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
|
||||
defer func() {
|
||||
d.logFile.Close()
|
||||
d.cmd = nil
|
||||
}()
|
||||
|
||||
if err := d.cmd.Process.Kill(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
|
||||
}
|
||||
|
||||
// Pid returns the pid of the daemon
|
||||
func (d *Daemon) Pid() int {
|
||||
return d.cmd.Process.Pid
|
||||
}
|
||||
|
||||
// Interrupt stops the daemon by sending it an Interrupt signal
|
||||
func (d *Daemon) Interrupt() error {
|
||||
return d.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// Signal sends the specified signal to the daemon if running
|
||||
func (d *Daemon) Signal(signal os.Signal) error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
return d.cmd.Process.Signal(signal)
|
||||
}
|
||||
|
||||
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
|
||||
// stack to its log file and exit
|
||||
// This is used primarily for gathering debug information on test timeout
|
||||
func (d *Daemon) DumpStackAndQuit() {
|
||||
if d.cmd == nil || d.cmd.Process == nil {
|
||||
return
|
||||
}
|
||||
SignalDaemonDump(d.cmd.Process.Pid)
|
||||
}
|
||||
|
||||
// Stop will send a SIGINT every second and wait for the daemon to stop.
|
||||
// If it times out, a SIGKILL is sent.
|
||||
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
||||
// instantiate a new one with NewDaemon.
|
||||
// If an error occurs while starting the daemon, the test will fail.
|
||||
func (d *Daemon) Stop(t testingT) {
|
||||
err := d.StopWithError()
|
||||
if err != nil {
|
||||
if err != errDaemonNotStarted {
|
||||
t.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
|
||||
} else {
|
||||
t.Logf("Daemon %s is not started", d.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
|
||||
// If it timeouts, a SIGKILL is sent.
|
||||
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
||||
// instantiate a new one with NewDaemon.
|
||||
func (d *Daemon) StopWithError() error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
|
||||
defer func() {
|
||||
d.logFile.Close()
|
||||
d.cmd = nil
|
||||
}()
|
||||
|
||||
i := 1
|
||||
tick := time.Tick(time.Second)
|
||||
|
||||
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
if strings.Contains(err.Error(), "os: process already finished") {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
return errors.Errorf("could not send signal: %v", err)
|
||||
}
|
||||
out1:
|
||||
for {
|
||||
select {
|
||||
case err := <-d.Wait:
|
||||
return err
|
||||
case <-time.After(20 * time.Second):
|
||||
// time for stopping jobs and run onShutdown hooks
|
||||
d.log.Logf("[%s] daemon started", d.id)
|
||||
break out1
|
||||
}
|
||||
}
|
||||
|
||||
out2:
|
||||
for {
|
||||
select {
|
||||
case err := <-d.Wait:
|
||||
return err
|
||||
case <-tick:
|
||||
i++
|
||||
if i > 5 {
|
||||
d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
|
||||
break out2
|
||||
}
|
||||
d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
|
||||
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
return errors.Errorf("could not send signal: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.cmd.Process.Kill(); err != nil {
|
||||
d.log.Logf("Could not kill daemon: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
d.cmd.Wait()
|
||||
|
||||
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
|
||||
}
|
||||
|
||||
// Restart will restart the daemon by first stopping it and the starting it.
|
||||
// If an error occurs while starting the daemon, the test will fail.
|
||||
func (d *Daemon) Restart(t testingT, args ...string) {
|
||||
d.Stop(t)
|
||||
d.handleUserns()
|
||||
d.Start(t, args...)
|
||||
}
|
||||
|
||||
// RestartWithError will restart the daemon by first stopping it and then starting it.
|
||||
func (d *Daemon) RestartWithError(arg ...string) error {
|
||||
if err := d.StopWithError(); err != nil {
|
||||
return err
|
||||
}
|
||||
d.handleUserns()
|
||||
return d.StartWithError(arg...)
|
||||
}
|
||||
|
||||
func (d *Daemon) handleUserns() {
|
||||
// in the case of tests running a user namespace-enabled daemon, we have resolved
|
||||
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
|
||||
// remapped root is added--we need to subtract it from the path before calling
|
||||
// start or else we will continue making subdirectories rather than truly restarting
|
||||
// with the same location/root:
|
||||
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
||||
d.Root = filepath.Dir(d.Root)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadBusybox image into the daemon
|
||||
func (d *Daemon) LoadBusybox(t testingT) {
|
||||
clientHost, err := client.NewEnvClient()
|
||||
assert.NilError(t, err, "failed to create client")
|
||||
defer clientHost.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
|
||||
assert.NilError(t, err, "failed to download busybox")
|
||||
defer reader.Close()
|
||||
|
||||
client, err := d.NewClient()
|
||||
assert.NilError(t, err, "failed to create client")
|
||||
defer client.Close()
|
||||
|
||||
resp, err := client.ImageLoad(ctx, reader, true)
|
||||
assert.NilError(t, err, "failed to load busybox")
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
func (d *Daemon) queryRootDir() (string, error) {
|
||||
// update daemon root by asking /info endpoint (to support user
|
||||
// namespaced daemon with root remapped uid.gid directory)
|
||||
clientConfig, err := d.getClientConfig()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: clientConfig.transport,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", "/info", nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.URL.Host = clientConfig.addr
|
||||
req.URL.Scheme = clientConfig.scheme
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
|
||||
return resp.Body.Close()
|
||||
})
|
||||
|
||||
type Info struct {
|
||||
DockerRootDir string
|
||||
}
|
||||
var b []byte
|
||||
var i Info
|
||||
b, err = request.ReadBody(body)
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
// read the docker root dir
|
||||
if err = json.Unmarshal(b, &i); err == nil {
|
||||
return i.DockerRootDir, nil
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Sock returns the socket path of the daemon
|
||||
func (d *Daemon) Sock() string {
|
||||
return fmt.Sprintf("unix://" + d.sockPath())
|
||||
}
|
||||
|
||||
func (d *Daemon) sockPath() string {
|
||||
return filepath.Join(SockRoot, d.id+".sock")
|
||||
}
|
||||
|
||||
// WaitRun waits for a container to be running for 10s
|
||||
func (d *Daemon) WaitRun(contID string) error {
|
||||
args := []string{"--host", d.Sock()}
|
||||
return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
|
||||
}
|
||||
|
||||
// Info returns the info struct for this daemon
|
||||
func (d *Daemon) Info(t assert.TestingT) types.Info {
|
||||
apiclient, err := client.NewClientWithOpts(client.WithHost((d.Sock())))
|
||||
assert.NilError(t, err)
|
||||
info, err := apiclient.Info(context.Background())
|
||||
assert.NilError(t, err)
|
||||
return info
|
||||
}
|
||||
|
||||
// Cmd executes a docker CLI command against this daemon.
|
||||
// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
|
||||
func (d *Daemon) Cmd(args ...string) (string, error) {
|
||||
|
@ -594,11 +76,6 @@ func (d *Daemon) PrependHostArg(args []string) []string {
|
|||
return append([]string{"--host", d.Sock()}, args...)
|
||||
}
|
||||
|
||||
// LogFileName returns the path the daemon's log file
|
||||
func (d *Daemon) LogFileName() string {
|
||||
return d.logFile.Name()
|
||||
}
|
||||
|
||||
// GetIDByName returns the ID of an object (container, volume, …) given its name
|
||||
func (d *Daemon) GetIDByName(name string) (string, error) {
|
||||
return d.inspectFieldWithError(name, "Id")
|
||||
|
@ -616,11 +93,6 @@ func (d *Daemon) ActiveContainers() (ids []string) {
|
|||
return
|
||||
}
|
||||
|
||||
// ReadLogFile returns the content of the daemon log file
|
||||
func (d *Daemon) ReadLogFile() ([]byte, error) {
|
||||
return ioutil.ReadFile(d.logFile.Name())
|
||||
}
|
||||
|
||||
// InspectField returns the field filter by 'filter'
|
||||
func (d *Daemon) InspectField(name, filter string) (string, error) {
|
||||
return d.inspectFilter(name, filter)
|
||||
|
@ -672,59 +144,10 @@ func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.Comme
|
|||
return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
|
||||
}
|
||||
|
||||
// ReloadConfig asks the daemon to reload its configuration
|
||||
func (d *Daemon) ReloadConfig() error {
|
||||
if d.cmd == nil || d.cmd.Process == nil {
|
||||
return errors.New("daemon is not running")
|
||||
}
|
||||
|
||||
errCh := make(chan error)
|
||||
started := make(chan struct{})
|
||||
go func() {
|
||||
_, body, err := request.DoOnHost(d.Sock(), "/events", request.Method(http.MethodGet))
|
||||
close(started)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
defer body.Close()
|
||||
dec := json.NewDecoder(body)
|
||||
for {
|
||||
var e events.Message
|
||||
if err := dec.Decode(&e); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if e.Type != events.DaemonEventType {
|
||||
continue
|
||||
}
|
||||
if e.Action != "reload" {
|
||||
continue
|
||||
}
|
||||
close(errCh) // notify that we are done
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
<-started
|
||||
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
|
||||
return errors.Errorf("error signaling daemon reload: %v", err)
|
||||
}
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return errors.Errorf("error waiting for daemon reload event: %v", err)
|
||||
}
|
||||
case <-time.After(30 * time.Second):
|
||||
return errors.New("timeout waiting for daemon reload event")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewClient creates new client based on daemon's socket path
|
||||
func (d *Daemon) NewClient() (*client.Client, error) {
|
||||
return client.NewClientWithOpts(
|
||||
client.FromEnv,
|
||||
client.WithHost(d.Sock()))
|
||||
// WaitRun waits for a container to be running for 10s
|
||||
func (d *Daemon) WaitRun(contID string) error {
|
||||
args := []string{"--host", d.Sock()}
|
||||
return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
|
||||
}
|
||||
|
||||
// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time.
|
||||
|
|
|
@@ -9,8 +9,8 @@ import (
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/gotestyourself/gotestyourself/assert"
	"github.com/gotestyourself/gotestyourself/skip"
	"golang.org/x/sys/unix"

@@ -30,7 +30,7 @@ func TestContainerStartOnDaemonRestart(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon(), "cannot start daemon on remote test run")
	t.Parallel()

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t, "--iptables=false")
	defer d.Stop(t)
@@ -9,9 +9,9 @@ import (
	"github.com/docker/docker/api/types"
	containerTypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/integration/internal/request"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"

@@ -62,7 +62,7 @@ func TestExportContainerAfterDaemonRestart(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
	skip.If(t, testEnv.IsRemoteDaemon())

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	client, err := d.NewClient()
	assert.NilError(t, err)
@@ -12,8 +12,8 @@ import (
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration/internal/request"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/docker/pkg/system"
	"github.com/gotestyourself/gotestyourself/assert"

@@ -25,7 +25,7 @@ import (
func TestContainerShmNoLeak(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon(), "cannot start daemon on remote test run")
	t.Parallel()
	d := daemon.New(t, "docker", "dockerd", daemon.Config{})
	d := daemon.New(t)
	client, err := d.NewClient()
	if err != nil {
		t.Fatal(err)
@@ -8,7 +8,7 @@ import (

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/gotestyourself/gotestyourself/skip"
)

@@ -55,7 +55,7 @@ func TestDaemonRestartKillContainers(t *testing.T) {

	t.Parallel()

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	client, err := d.NewClient()
	if err != nil {
		t.Fatal(err)
@@ -15,7 +15,6 @@ import (
)

const defaultSwarmPort = 2477
const dockerdBinary = "dockerd"

func TestInspectNetwork(t *testing.T) {
	defer setupTest(t)()
@@ -9,8 +9,8 @@ import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/pkg/parsers/kernel"
	"github.com/gotestyourself/gotestyourself/assert"
	"github.com/gotestyourself/gotestyourself/assert/cmp"

@@ -24,7 +24,7 @@ func TestDockerNetworkMacvlanPersistance(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon())
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)

@@ -53,7 +53,7 @@ func TestDockerNetworkMacvlanOverlapParent(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon())
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)

@@ -95,7 +95,7 @@ func TestDockerNetworkMacvlanSubinterface(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon())
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)

@@ -131,7 +131,7 @@ func TestDockerNetworkMacvlanBridgeNilParent(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon())
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)
	client, err := d.NewClient()

@@ -157,7 +157,7 @@ func TestDockerNetworkMacvlanBridgeInternal(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon())
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)
	client, err := d.NewClient()

@@ -191,7 +191,7 @@ func TestDockerNetworkMacvlanMultiSubnet(t *testing.T) {
	skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan")
	t.Skip("Temporarily skipping while investigating sporadic v6 CI issues")

	d := daemon.New(t, "", "dockerd", daemon.Config{})
	d := daemon.New(t)
	d.StartWithBusybox(t)
	defer d.Stop(t)
	client, err := d.NewClient()
@@ -12,7 +12,7 @@ import (
	"strings"
	"testing"

	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/internal/test/environment"
	"github.com/docker/docker/pkg/authorization"
	"github.com/docker/docker/pkg/plugins"

@@ -25,8 +25,6 @@ var (
	server *httptest.Server
)

const dockerdBinary = "dockerd"

func TestMain(m *testing.M) {
	var err error
	testEnv, err = environment.New()

@@ -52,9 +50,7 @@ func setupTest(t *testing.T) func() {
	skip.IfCondition(t, testEnv.IsRemoteDaemon(), "cannot run daemon when remote daemon")
	environment.ProtectAll(t, testEnv)

	d = daemon.New(t, "", dockerdBinary, daemon.Config{
		Experimental: testEnv.DaemonInfo.ExperimentalBuild,
	})
	d = daemon.New(t, daemon.WithExperimental)

	return func() {
		if d != nil {
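Note that the old setupTest enabled experimental mode only when testEnv.DaemonInfo.ExperimentalBuild was true, while the replacement applies daemon.WithExperimental unconditionally; if the conditional behaviour is needed, the variadic options make it easy to build the list first. A minimal sketch, assuming the same testEnv value (not code from this commit):

	var ops []func(*daemon.Daemon)
	if testEnv.DaemonInfo.ExperimentalBuild {
		ops = append(ops, daemon.WithExperimental)
	}
	d = daemon.New(t, ops...)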
|
@ -12,8 +12,6 @@ var (
|
|||
testEnv *environment.Execution
|
||||
)
|
||||
|
||||
const dockerdBinary = "dockerd"
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var err error
|
||||
testEnv, err = environment.New()
|
||||
|
|
|
@@ -5,7 +5,7 @@ import (
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/gotestyourself/gotestyourself/assert"
	"github.com/gotestyourself/gotestyourself/skip"
)

@@ -17,7 +17,7 @@ func TestDaemonStartWithLogOpt(t *testing.T) {
	skip.IfCondition(t, testEnv.IsRemoteDaemon(), "cannot run daemon when remote daemon")
	t.Parallel()

	d := daemon.New(t, "", dockerdBinary, daemon.Config{})
	d := daemon.New(t)
	d.Start(t, "--iptables=false")
	defer d.Stop(t)
@@ -12,8 +12,6 @@ var (
	testEnv *environment.Execution
)

const dockerdBinary = "dockerd"

func TestMain(m *testing.M) {
	var err error
	testEnv, err = environment.New()
@@ -7,8 +7,8 @@ import (
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration-cli/fixtures/plugin"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/gotestyourself/gotestyourself/assert"
)

@@ -17,7 +17,7 @@ import (
func TestPluginWithDevMounts(t *testing.T) {
	t.Parallel()

	d := daemon.New(t, "", dockerdBinary, daemon.Config{})
	d := daemon.New(t)
	d.Start(t, "--iptables=false")
	defer d.Stop(t)
@@ -10,8 +10,6 @@ import (

var testEnv *environment.Execution

const dockerdBinary = "dockerd"

func TestMain(m *testing.M) {
	var err error
	testEnv, err = environment.New()
@@ -10,8 +10,6 @@ import (

var testEnv *environment.Execution

const dockerdBinary = "dockerd"

func TestMain(m *testing.M) {
	var err error
	testEnv, err = environment.New()
@@ -7,7 +7,7 @@ import (

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/internal/test/daemon"

	"github.com/gotestyourself/gotestyourself/assert"
)

@@ -33,7 +33,7 @@ func TestCgroupDriverSystemdMemoryLimit(t *testing.T) {
		t.Skip("systemd not available")
	}

	d := daemon.New(t, "docker", "dockerd", daemon.Config{})
	d := daemon.New(t)
	client, err := d.NewClient()
	assert.NilError(t, err)
	d.StartWithBusybox(t, "--exec-opt", "native.cgroupdriver=systemd", "--iptables=false")
@ -0,0 +1,618 @@
|
|||
package daemon // import "github.com/docker/docker/internal/test/daemon"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/request"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type testingT interface {
|
||||
assert.TestingT
|
||||
logT
|
||||
Fatalf(string, ...interface{})
|
||||
}
|
||||
|
||||
type logT interface {
|
||||
Logf(string, ...interface{})
|
||||
}
|
||||
|
||||
const defaultDockerdBinary = "dockerd"
|
||||
|
||||
var errDaemonNotStarted = errors.New("daemon not started")
|
||||
|
||||
// SockRoot holds the path of the default docker integration daemon socket
|
||||
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
|
||||
|
||||
type clientConfig struct {
|
||||
transport *http.Transport
|
||||
scheme string
|
||||
addr string
|
||||
}
|
||||
|
||||
// Daemon represents a Docker daemon for the testing framework
|
||||
type Daemon struct {
|
||||
GlobalFlags []string
|
||||
Root string
|
||||
Folder string
|
||||
Wait chan error
|
||||
UseDefaultHost bool
|
||||
UseDefaultTLSHost bool
|
||||
|
||||
id string
|
||||
logFile *os.File
|
||||
cmd *exec.Cmd
|
||||
storageDriver string
|
||||
userlandProxy bool
|
||||
execRoot string
|
||||
experimental bool
|
||||
dockerdBinary string
|
||||
log logT
|
||||
}
|
||||
|
||||
// New returns a Daemon instance to be used for testing.
|
||||
// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
|
||||
// The daemon will not automatically start.
|
||||
func New(t testingT, ops ...func(*Daemon)) *Daemon {
|
||||
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
|
||||
if dest == "" {
|
||||
dest = os.Getenv("DEST")
|
||||
}
|
||||
assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
|
||||
|
||||
storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
|
||||
|
||||
assert.NilError(t, os.MkdirAll(SockRoot, 0700), "could not create daemon socket root")
|
||||
|
||||
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
|
||||
dir := filepath.Join(dest, id)
|
||||
daemonFolder, err := filepath.Abs(dir)
|
||||
assert.NilError(t, err, "Could not make %q an absolute path", dir)
|
||||
daemonRoot := filepath.Join(daemonFolder, "root")
|
||||
|
||||
assert.NilError(t, os.MkdirAll(daemonRoot, 0755), "Could not create daemon root %q", dir)
|
||||
|
||||
userlandProxy := true
|
||||
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
|
||||
if val, err := strconv.ParseBool(env); err != nil {
|
||||
userlandProxy = val
|
||||
}
|
||||
}
|
||||
d := &Daemon{
|
||||
id: id,
|
||||
Folder: daemonFolder,
|
||||
Root: daemonRoot,
|
||||
storageDriver: storageDriver,
|
||||
userlandProxy: userlandProxy,
|
||||
execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
|
||||
dockerdBinary: defaultDockerdBinary,
|
||||
log: t,
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(d)
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// RootDir returns the root directory of the daemon.
|
||||
func (d *Daemon) RootDir() string {
|
||||
return d.Root
|
||||
}
|
||||
|
||||
// ID returns the generated id of the daemon
|
||||
func (d *Daemon) ID() string {
|
||||
return d.id
|
||||
}
|
||||
|
||||
// StorageDriver returns the configured storage driver of the daemon
|
||||
func (d *Daemon) StorageDriver() string {
|
||||
return d.storageDriver
|
||||
}
|
||||
|
||||
// Sock returns the socket path of the daemon
|
||||
func (d *Daemon) Sock() string {
|
||||
return fmt.Sprintf("unix://" + d.sockPath())
|
||||
}
|
||||
|
||||
func (d *Daemon) sockPath() string {
|
||||
return filepath.Join(SockRoot, d.id+".sock")
|
||||
}
|
||||
|
||||
// LogFileName returns the path the daemon's log file
|
||||
func (d *Daemon) LogFileName() string {
|
||||
return d.logFile.Name()
|
||||
}
|
||||
|
||||
// ReadLogFile returns the content of the daemon log file
|
||||
func (d *Daemon) ReadLogFile() ([]byte, error) {
|
||||
return ioutil.ReadFile(d.logFile.Name())
|
||||
}
|
||||
|
||||
// NewClient creates new client based on daemon's socket path
|
||||
func (d *Daemon) NewClient() (*client.Client, error) {
|
||||
return client.NewClientWithOpts(
|
||||
client.FromEnv,
|
||||
client.WithHost(d.Sock()))
|
||||
}
|
||||
|
||||
// CleanupExecRoot cleans the daemon exec root (network namespaces, ...)
|
||||
func (d *Daemon) CleanupExecRoot(t testingT) {
|
||||
cleanupExecRoot(t, d.execRoot)
|
||||
}
|
||||
|
||||
// Start starts the daemon and return once it is ready to receive requests.
|
||||
func (d *Daemon) Start(t testingT, args ...string) {
|
||||
if err := d.StartWithError(args...); err != nil {
|
||||
t.Fatalf("Error starting daemon with arguments: %v", args)
|
||||
}
|
||||
}
|
||||
|
||||
// StartWithError starts the daemon and return once it is ready to receive requests.
|
||||
// It returns an error in case it couldn't start.
|
||||
func (d *Daemon) StartWithError(args ...string) error {
|
||||
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
|
||||
}
|
||||
|
||||
return d.StartWithLogFile(logFile, args...)
|
||||
}
|
||||
|
||||
// StartWithLogFile will start the daemon and attach its streams to a given file.
|
||||
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
|
||||
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
|
||||
}
|
||||
args := append(d.GlobalFlags,
|
||||
"--containerd", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--data-root", d.Root,
|
||||
"--exec-root", d.execRoot,
|
||||
"--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
|
||||
fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
|
||||
)
|
||||
if d.experimental {
|
||||
args = append(args, "--experimental", "--init")
|
||||
}
|
||||
if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
|
||||
args = append(args, []string{"--host", d.Sock()}...)
|
||||
}
|
||||
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
||||
args = append(args, []string{"--userns-remap", root}...)
|
||||
}
|
||||
|
||||
// If we don't explicitly set the log-level or debug flag(-D) then
|
||||
// turn on debug mode
|
||||
foundLog := false
|
||||
foundSd := false
|
||||
for _, a := range providedArgs {
|
||||
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
|
||||
foundLog = true
|
||||
}
|
||||
if strings.Contains(a, "--storage-driver") {
|
||||
foundSd = true
|
||||
}
|
||||
}
|
||||
if !foundLog {
|
||||
args = append(args, "--debug")
|
||||
}
|
||||
if d.storageDriver != "" && !foundSd {
|
||||
args = append(args, "--storage-driver", d.storageDriver)
|
||||
}
|
||||
|
||||
args = append(args, providedArgs...)
|
||||
d.cmd = exec.Command(dockerdBinary, args...)
|
||||
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
|
||||
d.cmd.Stdout = out
|
||||
d.cmd.Stderr = out
|
||||
d.logFile = out
|
||||
|
||||
if err := d.cmd.Start(); err != nil {
|
||||
return errors.Errorf("[%s] could not start daemon container: %v", d.id, err)
|
||||
}
|
||||
|
||||
wait := make(chan error)
|
||||
|
||||
go func() {
|
||||
wait <- d.cmd.Wait()
|
||||
d.log.Logf("[%s] exiting daemon", d.id)
|
||||
close(wait)
|
||||
}()
|
||||
|
||||
d.Wait = wait
|
||||
|
||||
tick := time.Tick(500 * time.Millisecond)
|
||||
// make sure daemon is ready to receive requests
|
||||
startTime := time.Now().Unix()
|
||||
for {
|
||||
d.log.Logf("[%s] waiting for daemon to start", d.id)
|
||||
if time.Now().Unix()-startTime > 5 {
|
||||
// After 5 seconds, give up
|
||||
return errors.Errorf("[%s] Daemon exited and never started", d.id)
|
||||
}
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
return errors.Errorf("[%s] timeout: daemon does not respond", d.id)
|
||||
case <-tick:
|
||||
clientConfig, err := d.getClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: clientConfig.transport,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", "/_ping", nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "[%s] could not create new request", d.id)
|
||||
}
|
||||
req.URL.Host = clientConfig.addr
|
||||
req.URL.Scheme = clientConfig.scheme
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
|
||||
}
|
||||
d.log.Logf("[%s] daemon started\n", d.id)
|
||||
d.Root, err = d.queryRootDir()
|
||||
if err != nil {
|
||||
return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
|
||||
}
|
||||
return nil
|
||||
case <-d.Wait:
|
||||
return errors.Errorf("[%s] Daemon exited during startup", d.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartWithBusybox will first start the daemon with Daemon.Start()
|
||||
// then save the busybox image from the main daemon and load it into this Daemon instance.
|
||||
func (d *Daemon) StartWithBusybox(t testingT, arg ...string) {
|
||||
d.Start(t, arg...)
|
||||
d.LoadBusybox(t)
|
||||
}
|
||||
|
||||
// Kill will send a SIGKILL to the daemon
|
||||
func (d *Daemon) Kill() error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
|
||||
defer func() {
|
||||
d.logFile.Close()
|
||||
d.cmd = nil
|
||||
}()
|
||||
|
||||
if err := d.cmd.Process.Kill(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
|
||||
}
|
||||
|
||||
// Pid returns the pid of the daemon
|
||||
func (d *Daemon) Pid() int {
|
||||
return d.cmd.Process.Pid
|
||||
}
|
||||
|
||||
// Interrupt stops the daemon by sending it an Interrupt signal
|
||||
func (d *Daemon) Interrupt() error {
|
||||
return d.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// Signal sends the specified signal to the daemon if running
|
||||
func (d *Daemon) Signal(signal os.Signal) error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
return d.cmd.Process.Signal(signal)
|
||||
}
|
||||
|
||||
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
|
||||
// stack to its log file and exit
|
||||
// This is used primarily for gathering debug information on test timeout
|
||||
func (d *Daemon) DumpStackAndQuit() {
|
||||
if d.cmd == nil || d.cmd.Process == nil {
|
||||
return
|
||||
}
|
||||
SignalDaemonDump(d.cmd.Process.Pid)
|
||||
}
|
||||
|
||||
// Stop will send a SIGINT every second and wait for the daemon to stop.
|
||||
// If it times out, a SIGKILL is sent.
|
||||
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
||||
// instantiate a new one with NewDaemon.
|
||||
// If an error occurs while starting the daemon, the test will fail.
|
||||
func (d *Daemon) Stop(t testingT) {
|
||||
err := d.StopWithError()
|
||||
if err != nil {
|
||||
if err != errDaemonNotStarted {
|
||||
t.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
|
||||
} else {
|
||||
t.Logf("Daemon %s is not started", d.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
|
||||
// If it timeouts, a SIGKILL is sent.
|
||||
// Stop will not delete the daemon directory. If a purged daemon is needed,
|
||||
// instantiate a new one with NewDaemon.
|
||||
func (d *Daemon) StopWithError() error {
|
||||
if d.cmd == nil || d.Wait == nil {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
|
||||
defer func() {
|
||||
d.logFile.Close()
|
||||
d.cmd = nil
|
||||
}()
|
||||
|
||||
i := 1
|
||||
tick := time.Tick(time.Second)
|
||||
|
||||
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
if strings.Contains(err.Error(), "os: process already finished") {
|
||||
return errDaemonNotStarted
|
||||
}
|
||||
return errors.Errorf("could not send signal: %v", err)
|
||||
}
|
||||
out1:
|
||||
for {
|
||||
select {
|
||||
case err := <-d.Wait:
|
||||
return err
|
||||
case <-time.After(20 * time.Second):
|
||||
// time for stopping jobs and run onShutdown hooks
|
||||
d.log.Logf("[%s] daemon started", d.id)
|
||||
break out1
|
||||
}
|
||||
}
|
||||
|
||||
out2:
|
||||
for {
|
||||
select {
|
||||
case err := <-d.Wait:
|
||||
return err
|
||||
case <-tick:
|
||||
i++
|
||||
if i > 5 {
|
||||
d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
|
||||
break out2
|
||||
}
|
||||
d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
|
||||
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
||||
return errors.Errorf("could not send signal: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.cmd.Process.Kill(); err != nil {
|
||||
d.log.Logf("Could not kill daemon: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
d.cmd.Wait()
|
||||
|
||||
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
|
||||
}
|
||||
|
||||
// Restart will restart the daemon by first stopping it and the starting it.
|
||||
// If an error occurs while starting the daemon, the test will fail.
|
||||
func (d *Daemon) Restart(t testingT, args ...string) {
|
||||
d.Stop(t)
|
||||
d.handleUserns()
|
||||
d.Start(t, args...)
|
||||
}
|
||||
|
||||
// RestartWithError will restart the daemon by first stopping it and then starting it.
|
||||
func (d *Daemon) RestartWithError(arg ...string) error {
|
||||
if err := d.StopWithError(); err != nil {
|
||||
return err
|
||||
}
|
||||
d.handleUserns()
|
||||
return d.StartWithError(arg...)
|
||||
}
|
||||
|
||||
func (d *Daemon) handleUserns() {
|
||||
// in the case of tests running a user namespace-enabled daemon, we have resolved
|
||||
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
|
||||
// remapped root is added--we need to subtract it from the path before calling
|
||||
// start or else we will continue making subdirectories rather than truly restarting
|
||||
// with the same location/root:
|
||||
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
||||
d.Root = filepath.Dir(d.Root)
|
||||
}
|
||||
}
|
||||
|
||||
// ReloadConfig asks the daemon to reload its configuration
|
||||
func (d *Daemon) ReloadConfig() error {
|
||||
if d.cmd == nil || d.cmd.Process == nil {
|
||||
return errors.New("daemon is not running")
|
||||
}
|
||||
|
||||
errCh := make(chan error)
|
||||
started := make(chan struct{})
|
||||
go func() {
|
||||
_, body, err := request.DoOnHost(d.Sock(), "/events", request.Method(http.MethodGet))
|
||||
close(started)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
defer body.Close()
|
||||
dec := json.NewDecoder(body)
|
||||
for {
|
||||
var e events.Message
|
||||
if err := dec.Decode(&e); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if e.Type != events.DaemonEventType {
|
||||
continue
|
||||
}
|
||||
if e.Action != "reload" {
|
||||
continue
|
||||
}
|
||||
close(errCh) // notify that we are done
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
<-started
|
||||
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
|
||||
return errors.Errorf("error signaling daemon reload: %v", err)
|
||||
}
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return errors.Errorf("error waiting for daemon reload event: %v", err)
|
||||
}
|
||||
case <-time.After(30 * time.Second):
|
||||
return errors.New("timeout waiting for daemon reload event")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadBusybox image into the daemon
|
||||
func (d *Daemon) LoadBusybox(t assert.TestingT) {
|
||||
clientHost, err := client.NewEnvClient()
|
||||
assert.NilError(t, err, "failed to create client")
|
||||
defer clientHost.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
|
||||
assert.NilError(t, err, "failed to download busybox")
|
||||
defer reader.Close()
|
||||
|
||||
client, err := d.NewClient()
|
||||
assert.NilError(t, err, "failed to create client")
|
||||
defer client.Close()
|
||||
|
||||
resp, err := client.ImageLoad(ctx, reader, true)
|
||||
assert.NilError(t, err, "failed to load busybox")
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
func (d *Daemon) getClientConfig() (*clientConfig, error) {
|
||||
var (
|
||||
transport *http.Transport
|
||||
scheme string
|
||||
addr string
|
||||
proto string
|
||||
)
|
||||
if d.UseDefaultTLSHost {
|
||||
option := &tlsconfig.Options{
|
||||
CAFile: "fixtures/https/ca.pem",
|
||||
CertFile: "fixtures/https/client-cert.pem",
|
||||
KeyFile: "fixtures/https/client-key.pem",
|
||||
}
|
||||
tlsConfig, err := tlsconfig.Client(*option)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transport = &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
|
||||
scheme = "https"
|
||||
proto = "tcp"
|
||||
} else if d.UseDefaultHost {
|
||||
addr = opts.DefaultUnixSocket
|
||||
proto = "unix"
|
||||
scheme = "http"
|
||||
transport = &http.Transport{}
|
||||
} else {
|
||||
addr = d.sockPath()
|
||||
proto = "unix"
|
||||
scheme = "http"
|
||||
transport = &http.Transport{}
|
||||
}
|
||||
|
||||
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transport.DisableKeepAlives = true
|
||||
|
||||
return &clientConfig{
|
||||
transport: transport,
|
||||
scheme: scheme,
|
||||
addr: addr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) queryRootDir() (string, error) {
|
||||
// update daemon root by asking /info endpoint (to support user
|
||||
// namespaced daemon with root remapped uid.gid directory)
|
||||
clientConfig, err := d.getClientConfig()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: clientConfig.transport,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", "/info", nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.URL.Host = clientConfig.addr
|
||||
req.URL.Scheme = clientConfig.scheme
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
|
||||
return resp.Body.Close()
|
||||
})
|
||||
|
||||
type Info struct {
|
||||
DockerRootDir string
|
||||
}
|
||||
var b []byte
|
||||
var i Info
|
||||
b, err = request.ReadBody(body)
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
// read the docker root dir
|
||||
if err = json.Unmarshal(b, &i); err == nil {
|
||||
return i.DockerRootDir, nil
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Info returns the info struct for this daemon
|
||||
func (d *Daemon) Info(t assert.TestingT) types.Info {
|
||||
apiclient, err := client.NewClientWithOpts(client.WithHost((d.Sock())))
|
||||
assert.NilError(t, err)
|
||||
info, err := apiclient.Info(context.Background())
|
||||
assert.NilError(t, err)
|
||||
return info
|
||||
}
|
|
@@ -1,16 +1,15 @@
// +build !windows

package daemon // import "github.com/docker/docker/integration-cli/daemon"
package daemon // import "github.com/docker/docker/internal/test/daemon"

import (
	"os"
	"path/filepath"

	"github.com/go-check/check"
	"golang.org/x/sys/unix"
)

func cleanupExecRoot(c *check.C, execRoot string) {
func cleanupExecRoot(t testingT, execRoot string) {
	// Cleanup network namespaces in the exec root of this
	// daemon because this exec root is specific to this
	// daemon instance and has no chance of getting

@@ -19,7 +18,7 @@ func cleanupExecRoot(c *check.C, execRoot string) {
	netnsPath := filepath.Join(execRoot, "netns")
	filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
		if err := unix.Unmount(path, unix.MNT_FORCE); err != nil {
			c.Logf("unmount of %s failed: %v", path, err)
			t.Logf("unmount of %s failed: %v", path, err)
		}
		os.Remove(path)
		return nil
@@ -1,10 +1,9 @@
package daemon // import "github.com/docker/docker/integration-cli/daemon"
package daemon // import "github.com/docker/docker/internal/test/daemon"

import (
	"fmt"
	"strconv"

	"github.com/go-check/check"
	"golang.org/x/sys/windows"
)

@@ -22,5 +21,5 @@ func signalDaemonReload(pid int) error {
	return fmt.Errorf("daemon reload not supported")
}

func cleanupExecRoot(c *check.C, execRoot string) {
func cleanupExecRoot(t testingT, execRoot string) {
}
@@ -0,0 +1,13 @@
package daemon

// WithExperimental sets the daemon in experimental mode
func WithExperimental(d *Daemon) {
	d.experimental = true
}

// WithDockerdBinary sets the dockerd binary to the specified one
func WithDockerdBinary(dockerdBinary string) func(*Daemon) {
	return func(d *Daemon) {
		d.dockerdBinary = dockerdBinary
	}
}
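The two options above show both shapes of the functional-option pattern used by the new package: a bare func(*Daemon) for switch-like settings, and a constructor that returns a closure for parameterized ones. A hypothetical extra option (not part of this commit) would follow the same shape:

// WithStorageDriver is a hypothetical illustration of the pattern; it is not
// part of this commit. It would set the unexported storageDriver field that
// New otherwise takes from the DOCKER_GRAPHDRIVER environment variable.
func WithStorageDriver(driver string) func(*Daemon) {
	return func(d *Daemon) {
		d.storageDriver = driver
	}
}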