Merge pull request #22541 from crosbymichael/graph-restore

Implement graph driver restore on reboot
Alexander Morozov 2016-05-23 22:57:23 -07:00
Parent a5aba7752a 8bb4d31b10
Commit d7dfe9103b
19 changed files with 252 additions and 413 deletions

View file

@ -277,11 +277,6 @@ func (daemon *Daemon) restore() error {
defer wg.Done()
rm := c.RestartManager(false)
if c.IsRunning() || c.IsPaused() {
// Fix activityCount such that graph mounts can be unmounted later
if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
return
}
if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
logrus.Errorf("Failed to restore with containerd: %q", err)
return
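
With ReinitRWLayer gone, the restore branch reduces to the containerd handoff. Reassembled from the surviving context lines of this hunk, the post-merge code reads roughly as follows (a sketch, not part of the diff):

```go
// Restore goroutine body after this commit: no RW-layer reinitialization,
// just hand the still-running container back to containerd.
defer wg.Done()
rm := c.RestartManager(false)
if c.IsRunning() || c.IsPaused() {
	if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
		logrus.Errorf("Failed to restore with containerd: %q", err)
		return
	}
}
```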

View file

@ -70,6 +70,7 @@ type Driver struct {
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
}
@ -108,6 +109,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -320,6 +322,9 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
m = a.getMountpoint(id)
}
}
if count := a.ctr.Increment(m); count > 1 {
return m, nil
}
// If a dir does not have a parent (no layers), do not try to mount;
// just return the diff path to the data
@ -344,6 +349,9 @@ func (a *Driver) Put(id string) error {
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
if count := a.ctr.Decrement(m); count > 0 {
return nil
}
err := a.unmount(m)
if err != nil {

View file

@ -2,31 +2,66 @@ package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]int
mu sync.Mutex
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter() *RefCounter {
return &RefCounter{counts: make(map[string]int)}
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given path and returns the current count
func (c *RefCounter) Increment(id string) int {
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
c.counts[id]++
count := c.counts[id]
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If this is the first time we have seen this path, check whether it is
// already mounted on the system; if it is, start the count at one so the
// existing in-use mount is accounted for.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.mu.Unlock()
return count
return m.count
}
// Decrement decreases the ref count for the given path and returns the current count
func (c *RefCounter) Decrement(id string) int {
func (c *RefCounter) Decrement(path string) int {
c.mu.Lock()
c.counts[id]--
count := c.counts[id]
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If this is the first time we have seen this path, check whether it is
// already mounted on the system; if it is, start the count at one so the
// existing in-use mount is accounted for.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.mu.Unlock()
return count
return m.count
}
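
For orientation, this is the pairing discipline the drivers below follow around the counter. A minimal sketch only: `mountPath`, `doMount`, and `doUnmount` are hypothetical stand-ins for driver-specific logic.

```go
// Sketch of how Get/Put bracket a mount with the RefCounter. On the first
// Increment for a path the Checker runs: if the path is already mounted
// (e.g. left over from before a daemon restart), the count starts above
// zero, Increment returns 2, and the live mount is reused instead of
// being mounted a second time.
func (d *Driver) Get(id, mountLabel string) (string, error) {
	mountpoint := d.mountPath(id) // hypothetical helper
	if count := d.ctr.Increment(mountpoint); count > 1 {
		return mountpoint, nil // mounted by us or by a previous daemon
	}
	if err := doMount(id, mountpoint, mountLabel); err != nil { // hypothetical
		d.ctr.Decrement(mountpoint)
		return "", err
	}
	return mountpoint, nil
}

func (d *Driver) Put(id string) error {
	mountpoint := d.mountPath(id)
	if count := d.ctr.Decrement(mountpoint); count > 0 {
		return nil // still referenced; leave mounted
	}
	return doUnmount(mountpoint) // hypothetical
}
```

Note the counter is keyed by mount path rather than layer id, which is why the driver hunks below switch every Increment/Decrement call from `id` to the mountpoint.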

View file

@ -47,7 +47,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(),
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
@ -160,35 +160,35 @@ func (d *Driver) Remove(id string) error {
// Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id, mountLabel string) (string, error) {
mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Increment(id); count > 1 {
if count := d.ctr.Increment(mp); count > 1 {
return mp, nil
}
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
return "", err
}
// Mount the device
if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
return "", err
}
rootFs := path.Join(mp, "rootfs")
if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
@ -198,7 +198,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
@ -209,10 +209,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// Put unmounts a device and removes it.
func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(id); count > 0 {
mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Decrement(mp); count > 0 {
return nil
}
mp := path.Join(d.home, "mnt", id)
err := d.DeviceSet.UnmountDevice(id, mp)
if err != nil {
logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)

View file

@ -113,6 +113,12 @@ type FileGetCloser interface {
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}

View file

@ -5,6 +5,8 @@ package graphdriver
import (
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
const (
@ -89,6 +91,36 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to
// determine whether the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
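
Two checker flavours result, and the driver hunks in this commit pick between them; for example:

```go
// aufs/overlay: the mount carries a distinctive filesystem magic, so a
// cheap statfs-based check suffices.
ctr := graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs))

// devicemapper/zfs: no distinctive magic, so fall back to /proc/mountinfo.
ctr = graphdriver.NewRefCounter(graphdriver.NewDefaultChecker())
```

The Windows driver, where neither check applies, supplies a stub checker that always reports false (see below).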
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t

View file

@ -9,7 +9,6 @@ import (
"os"
"os/exec"
"path"
"sync"
"syscall"
"github.com/Sirupsen/logrus"
@ -92,12 +91,10 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
pathCacheLock sync.Mutex
pathCache map[string]string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
func init() {
@ -141,11 +138,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
d := &Driver{
home: home,
pathCache: make(map[string]string),
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(),
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
}
return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
@ -328,77 +324,53 @@ func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err
}
d.pathCacheLock.Lock()
delete(d.pathCache, id)
d.pathCacheLock.Unlock()
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (string, error) {
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
// If id has a root, just return it
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
d.pathCacheLock.Lock()
d.pathCache[id] = rootDir
d.pathCacheLock.Unlock()
return rootDir, nil
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
}
}
}()
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
if err != nil {
return "", err
}
lowerDir := path.Join(d.dir(string(lowerID)), "root")
upperDir := path.Join(dir, "upper")
workDir := path.Join(dir, "work")
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(id); count > 1 {
return mergedDir, nil
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
// if it's mounted already, just return
mounted, err := d.mounted(mergedDir)
if err != nil {
d.ctr.Decrement(id)
return "", err
}
if mounted {
d.ctr.Decrement(id)
return mergedDir, nil
}
var (
lowerDir = path.Join(d.dir(string(lowerID)), "root")
upperDir = path.Join(dir, "upper")
workDir = path.Join(dir, "work")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
)
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
d.ctr.Decrement(id)
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(id)
syscall.Unmount(mergedDir, 0)
return "", err
}
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
d.ctr.Decrement(id)
syscall.Unmount(mergedDir, 0)
return "", err
}
d.pathCacheLock.Lock()
d.pathCache[id] = mergedDir
d.pathCacheLock.Unlock()
return mergedDir, nil
}
@ -408,30 +380,12 @@ func (d *Driver) mounted(dir string) (bool, error) {
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(id); count > 0 {
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
d.pathCacheLock.Lock()
mountpoint, exists := d.pathCache[id]
d.pathCacheLock.Unlock()
if !exists {
logrus.Debugf("Put on a non-mounted device %s", id)
// but it might be still here
if d.Exists(id) {
mountpoint = path.Join(d.dir(id), "merged")
}
d.pathCacheLock.Lock()
d.pathCache[id] = mountpoint
d.pathCacheLock.Unlock()
}
if mounted, err := d.mounted(mountpoint); mounted || err != nil {
if err = syscall.Unmount(mountpoint, 0); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err
if err := syscall.Unmount(mountpoint, 0); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return nil
}
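
Get now uses a named error return so one deferred cleanup can undo both the reference and the mount when any later step fails. The pattern, isolated (a sketch of the code above, not additional behavior):

```go
// Take the reference first; on error, drop it and unmount only if we
// were the last holder.
func (d *Driver) Get(id, mountLabel string) (s string, err error) {
	mergedDir := path.Join(d.dir(id), "merged")
	if count := d.ctr.Increment(mergedDir); count > 1 {
		return mergedDir, nil
	}
	defer func() {
		if err != nil {
			if c := d.ctr.Decrement(mergedDir); c <= 0 {
				syscall.Unmount(mergedDir, 0)
			}
		}
	}()
	// ... read lower-id, mount the overlay, chown workdir; any error
	// returned from here on triggers the deferred cleanup.
	return mergedDir, nil
}
```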

View file

@ -15,6 +15,7 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
@ -43,10 +44,22 @@ func init() {
reexec.Register("docker-windows-write-layer", writeLayer)
}
type checker struct {
}
func (c *checker) IsMounted(path string) bool {
return false
}
// Driver represents a windows graph driver.
type Driver struct {
// info stores the shim driver information
info hcsshim.DriverInfo
ctr *graphdriver.RefCounter
// It is safe for Windows to use a cache here because it does not support
// restoring containers when the daemon dies.
cacheMu sync.Mutex
cache map[string]string
}
func isTP5OrOlder() bool {
@ -61,6 +74,8 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
HomeDir: home,
Flavour: filterDriver,
},
cache: make(map[string]string),
ctr: graphdriver.NewRefCounter(&checker{}),
}
return d, nil
}
@ -211,17 +226,23 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
if err != nil {
return "", err
}
if count := d.ctr.Increment(rID); count > 1 {
return d.cache[rID], nil
}
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID)
if err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
d.ctr.Decrement(rID)
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
}
@ -230,11 +251,15 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
if err != nil {
d.ctr.Decrement(rID)
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
}
return "", err
}
d.cacheMu.Lock()
d.cache[rID] = mountPath
d.cacheMu.Unlock()
// If the layer has a mount path, use that. Otherwise, use the
// folder path.
@ -255,6 +280,12 @@ func (d *Driver) Put(id string) error {
if err != nil {
return err
}
if count := d.ctr.Decrement(rID); count > 0 {
return nil
}
d.cacheMu.Lock()
delete(d.cache, rID)
d.cacheMu.Unlock()
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err

View file

@ -105,7 +105,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
filesystemsCache: filesystemsCache,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(),
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
@ -307,7 +307,7 @@ func (d *Driver) Remove(id string) error {
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) {
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(id); count > 1 {
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}
@ -317,17 +317,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mountpoint)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mountpoint)
return "", err
}
if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
d.ctr.Decrement(id)
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
}
@ -335,7 +335,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
mount.Unmount(mountpoint)
d.ctr.Decrement(id)
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
}
@ -344,10 +344,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
// Put removes the existing mountpoint for the given id if it exists.
func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(id); count > 0 {
mountpoint := d.mountPath(id)
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
mountpoint := d.mountPath(id)
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
return err

View file

@ -121,10 +121,6 @@ func (ls *mockLayerStore) GetMountID(string) (string, error) {
return "", errors.New("not implemented")
}
func (ls *mockLayerStore) ReinitRWLayer(layer.RWLayer) error {
return errors.New("not implemented")
}
func (ls *mockLayerStore) Cleanup() error {
return nil
}

View file

@ -1616,35 +1616,6 @@ func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out))
}
// os.Kill should kill daemon ungracefully, leaving behind container mounts.
// A subsequent daemon restart should clean up said mounts.
func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonKill(c *check.C) {
testRequires(c, NotExperimentalDaemon)
c.Assert(s.d.StartWithBusybox(), check.IsNil)
out, err := s.d.Cmd("run", "-d", "busybox", "top")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
id := strings.TrimSpace(out)
c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
// container mounts should exist even after daemon has crashed.
comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
// restart daemon.
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
// Now, container mounts should be gone.
mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
}
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
t.Fatal(err)

View file

@ -174,7 +174,6 @@ type Store interface {
CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
GetRWLayer(id string) (RWLayer, error)
GetMountID(id string) (string, error)
ReinitRWLayer(l RWLayer) error
ReleaseRWLayer(RWLayer) ([]Metadata, error)
Cleanup() error

View file

@ -495,25 +495,6 @@ func (ls *layerStore) GetMountID(id string) (string, error) {
return mount.mountID, nil
}
// ReinitRWLayer reinitializes a given mount to the layerstore, specifically
// initializing the usage count. It should strictly only be used in the
// daemon's restore path to restore state of live containers.
func (ls *layerStore) ReinitRWLayer(l RWLayer) error {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[l.Name()]
if !ok {
return ErrMountDoesNotExist
}
if err := m.incActivityCount(l); err != nil {
return err
}
return nil
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()

View file

@ -174,10 +174,7 @@ func getCachedLayer(l Layer) *roLayer {
}
func getMountLayer(l RWLayer) *mountedLayer {
if rl, ok := l.(*referencedRWLayer); ok {
return rl.mountedLayer
}
return l.(*mountedLayer)
return l.(*referencedRWLayer).mountedLayer
}
func createMetadata(layers ...Layer) []Metadata {
@ -400,14 +397,11 @@ func TestStoreRestore(t *testing.T) {
if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
t.Fatal(err)
}
assertActivityCount(t, m, 1)
if err := m.Unmount(); err != nil {
t.Fatal(err)
}
assertActivityCount(t, m, 0)
ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver)
if err != nil {
t.Fatal(err)
@ -438,20 +432,15 @@ func TestStoreRestore(t *testing.T) {
t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
}
assertActivityCount(t, m2, 1)
if mountPath, err := m2.Mount(""); err != nil {
t.Fatal(err)
} else if path != mountPath {
t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
}
assertActivityCount(t, m2, 2)
if err := m2.Unmount(); err != nil {
t.Fatal(err)
}
assertActivityCount(t, m2, 1)
b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt"))
if err != nil {
t.Fatal(err)
@ -464,8 +453,6 @@ func TestStoreRestore(t *testing.T) {
t.Fatal(err)
}
assertActivityCount(t, m2, 0)
if metadata, err := ls2.ReleaseRWLayer(m2); err != nil {
t.Fatal(err)
} else if len(metadata) != 0 {
@ -674,13 +661,6 @@ func assertReferences(t *testing.T, references ...Layer) {
}
}
func assertActivityCount(t *testing.T, l RWLayer, expected int) {
rl := l.(*referencedRWLayer)
if rl.activityCount != expected {
t.Fatalf("Unexpected activity count %d, expected %d", rl.activityCount, expected)
}
}
func TestRegisterExistingLayer(t *testing.T) {
ls, _, cleanup := newTestStore(t)
defer cleanup()

View file

@ -380,8 +380,6 @@ func TestMountMigration(t *testing.T) {
Kind: archive.ChangeAdd,
})
assertActivityCount(t, rwLayer1, 1)
if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil {
t.Fatal("Expected error creating mount with same name")
} else if err != ErrMountNameConflict {
@ -401,16 +399,10 @@ func TestMountMigration(t *testing.T) {
t.Fatal(err)
}
assertActivityCount(t, rwLayer2, 1)
assertActivityCount(t, rwLayer1, 1)
if _, err := rwLayer2.Mount(""); err != nil {
t.Fatal(err)
}
assertActivityCount(t, rwLayer2, 2)
assertActivityCount(t, rwLayer1, 1)
if metadata, err := ls.Release(layer1); err != nil {
t.Fatal(err)
} else if len(metadata) > 0 {
@ -420,8 +412,6 @@ func TestMountMigration(t *testing.T) {
if err := rwLayer1.Unmount(); err != nil {
t.Fatal(err)
}
assertActivityCount(t, rwLayer2, 2)
assertActivityCount(t, rwLayer1, 0)
if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil {
t.Fatal(err)
@ -430,9 +420,6 @@ func TestMountMigration(t *testing.T) {
if err := rwLayer2.Unmount(); err != nil {
t.Fatal(err)
}
if _, err := ls.ReleaseRWLayer(rwLayer2); err == nil {
t.Fatal("Expected error deleting active mount")
}
if err := rwLayer2.Unmount(); err != nil {
t.Fatal(err)
}

View file

@ -2,7 +2,6 @@ package layer
import (
"io"
"sync"
"github.com/docker/docker/pkg/archive"
)
@ -50,14 +49,6 @@ func (ml *mountedLayer) Parent() Layer {
return nil
}
func (ml *mountedLayer) Mount(mountLabel string) (string, error) {
return ml.layerStore.driver.Get(ml.mountID, mountLabel)
}
func (ml *mountedLayer) Unmount() error {
return ml.layerStore.driver.Put(ml.mountID)
}
func (ml *mountedLayer) Size() (int64, error) {
return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
}
@ -83,106 +74,30 @@ func (ml *mountedLayer) hasReferences() bool {
return len(ml.references) > 0
}
func (ml *mountedLayer) incActivityCount(ref RWLayer) error {
rl, ok := ml.references[ref]
if !ok {
return ErrLayerNotRetained
}
if err := rl.acquire(); err != nil {
return err
}
return nil
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
rl, ok := ml.references[ref]
if !ok {
if _, ok := ml.references[ref]; !ok {
return ErrLayerNotRetained
}
if err := rl.release(); err != nil {
return err
}
delete(ml.references, ref)
return nil
}
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
ref.activityCount = 0
ml.references[ref] = ref
}
}
type referencedRWLayer struct {
*mountedLayer
activityL sync.Mutex
activityCount int
}
func (rl *referencedRWLayer) acquire() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
rl.activityCount++
return nil
}
func (rl *referencedRWLayer) release() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount > 0 {
return ErrActiveMount
}
rl.activityCount = -1
return nil
}
func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount == -1 {
return "", ErrLayerNotRetained
}
if rl.activityCount > 0 {
rl.activityCount++
return rl.path, nil
}
m, err := rl.mountedLayer.Mount(mountLabel)
if err == nil {
rl.activityCount++
rl.path = m
}
return m, err
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
}
// Unmount releases the reference taken by Mount and unmounts the underlying layer when no references remain
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount == 0 {
return ErrNotMounted
}
if rl.activityCount == -1 {
return ErrLayerNotRetained
}
rl.activityCount--
if rl.activityCount > 0 {
return nil
}
return rl.mountedLayer.Unmount()
return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
}
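
With the activity count gone, Mount and Unmount are thin wrappers over the graph driver's Get and Put, so reference counting lives entirely in the driver. A usage sketch of the resulting contract (`parentID` and the layer name are hypothetical):

```go
// Each Mount must be balanced by exactly one Unmount; the driver-level
// RefCounter, not the layer, tracks how many holders the mount has.
rw, err := layerStore.CreateRWLayer("example", parentID, "", nil, nil)
if err != nil {
	log.Fatal(err)
}
dir, err := rw.Mount("") // driver.Get: first call mounts, later calls share
if err != nil {
	log.Fatal(err)
}
fmt.Println("mounted at", dir)
if err := rw.Unmount(); err != nil { // driver.Put: unmounts at zero refs
	log.Fatal(err)
}
```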

View file

@ -13,7 +13,7 @@ import (
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/mount"
"github.com/opencontainers/specs/specs-go"
specs "github.com/opencontainers/specs/specs-go"
"golang.org/x/net/context"
)
@ -380,6 +380,81 @@ func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier {
return w
}
func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) {
clnt.lock(cont.Id)
defer clnt.unlock(cont.Id)
logrus.Debugf("restore container %s state %s", cont.Id, cont.Status)
containerID := cont.Id
if _, err := clnt.getContainer(containerID); err == nil {
return fmt.Errorf("container %s is already active", containerID)
}
defer func() {
if err != nil {
clnt.deleteContainer(cont.Id)
}
}()
container := clnt.newContainer(cont.BundlePath, options...)
container.systemPid = systemPid(cont)
var terminal bool
for _, p := range cont.Processes {
if p.Pid == InitFriendlyName {
terminal = p.Terminal
}
}
iopipe, err := container.openFifos(terminal)
if err != nil {
return err
}
if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil {
return err
}
clnt.appendContainer(container)
err = clnt.backend.StateChanged(containerID, StateInfo{
CommonStateInfo: CommonStateInfo{
State: StateRestore,
Pid: container.systemPid,
}})
if err != nil {
return err
}
if event, ok := clnt.remote.pastEvents[containerID]; ok {
// This should only be a pause or resume event
if event.Type == StatePause || event.Type == StateResume {
return clnt.backend.StateChanged(containerID, StateInfo{
CommonStateInfo: CommonStateInfo{
State: event.Type,
Pid: container.systemPid,
}})
}
logrus.Warnf("unexpected backlog event: %#v", event)
}
return nil
}
func (clnt *client) Restore(containerID string, options ...CreateOption) error {
cont, err := clnt.getContainerdContainer(containerID)
if err == nil && cont.Status != "stopped" {
if err := clnt.restore(cont, options...); err != nil {
logrus.Errorf("error restoring %s: %v", containerID, err)
}
return nil
}
return clnt.setExited(containerID)
}
type exitNotifier struct {
id string
client *client

View file

@ -1,85 +0,0 @@
// +build experimental
package libcontainerd
import (
"fmt"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
)
func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) {
clnt.lock(cont.Id)
defer clnt.unlock(cont.Id)
logrus.Debugf("restore container %s state %s", cont.Id, cont.Status)
containerID := cont.Id
if _, err := clnt.getContainer(containerID); err == nil {
return fmt.Errorf("container %s is already active", containerID)
}
defer func() {
if err != nil {
clnt.deleteContainer(cont.Id)
}
}()
container := clnt.newContainer(cont.BundlePath, options...)
container.systemPid = systemPid(cont)
var terminal bool
for _, p := range cont.Processes {
if p.Pid == InitFriendlyName {
terminal = p.Terminal
}
}
iopipe, err := container.openFifos(terminal)
if err != nil {
return err
}
if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil {
return err
}
clnt.appendContainer(container)
err = clnt.backend.StateChanged(containerID, StateInfo{
CommonStateInfo: CommonStateInfo{
State: StateRestore,
Pid: container.systemPid,
}})
if err != nil {
return err
}
if event, ok := clnt.remote.pastEvents[containerID]; ok {
// This should only be a pause or resume event
if event.Type == StatePause || event.Type == StateResume {
return clnt.backend.StateChanged(containerID, StateInfo{
CommonStateInfo: CommonStateInfo{
State: event.Type,
Pid: container.systemPid,
}})
}
logrus.Warnf("unexpected backlog event: %#v", event)
}
return nil
}
func (clnt *client) Restore(containerID string, options ...CreateOption) error {
cont, err := clnt.getContainerdContainer(containerID)
if err == nil && cont.Status != "stopped" {
if err := clnt.restore(cont, options...); err != nil {
logrus.Errorf("error restoring %s: %v", containerID, err)
}
return nil
}
return clnt.setExited(containerID)
}

View file

@ -1,41 +0,0 @@
// +build !experimental
package libcontainerd
import (
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
func (clnt *client) Restore(containerID string, options ...CreateOption) error {
w := clnt.getOrCreateExitNotifier(containerID)
defer w.close()
cont, err := clnt.getContainerdContainer(containerID)
if err == nil && cont.Status != "stopped" {
clnt.lock(cont.Id)
container := clnt.newContainer(cont.BundlePath)
container.systemPid = systemPid(cont)
clnt.appendContainer(container)
clnt.unlock(cont.Id)
if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
logrus.Errorf("error sending sigterm to %v: %v", containerID, err)
}
select {
case <-time.After(10 * time.Second):
if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
logrus.Errorf("error sending sigkill to %v: %v", containerID, err)
}
select {
case <-time.After(2 * time.Second):
case <-w.wait():
return nil
}
case <-w.wait():
return nil
}
}
return clnt.setExited(containerID)
}