Merge pull request #21424 from tonistiigi/revert-one_ctr_to_rule_them_all

Revert "Move layer mount refcounts to mountedLayer"
This commit is contained in:
Tibor Vass 2016-03-23 03:41:58 -04:00
Родитель f668e36652 e91de9fb9d
Коммит cb41c36108
14 изменённых файлов: 361 добавлений и 360 удалений

Просмотреть файл

@ -222,7 +222,6 @@ func (daemon *Daemon) exportContainerRw(container *container.Container) (archive
archive, err := container.RWLayer.TarStream() archive, err := container.RWLayer.TarStream()
if err != nil { if err != nil {
daemon.Unmount(container) // logging is already handled in the `Unmount` function
return nil, err return nil, err
} }
return ioutils.NewReadCloserWrapper(archive, func() error { return ioutils.NewReadCloserWrapper(archive, func() error {

Просмотреть файл

@ -29,7 +29,6 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"path/filepath"
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
@ -65,13 +64,21 @@ func init() {
graphdriver.Register("aufs", Init) graphdriver.Register("aufs", Init)
} }
type data struct {
referenceCount int
path string
}
// Driver contains information about the filesystem mounted. // Driver contains information about the filesystem mounted.
// root of the filesystem
// sync.Mutex to protect against concurrent modifications
// active maps mount id to the count
type Driver struct { type Driver struct {
root string root string
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
pathCacheLock sync.Mutex sync.Mutex // Protects concurrent modification to active
pathCache map[string]string active map[string]*data
} }
// Init returns a new AUFS driver. // Init returns a new AUFS driver.
@ -104,10 +111,10 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
} }
a := &Driver{ a := &Driver{
root: root, root: root,
uidMaps: uidMaps, active: make(map[string]*data),
gidMaps: gidMaps, uidMaps: uidMaps,
pathCache: make(map[string]string), gidMaps: gidMaps,
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -221,7 +228,9 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
} }
} }
} }
a.Lock()
a.active[id] = &data{}
a.Unlock()
return nil return nil
} }
@ -250,91 +259,108 @@ func (a *Driver) createDirsFor(id string) error {
// Remove will unmount and remove the given id. // Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error { func (a *Driver) Remove(id string) error {
a.pathCacheLock.Lock() // Protect the a.active from concurrent access
mountpoint, exists := a.pathCache[id] a.Lock()
a.pathCacheLock.Unlock() defer a.Unlock()
if !exists {
mountpoint = a.getMountpoint(id) m := a.active[id]
if m != nil {
if m.referenceCount > 0 {
return nil
}
// Make sure the dir is umounted first
if err := a.unmount(m); err != nil {
return err
}
} }
if err := a.unmount(mountpoint); err != nil { tmpDirs := []string{
// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted "mnt",
logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err) "diff",
} }
// Atomically remove each directory in turn by first moving it out of the // Atomically remove each directory in turn by first moving it out of the
// way (so that docker doesn't find it anymore) before doing removal of // way (so that docker doesn't find it anymore) before doing removal of
// the whole tree. // the whole tree.
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) for _, p := range tmpDirs {
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { realPath := path.Join(a.rootPath(), p, id)
return err tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpPath)
} }
defer os.RemoveAll(tmpMntPath)
tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpDiffpath)
// Remove the layers file for the id // Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return err return err
} }
if m != nil {
a.pathCacheLock.Lock() delete(a.active, id)
delete(a.pathCache, id) }
a.pathCacheLock.Unlock()
return nil return nil
} }
// Get returns the rootfs path for the id. // Get returns the rootfs path for the id.
// This will mount the dir at it's given path // This will mount the dir at it's given path
func (a *Driver) Get(id, mountLabel string) (string, error) { func (a *Driver) Get(id, mountLabel string) (string, error) {
// Protect the a.active from concurrent access
a.Lock()
defer a.Unlock()
m := a.active[id]
if m == nil {
m = &data{}
a.active[id] = m
}
parents, err := a.getParentLayerPaths(id) parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
return "", err return "", err
} }
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
// If a dir does not have a parent ( no layers )do not try to mount // If a dir does not have a parent ( no layers )do not try to mount
// just return the diff path to the data // just return the diff path to the data
m.path = path.Join(a.rootPath(), "diff", id)
if len(parents) > 0 { if len(parents) > 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil { m.path = path.Join(a.rootPath(), "mnt", id)
return "", err if m.referenceCount == 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err
}
} }
} }
m.referenceCount++
a.pathCacheLock.Lock() return m.path, nil
a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
} }
// Put unmounts and updates list of active mounts. // Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error { func (a *Driver) Put(id string) error {
a.pathCacheLock.Lock() // Protect the a.active from concurrent access
m, exists := a.pathCache[id] a.Lock()
if !exists { defer a.Unlock()
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
err := a.unmount(m) m := a.active[id]
if err != nil { if m == nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err) // but it might be still here
if a.Exists(id) {
path := path.Join(a.rootPath(), "mnt", id)
err := Unmount(path)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
}
return nil
} }
return err if count := m.referenceCount; count > 1 {
m.referenceCount = count - 1
} else {
ids, _ := getParentIds(a.rootPath(), id)
// We only mounted if there are any parents
if ids != nil && len(ids) > 0 {
a.unmount(m)
}
delete(a.active, id)
}
return nil
} }
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
@ -417,13 +443,16 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil return layers, nil
} }
func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error {
// If the id is mounted or we get an error return // If the id is mounted or we get an error return
if mounted, err := a.mounted(target); err != nil || mounted { if mounted, err := a.mounted(m); err != nil || mounted {
return err return err
} }
rw := a.getDiffPath(id) var (
target = m.path
rw = path.Join(a.rootPath(), "diff", id)
)
if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err) return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
@ -431,39 +460,26 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
return nil return nil
} }
func (a *Driver) unmount(mountPath string) error { func (a *Driver) unmount(m *data) error {
if mounted, err := a.mounted(mountPath); err != nil || !mounted { if mounted, err := a.mounted(m); err != nil || !mounted {
return err return err
} }
if err := Unmount(mountPath); err != nil { return Unmount(m.path)
return err
}
return nil
} }
func (a *Driver) mounted(mountpoint string) (bool, error) { func (a *Driver) mounted(m *data) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) var buf syscall.Statfs_t
if err := syscall.Statfs(m.path, &buf); err != nil {
return false, nil
}
return graphdriver.FsMagic(buf.Type) == graphdriver.FsMagicAufs, nil
} }
// Cleanup aufs and unmount all mountpoints // Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error { func (a *Driver) Cleanup() error {
var dirs []string for id, m := range a.active {
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil { if err := a.unmount(m); err != nil {
logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
} }
} }
return mountpk.Unmount(a.root) return mountpk.Unmount(a.root)

Просмотреть файл

@ -200,7 +200,7 @@ func TestMountedFalseResponse(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
response, err := d.mounted(d.getDiffPath("1")) response, err := d.mounted(d.active["1"])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -227,7 +227,7 @@ func TestMountedTrueReponse(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
response, err := d.mounted(d.pathCache["2"]) response, err := d.mounted(d.active["2"])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -293,7 +293,7 @@ func TestRemoveMountedDir(t *testing.T) {
t.Fatal("mntPath should not be empty string") t.Fatal("mntPath should not be empty string")
} }
mounted, err := d.mounted(d.pathCache["2"]) mounted, err := d.mounted(d.active["2"])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

Просмотреть файл

@ -46,19 +46,3 @@ func getParentIds(root, id string) ([]string, error) {
} }
return out, s.Err() return out, s.Err()
} }
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}

Просмотреть файл

@ -69,6 +69,9 @@ type devInfo struct {
Deleted bool `json:"deleted"` Deleted bool `json:"deleted"`
devices *DeviceSet devices *DeviceSet
mountCount int
mountPath string
// The global DeviceSet lock guarantees that we serialize all // The global DeviceSet lock guarantees that we serialize all
// the calls to libdevmapper (which is not threadsafe), but we // the calls to libdevmapper (which is not threadsafe), but we
// sometimes release that lock while sleeping. In that case // sometimes release that lock while sleeping. In that case
@ -1988,6 +1991,13 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
// If mountcount is not zero, that means devices is still in use
// or has not been Put() properly. Fail device deletion.
if info.mountCount != 0 {
return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
}
return devices.deleteDevice(info, syncDelete) return devices.deleteDevice(info, syncDelete)
} }
@ -2106,11 +2116,13 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
} }
// Shutdown shuts down the device by unmounting the root. // Shutdown shuts down the device by unmounting the root.
func (devices *DeviceSet) Shutdown(home string) error { func (devices *DeviceSet) Shutdown() error {
logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
var devs []*devInfo
// Stop deletion worker. This should start delivering new events to // Stop deletion worker. This should start delivering new events to
// ticker channel. That means no new instance of cleanupDeletedDevice() // ticker channel. That means no new instance of cleanupDeletedDevice()
// will run after this call. If one instance is already running at // will run after this call. If one instance is already running at
@ -2127,46 +2139,30 @@ func (devices *DeviceSet) Shutdown(home string) error {
// metadata. Hence save this early before trying to deactivate devices. // metadata. Hence save this early before trying to deactivate devices.
devices.saveDeviceSetMetaData() devices.saveDeviceSetMetaData()
// ignore the error since it's just a best effort to not try to unmount something that's mounted for _, info := range devices.Devices {
mounts, _ := mount.GetMounts() devs = append(devs, info)
mounted := make(map[string]bool, len(mounts))
for _, mnt := range mounts {
mounted[mnt.Mountpoint] = true
} }
devices.Unlock()
if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { for _, info := range devs {
if err != nil { info.lock.Lock()
return err if info.mountCount > 0 {
}
if !info.IsDir() {
return nil
}
if mounted[p] {
// We use MNT_DETACH here in case it is still busy in some running // We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly, // container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies. // and the device will be released when that container dies.
if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err)
} }
}
if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { devices.Lock()
logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) if err := devices.deactivateDevice(info); err != nil {
} else { logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err)
if err := devices.deactivateDevice(devInfo); err != nil {
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
} }
devices.Unlock()
} }
info.lock.Unlock()
return nil
}); err != nil && !os.IsNotExist(err) {
devices.Unlock()
return err
} }
devices.Unlock()
info, _ := devices.lookupDeviceWithLock("") info, _ := devices.lookupDeviceWithLock("")
if info != nil { if info != nil {
info.lock.Lock() info.lock.Lock()
@ -2206,6 +2202,15 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
if info.mountCount > 0 {
if path != info.mountPath {
return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
}
info.mountCount++
return nil
}
if err := devices.activateDeviceIfNeeded(info, false); err != nil { if err := devices.activateDeviceIfNeeded(info, false); err != nil {
return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
} }
@ -2229,6 +2234,9 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
} }
info.mountCount = 1
info.mountPath = path
return nil return nil
} }
@ -2248,6 +2256,20 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
// If there are running containers when daemon crashes, during daemon
// restarting, it will kill running containers and will finally call
// Put() without calling Get(). So info.MountCount may become negative.
// if info.mountCount goes negative, we do the unmount and assign
// it to 0.
info.mountCount--
if info.mountCount > 0 {
return nil
} else if info.mountCount < 0 {
logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0")
info.mountCount = 0
}
logrus.Debugf("devmapper: Unmount(%s)", mountPath) logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
return err return err
@ -2258,6 +2280,8 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
return err return err
} }
info.mountPath = ""
return nil return nil
} }

Просмотреть файл

@ -108,7 +108,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
// Cleanup unmounts a device. // Cleanup unmounts a device.
func (d *Driver) Cleanup() error { func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown(d.home) err := d.DeviceSet.Shutdown()
if err2 := mount.Unmount(d.home); err == nil { if err2 := mount.Unmount(d.home); err == nil {
err = err2 err = err2

Просмотреть файл

@ -1,19 +1,8 @@
package graphdriver package graphdriver
import "syscall"
var ( var (
// Slice of drivers that should be used in an order // Slice of drivers that should be used in an order
priority = []string{ priority = []string{
"zfs", "zfs",
} }
) )
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

Просмотреть файл

@ -42,8 +42,6 @@ const (
FsMagicXfs = FsMagic(0x58465342) FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs // FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1) FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
) )
var ( var (
@ -88,12 +86,3 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
} }
return FsMagic(buf.Type), nil return FsMagic(buf.Type), nil
} }
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

Просмотреть файл

@ -88,13 +88,21 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
// of that. This means all child images share file (but not directory) // of that. This means all child images share file (but not directory)
// data with the parent. // data with the parent.
// ActiveMount contains information about the count, path and whether is mounted or not.
// This information is part of the Driver, that contains list of active mounts that are part of this overlay.
type ActiveMount struct {
count int
path string
mounted bool
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver. // Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct { type Driver struct {
home string home string
pathCacheLock sync.Mutex sync.Mutex // Protects concurrent modification to active
pathCache map[string]string active map[string]*ActiveMount
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
} }
var backingFs = "<unknown>" var backingFs = "<unknown>"
@ -143,10 +151,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
} }
d := &Driver{ d := &Driver{
home: home, home: home,
pathCache: make(map[string]string), active: make(map[string]*ActiveMount),
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
} }
return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
@ -317,14 +325,23 @@ func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err return err
} }
d.pathCacheLock.Lock()
delete(d.pathCache, id)
d.pathCacheLock.Unlock()
return nil return nil
} }
// Get creates and mounts the required file system for the given id and returns the mount path. // Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (string, error) { func (d *Driver) Get(id string, mountLabel string) (string, error) {
// Protect the d.active from concurrent access
d.Lock()
defer d.Unlock()
mount := d.active[id]
if mount != nil {
mount.count++
return mount.path, nil
}
mount = &ActiveMount{count: 1}
dir := d.dir(id) dir := d.dir(id)
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return "", err return "", err
@ -333,10 +350,9 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
// If id has a root, just return it // If id has a root, just return it
rootDir := path.Join(dir, "root") rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil { if _, err := os.Stat(rootDir); err == nil {
d.pathCacheLock.Lock() mount.path = rootDir
d.pathCache[id] = rootDir d.active[id] = mount
d.pathCacheLock.Unlock() return mount.path, nil
return rootDir, nil
} }
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
@ -349,12 +365,6 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
mergedDir := path.Join(dir, "merged") mergedDir := path.Join(dir, "merged")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
// if it's mounted already, just return
if mounted, err := d.mounted(mergedDir); err != nil || mounted {
return "", err
}
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
} }
@ -368,38 +378,42 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err return "", err
} }
mount.path = mergedDir
mount.mounted = true
d.active[id] = mount
d.pathCacheLock.Lock() return mount.path, nil
d.pathCache[id] = mergedDir
d.pathCacheLock.Unlock()
return mergedDir, nil
}
func (d *Driver) mounted(dir string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
} }
// Put unmounts the mount path created for the give id. // Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
d.pathCacheLock.Lock() // Protect the d.active from concurrent access
mountpoint, exists := d.pathCache[id] d.Lock()
d.pathCacheLock.Unlock() defer d.Unlock()
if !exists { mount := d.active[id]
if mount == nil {
logrus.Debugf("Put on a non-mounted device %s", id) logrus.Debugf("Put on a non-mounted device %s", id)
// but it might be still here // but it might be still here
if d.Exists(id) { if d.Exists(id) {
mountpoint = path.Join(d.dir(id), "merged") mergedDir := path.Join(d.dir(id), "merged")
err := syscall.Unmount(mergedDir, 0)
if err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
} }
return nil
d.pathCacheLock.Lock()
d.pathCache[id] = mountpoint
d.pathCacheLock.Unlock()
} }
if mounted, err := d.mounted(mountpoint); mounted || err != nil { mount.count--
if err = syscall.Unmount(mountpoint, 0); err != nil { if mount.count > 0 {
return nil
}
defer delete(d.active, id)
if mount.mounted {
err := syscall.Unmount(mount.path, 0)
if err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err) logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
} }
return err return err

Просмотреть файл

@ -13,6 +13,7 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"syscall" "syscall"
"time" "time"
@ -46,6 +47,10 @@ const (
type Driver struct { type Driver struct {
// info stores the shim driver information // info stores the shim driver information
info hcsshim.DriverInfo info hcsshim.DriverInfo
// Mutex protects concurrent modification to active
sync.Mutex
// active stores references to the activated layers
active map[string]int
} }
var _ graphdriver.DiffGetterDriver = &Driver{} var _ graphdriver.DiffGetterDriver = &Driver{}
@ -58,6 +63,7 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
HomeDir: home, HomeDir: home,
Flavour: filterDriver, Flavour: filterDriver,
}, },
active: make(map[string]int),
} }
return d, nil return d, nil
} }
@ -70,6 +76,7 @@ func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (
HomeDir: home, HomeDir: home,
Flavour: diffDriver, Flavour: diffDriver,
}, },
active: make(map[string]int),
} }
return d, nil return d, nil
} }
@ -182,6 +189,9 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string var dir string
d.Lock()
defer d.Unlock()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return "", err return "", err
@ -193,14 +203,16 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err return "", err
} }
if err := hcsshim.ActivateLayer(d.info, rID); err != nil { if d.active[rID] == 0 {
return "", err if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
} return "", err
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { }
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
} }
return "", err
} }
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
@ -211,6 +223,8 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err return "", err
} }
d.active[rID]++
// If the layer has a mount path, use that. Otherwise, use the // If the layer has a mount path, use that. Otherwise, use the
// folder path. // folder path.
if mountPath != "" { if mountPath != "" {
@ -231,10 +245,22 @@ func (d *Driver) Put(id string) error {
return err return err
} }
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { d.Lock()
return err defer d.Unlock()
if d.active[rID] > 1 {
d.active[rID]--
} else if d.active[rID] == 1 {
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err
}
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
return err
}
delete(d.active, rID)
} }
return hcsshim.DeactivateLayer(d.info, rID)
return nil
} }
// Cleanup ensures the information the driver stores is properly removed. // Cleanup ensures the information the driver stores is properly removed.
@ -244,40 +270,62 @@ func (d *Driver) Cleanup() error {
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) { func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return return
} }
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID) layerChain, err := d.getLayerChain(rID)
if err != nil { if err != nil {
return return
} }
// this is assuming that the layer is unmounted var undo func()
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err d.Lock()
}
defer func() { // To support export, a layer must be activated but not prepared.
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { if d.info.Flavour == filterDriver {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err) if d.active[rID] == 0 {
if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
d.Unlock()
return
}
undo = func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return
}
undo = func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
}
}
} }
}() }
d.Unlock()
arch, err := d.exportLayer(rID, layerChain) arch, err := d.exportLayer(rID, layerChain)
if err != nil { if err != nil {
undo()
return return
} }
return ioutils.NewReadCloserWrapper(arch, func() error { return ioutils.NewReadCloserWrapper(arch, func() error {
defer undo()
return arch.Close() return arch.Close()
}), nil }), nil
} }
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should be mounted when calling this function
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
@ -288,15 +336,31 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
return nil, err return nil, err
} }
// this is assuming that the layer is unmounted d.Lock()
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { if d.info.Flavour == filterDriver {
return nil, err if d.active[rID] == 0 {
} if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
defer func() { d.Unlock()
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil { return nil, err
logrus.Warnf("Failed to Deactivate %s: %s", rID, err) }
defer func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return nil, err
}
defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
}
}()
} }
}() }
d.Unlock()
r, err := hcsshim.NewLayerReader(d.info, id, parentChain) r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
if err != nil { if err != nil {
@ -327,7 +391,6 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// ApplyDiff extracts the changeset from the given diff into the // ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the // layer with the specified id and parent, returning the size of the
// new layer in bytes. // new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
rPId, err := d.resolveID(parent) rPId, err := d.resolveID(parent)
if err != nil { if err != nil {

Просмотреть файл

@ -22,6 +22,12 @@ import (
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
) )
// activeMount records the mount state for a single layer id.
// Entries are reference-counted so that concurrent Get/Put calls on the
// same id share one underlying mount instead of racing to mount/unmount.
type activeMount struct {
count int // outstanding Get calls not yet matched by a Put
path string // mountpoint handed back to callers of Get
mounted bool // true once the filesystem has actually been mounted
}
type zfsOptions struct { type zfsOptions struct {
fsName string fsName string
mountPath string mountPath string
@ -103,6 +109,7 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
dataset: rootDataset, dataset: rootDataset,
options: options, options: options,
filesystemsCache: filesystemsCache, filesystemsCache: filesystemsCache,
active: make(map[string]*activeMount),
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
} }
@ -159,6 +166,7 @@ type Driver struct {
options zfsOptions options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool filesystemsCache map[string]bool
active map[string]*activeMount
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
} }
@ -294,6 +302,17 @@ func (d *Driver) Remove(id string) error {
// Get returns the mountpoint for the given id after creating the target directories if necessary. // Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Get(id, mountLabel string) (string, error) {
d.Lock()
defer d.Unlock()
mnt := d.active[id]
if mnt != nil {
mnt.count++
return mnt.path, nil
}
mnt = &activeMount{count: 1}
mountpoint := d.mountPath(id) mountpoint := d.mountPath(id)
filesystem := d.zfsPath(id) filesystem := d.zfsPath(id)
options := label.FormatMountLabel("", mountLabel) options := label.FormatMountLabel("", mountLabel)
@ -316,29 +335,48 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
} }
mnt.path = mountpoint
mnt.mounted = true
d.active[id] = mnt
return mountpoint, nil return mountpoint, nil
} }
// Put removes the existing mountpoint for the given id if it exists. // Put removes the existing mountpoint for the given id if it exists.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
mountpoint := d.mountPath(id) d.Lock()
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) defer d.Unlock()
if err != nil || !mounted {
return err mnt := d.active[id]
if mnt == nil {
logrus.Debugf("[zfs] Put on a non-mounted device %s", id)
// but it might be still here
if d.Exists(id) {
err := mount.Unmount(d.mountPath(id))
if err != nil {
logrus.Debugf("[zfs] Failed to unmount %s zfs fs: %v", id, err)
}
}
return nil
} }
logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) mnt.count--
if mnt.count > 0 {
return nil
}
if err := mount.Unmount(mountpoint); err != nil { defer delete(d.active, id)
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) if mnt.mounted {
logrus.Debugf(`[zfs] unmount("%s")`, mnt.path)
if err := mount.Unmount(mnt.path); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mnt.path, err)
}
} }
return nil return nil
} }
// Exists checks to see if the cache entry exists for the given id. // Exists checks to see if the cache entry exists for the given id.
func (d *Driver) Exists(id string) bool { func (d *Driver) Exists(id string) bool {
d.Lock()
defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)] == true return d.filesystemsCache[d.zfsPath(id)] == true
} }

Просмотреть файл

@ -1,95 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"sync"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
// BenchmarkConcurrentContainerActions stresses the daemon with concurrent
// container lifecycle operations (run, cp, kill, start, rm) interleaved with
// repeated "docker ps" calls, to flush out races in graphdriver refcounting.
// Any error from any goroutine is collected on chErr and fails the benchmark.
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
	maxConcurrency := runtime.GOMAXPROCS(0)
	numIterations := c.N
	outerGroup := &sync.WaitGroup{}
	outerGroup.Add(maxConcurrency)
	// Buffered large enough that producers never block, even if every
	// single command fails (2 producer loops per outer goroutine).
	chErr := make(chan error, numIterations*2*maxConcurrency)

	for i := 0; i < maxConcurrency; i++ {
		go func() {
			defer outerGroup.Done()
			innerGroup := &sync.WaitGroup{}
			innerGroup.Add(2)

			go func() {
				defer innerGroup.Done()
				// The iteration body is extracted into a closure so the
				// deferred tmpDir cleanup runs at the end of each iteration
				// instead of piling up until the goroutine exits.
				runIteration := func() {
					args := []string{"run", "-d", defaultSleepImage}
					args = append(args, defaultSleepCommand...)
					out, _, err := dockerCmdWithError(args...)
					if err != nil {
						// Constant format string: command output may contain
						// '%' characters that fmt.Errorf(out) would mangle.
						chErr <- fmt.Errorf("%s", out)
						return
					}

					id := strings.TrimSpace(out)
					tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
					if err != nil {
						chErr <- err
						return
					}
					defer os.RemoveAll(tmpDir)
					out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
						return
					}

					out, _, err = dockerCmdWithError("kill", id)
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
					}

					out, _, err = dockerCmdWithError("start", id)
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
					}

					out, _, err = dockerCmdWithError("kill", id)
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
					}

					// don't do an rm -f here since it can potentially ignore errors from the graphdriver
					out, _, err = dockerCmdWithError("rm", id)
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
					}
				}
				for i := 0; i < numIterations; i++ {
					runIteration()
				}
			}()

			go func() {
				defer innerGroup.Done()
				for i := 0; i < numIterations; i++ {
					out, _, err := dockerCmdWithError("ps")
					if err != nil {
						chErr <- fmt.Errorf("%s", out)
					}
				}
			}()

			innerGroup.Wait()
		}()
	}

	outerGroup.Wait()
	close(chErr)

	// Drain all collected errors; any non-nil error fails the benchmark.
	for err := range chErr {
		c.Assert(err, checker.IsNil)
	}
}

Просмотреть файл

@ -49,10 +49,6 @@ var (
// to be created which would result in a layer depth // to be created which would result in a layer depth
// greater than the 125 max. // greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded") ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// on the current platform
ErrNotSupported = errors.New("not support on this platform")
) )
// ChainID is the content-addressable ID of a layer. // ChainID is the content-addressable ID of a layer.

Просмотреть файл

@ -12,7 +12,6 @@ type mountedLayer struct {
mountID string mountID string
initID string initID string
parent *roLayer parent *roLayer
path string
layerStore *layerStore layerStore *layerStore
references map[RWLayer]*referencedRWLayer references map[RWLayer]*referencedRWLayer
@ -132,21 +131,10 @@ func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return "", ErrLayerNotRetained return "", ErrLayerNotRetained
} }
if rl.activityCount > 0 { rl.activityCount++
rl.activityCount++ return rl.mountedLayer.Mount(mountLabel)
return rl.path, nil
}
m, err := rl.mountedLayer.Mount(mountLabel)
if err == nil {
rl.activityCount++
rl.path = m
}
return m, err
} }
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error { func (rl *referencedRWLayer) Unmount() error {
rl.activityL.Lock() rl.activityL.Lock()
defer rl.activityL.Unlock() defer rl.activityL.Unlock()
@ -157,11 +145,7 @@ func (rl *referencedRWLayer) Unmount() error {
if rl.activityCount == -1 { if rl.activityCount == -1 {
return ErrLayerNotRetained return ErrLayerNotRetained
} }
rl.activityCount-- rl.activityCount--
if rl.activityCount > 0 {
return nil
}
return rl.mountedLayer.Unmount() return rl.mountedLayer.Unmount()
} }