Mirror of https://github.com/microsoft/docker.git
Move layer mount refcounts to mountedLayer
Instead of implementing refcounts in each graphdriver, implement them in the layer package, which is what the engine actually interacts with now. This means that interacting directly with the graphdriver is no longer explicitly safe with regard to refcounted Get/Put calls.

In addition, with containerd, layers may still be mounted after a daemon restart, since we no longer explicitly kill containers when the engine shuts down or starts up. Because of this, the ref counts would need to be repopulated.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Parent: 57ca2a2101
Commit: 65d79e3e5e
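To make the change concrete before diving into the diff: the pattern being moved into the layer package is an engine-side wrapper that counts its own Mount/Unmount calls and only forwards the first mount and the last unmount to the graphdriver. The sketch below is a simplified illustration of that idea, not code from this commit (the real implementation is the referencedRWLayer change at the bottom of the diff); the refCountedMount type and its fields are hypothetical.

package layer

import "sync"

// refCountedMount is a hypothetical sketch of engine-side mount refcounting.
type refCountedMount struct {
	mu      sync.Mutex
	count   int    // Mount() calls not yet balanced by Unmount()
	path    string // cached mount path handed back to every caller
	mount   func(label string) (string, error) // e.g. the graphdriver's Get
	unmount func() error                       // e.g. the graphdriver's Put
}

func (m *refCountedMount) Mount(label string) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.count > 0 { // already mounted: bump the count and reuse the path
		m.count++
		return m.path, nil
	}
	p, err := m.mount(label) // first reference: perform the real mount
	if err == nil {
		m.count++
		m.path = p
	}
	return p, err
}

func (m *refCountedMount) Unmount() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.count--
	if m.count > 0 { // other references still use the mount
		return nil
	}
	return m.unmount() // last reference: perform the real unmount
}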
@@ -222,6 +222,7 @@ func (daemon *Daemon) exportContainerRw(container *container.Container) (archive
 	archive, err := container.RWLayer.TarStream()
 	if err != nil {
+		daemon.Unmount(container) // logging is already handled in the `Unmount` function
 		return nil, err
 	}
 	return ioutils.NewReadCloserWrapper(archive, func() error {
@@ -29,6 +29,7 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"strings"
 	"sync"
 	"syscall"
@@ -64,21 +65,13 @@ func init() {
 	graphdriver.Register("aufs", Init)
 }
 
-type data struct {
-	referenceCount int
-	path           string
-}
-
 // Driver contains information about the filesystem mounted.
-// root of the filesystem
-// sync.Mutex to protect against concurrent modifications
-// active maps mount id to the count
 type Driver struct {
-	root       string
-	uidMaps    []idtools.IDMap
-	gidMaps    []idtools.IDMap
-	sync.Mutex // Protects concurrent modification to active
-	active     map[string]*data
+	root          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
 }
 
 // Init returns a new AUFS driver.
@@ -111,10 +104,10 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 	}
 
 	a := &Driver{
-		root:    root,
-		active:  make(map[string]*data),
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
+		root:      root,
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
+		pathCache: make(map[string]string),
 	}
 
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@@ -228,9 +221,7 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
 			}
 		}
 	}
-	a.Lock()
-	a.active[id] = &data{}
-	a.Unlock()
 
 	return nil
 }
@@ -259,108 +250,91 @@ func (a *Driver) createDirsFor(id string) error {
 
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
-
-	m := a.active[id]
-	if m != nil {
-		if m.referenceCount > 0 {
-			return nil
-		}
-		// Make sure the dir is umounted first
-		if err := a.unmount(m); err != nil {
-			return err
-		}
+	a.pathCacheLock.Lock()
+	mountpoint, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+	if !exists {
+		mountpoint = a.getMountpoint(id)
 	}
-	tmpDirs := []string{
-		"mnt",
-		"diff",
+	if err := a.unmount(mountpoint); err != nil {
+		// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
+		logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
 	}
 
 	// Atomically remove each directory in turn by first moving it out of the
 	// way (so that docker doesn't find it anymore) before doing removal of
 	// the whole tree.
-	for _, p := range tmpDirs {
-		realPath := path.Join(a.rootPath(), p, id)
-		tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
-		if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
-			return err
-		}
-		defer os.RemoveAll(tmpPath)
+	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
+		return err
 	}
+	defer os.RemoveAll(tmpMntPath)
+
+	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	defer os.RemoveAll(tmpDiffpath)
 
 	// Remove the layers file for the id
 	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
 		return err
 	}
-	if m != nil {
-		delete(a.active, id)
-	}
+
+	a.pathCacheLock.Lock()
+	delete(a.pathCache, id)
+	a.pathCacheLock.Unlock()
 	return nil
 }
 
 // Get returns the rootfs path for the id.
 // This will mount the dir at it's given path
 func (a *Driver) Get(id, mountLabel string) (string, error) {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
-
-	m := a.active[id]
-	if m == nil {
-		m = &data{}
-		a.active[id] = m
-	}
-
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
 	}
 
-	// If a dir does not have a parent ( no layers )do not try to mount
-	// just return the diff path to the data
-	m.path = path.Join(a.rootPath(), "diff", id)
-	if len(parents) > 0 {
-		m.path = path.Join(a.rootPath(), "mnt", id)
-		if m.referenceCount == 0 {
-			if err := a.mount(id, m, mountLabel, parents); err != nil {
-				return "", err
-			}
-		}
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+
+	if !exists {
+		m = a.getDiffPath(id)
+		if len(parents) > 0 {
+			m = a.getMountpoint(id)
+		}
 	}
-	m.referenceCount++
-	return m.path, nil
+
+	// If a dir does not have a parent ( no layers )do not try to mount
+	// just return the diff path to the data
+	if len(parents) > 0 {
+		if err := a.mount(id, m, mountLabel, parents); err != nil {
+			return "", err
+		}
+	}
+
+	a.pathCacheLock.Lock()
+	a.pathCache[id] = m
+	a.pathCacheLock.Unlock()
+	return m, nil
 }
 
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
-	// Protect the a.active from concurrent access
-	a.Lock()
-	defer a.Unlock()
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	if !exists {
+		m = a.getMountpoint(id)
+		a.pathCache[id] = m
+	}
+	a.pathCacheLock.Unlock()
 
-	m := a.active[id]
-	if m == nil {
-		// but it might be still here
-		if a.Exists(id) {
-			path := path.Join(a.rootPath(), "mnt", id)
-			err := Unmount(path)
-			if err != nil {
-				logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
-			}
-		}
-		return nil
+	err := a.unmount(m)
+	if err != nil {
+		logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
 	}
-	if count := m.referenceCount; count > 1 {
-		m.referenceCount = count - 1
-	} else {
-		ids, _ := getParentIds(a.rootPath(), id)
-		// We only mounted if there are any parents
-		if ids != nil && len(ids) > 0 {
-			a.unmount(m)
-		}
-		delete(a.active, id)
-	}
-	return nil
+	return err
 }
 
 // Diff produces an archive of the changes between the specified
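The key property exploited by the rewrite above: an aufs layer's mount and diff paths are pure functions of its id, so the driver no longer needs a refcounted active map. pathCache is only an optimization, and a miss can always be repaired by re-deriving the path, which is what keeps Get/Put safe across daemon restarts. A hedged sketch of that lookup-or-derive pattern (the resolvePath helper is hypothetical, not part of this commit):

// resolvePath returns the cached path for id, or deterministically derives
// it again; because the derivation is pure, this also works for mounts
// created before a daemon restart.
func (a *Driver) resolvePath(id string, hasParents bool) string {
	a.pathCacheLock.Lock()
	p, ok := a.pathCache[id]
	a.pathCacheLock.Unlock()
	if ok {
		return p
	}
	if hasParents {
		return path.Join(a.rootPath(), "mnt", id) // mounted union view
	}
	return path.Join(a.rootPath(), "diff", id) // no parents: bare diff dir
}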
@@ -443,16 +417,13 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
 	return layers, nil
 }
 
-func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
 	// If the id is mounted or we get an error return
-	if mounted, err := a.mounted(m); err != nil || mounted {
+	if mounted, err := a.mounted(target); err != nil || mounted {
 		return err
 	}
 
-	var (
-		target = m.path
-		rw     = path.Join(a.rootPath(), "diff", id)
-	)
+	rw := a.getDiffPath(id)
 
 	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
 		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
@@ -460,26 +431,39 @@ func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) e
 	return nil
 }
 
-func (a *Driver) unmount(m *data) error {
-	if mounted, err := a.mounted(m); err != nil || !mounted {
+func (a *Driver) unmount(mountPath string) error {
+	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
 		return err
 	}
-	return Unmount(m.path)
+	if err := Unmount(mountPath); err != nil {
+		return err
+	}
+	return nil
 }
 
-func (a *Driver) mounted(m *data) (bool, error) {
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(m.path, &buf); err != nil {
-		return false, nil
-	}
-	return graphdriver.FsMagic(buf.Type) == graphdriver.FsMagicAufs, nil
+func (a *Driver) mounted(mountpoint string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
 }
 
 // Cleanup aufs and unmount all mountpoints
 func (a *Driver) Cleanup() error {
-	for id, m := range a.active {
+	var dirs []string
+	if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+		dirs = append(dirs, path)
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	for _, m := range dirs {
 		if err := a.unmount(m); err != nil {
-			logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
+			logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
 		}
 	}
 	return mountpk.Unmount(a.root)
@@ -200,7 +200,7 @@ func TestMountedFalseResponse(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	response, err := d.mounted(d.active["1"])
+	response, err := d.mounted(d.getDiffPath("1"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -227,7 +227,7 @@ func TestMountedTrueReponse(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	response, err := d.mounted(d.active["2"])
+	response, err := d.mounted(d.pathCache["2"])
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -293,7 +293,7 @@ func TestRemoveMountedDir(t *testing.T) {
 		t.Fatal("mntPath should not be empty string")
 	}
 
-	mounted, err := d.mounted(d.active["2"])
+	mounted, err := d.mounted(d.pathCache["2"])
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -46,3 +46,19 @@ func getParentIds(root, id string) ([]string, error) {
 	}
 	return out, s.Err()
 }
+
+func (a *Driver) getMountpoint(id string) string {
+	return path.Join(a.mntPath(), id)
+}
+
+func (a *Driver) mntPath() string {
+	return path.Join(a.rootPath(), "mnt")
+}
+
+func (a *Driver) getDiffPath(id string) string {
+	return path.Join(a.diffPath(), id)
+}
+
+func (a *Driver) diffPath() string {
+	return path.Join(a.rootPath(), "diff")
+}
@@ -69,9 +69,6 @@ type devInfo struct {
 	Deleted bool `json:"deleted"`
 	devices *DeviceSet
 
-	mountCount int
-	mountPath  string
-
 	// The global DeviceSet lock guarantees that we serialize all
 	// the calls to libdevmapper (which is not threadsafe), but we
 	// sometimes release that lock while sleeping. In that case
@@ -1991,13 +1988,6 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	// If mountcount is not zero, that means devices is still in use
-	// or has not been Put() properly. Fail device deletion.
-
-	if info.mountCount != 0 {
-		return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
-	}
-
 	return devices.deleteDevice(info, syncDelete)
 }
@@ -2116,13 +2106,11 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
 }
 
 // Shutdown shuts down the device by unmounting the root.
-func (devices *DeviceSet) Shutdown() error {
-	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+func (devices *DeviceSet) Shutdown(home string) error {
+	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
 	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
 
-	var devs []*devInfo
-
 	// Stop deletion worker. This should start delivering new events to
 	// ticker channel. That means no new instance of cleanupDeletedDevice()
 	// will run after this call. If one instance is already running at
@@ -2139,30 +2127,46 @@ func (devices *DeviceSet) Shutdown() error {
 	// metadata. Hence save this early before trying to deactivate devices.
 	devices.saveDeviceSetMetaData()
 
-	for _, info := range devices.Devices {
-		devs = append(devs, info)
+	// ignore the error since it's just a best effort to not try to unmount something that's mounted
+	mounts, _ := mount.GetMounts()
+	mounted := make(map[string]bool, len(mounts))
+	for _, mnt := range mounts {
+		mounted[mnt.Mountpoint] = true
 	}
-	devices.Unlock()
 
-	for _, info := range devs {
-		info.lock.Lock()
-		if info.mountCount > 0 {
+	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			return nil
+		}
+
+		if mounted[p] {
 			// We use MNT_DETACH here in case it is still busy in some running
 			// container. This means it'll go away from the global scope directly,
 			// and the device will be released when that container dies.
-			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
-				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err)
+			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
+				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
 			}
+		}
 
-			devices.Lock()
-			if err := devices.deactivateDevice(info); err != nil {
-				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err)
-			}
-			devices.Unlock()
-		}
-		info.lock.Unlock()
+		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
+			}
+		}
+
+		return nil
+	}); err != nil && !os.IsNotExist(err) {
+		devices.Unlock()
+		return err
 	}
 
+	devices.Unlock()
+
 	info, _ := devices.lookupDeviceWithLock("")
 	if info != nil {
 		info.lock.Lock()
@@ -2202,15 +2206,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	if info.mountCount > 0 {
-		if path != info.mountPath {
-			return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
-		}
-
-		info.mountCount++
-		return nil
-	}
-
 	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
 		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
 	}
@@ -2234,9 +2229,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
 	}
 
-	info.mountCount = 1
-	info.mountPath = path
-
 	return nil
 }
@@ -2256,20 +2248,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 	devices.Lock()
 	defer devices.Unlock()
 
-	// If there are running containers when daemon crashes, during daemon
-	// restarting, it will kill running containers and will finally call
-	// Put() without calling Get(). So info.MountCount may become negative.
-	// if info.mountCount goes negative, we do the unmount and assign
-	// it to 0.
-
-	info.mountCount--
-	if info.mountCount > 0 {
-		return nil
-	} else if info.mountCount < 0 {
-		logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0")
-		info.mountCount = 0
-	}
-
 	logrus.Debugf("devmapper: Unmount(%s)", mountPath)
 	if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
 		return err
@@ -2280,8 +2258,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 		return err
 	}
 
-	info.mountPath = ""
-
 	return nil
 }
@@ -108,7 +108,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 
 // Cleanup unmounts a device.
 func (d *Driver) Cleanup() error {
-	err := d.DeviceSet.Shutdown()
+	err := d.DeviceSet.Shutdown(d.home)
 
 	if err2 := mount.Unmount(d.home); err == nil {
 		err = err2
@@ -1,8 +1,19 @@
 package graphdriver
 
+import "syscall"
+
 var (
 	// Slice of drivers that should be used in an order
 	priority = []string{
 		"zfs",
 	}
 )
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
@@ -42,6 +42,8 @@ const (
 	FsMagicXfs = FsMagic(0x58465342)
 	// FsMagicZfs filesystem id for Zfs
 	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
 )
 
 var (
@@ -86,3 +88,12 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
 	}
 	return FsMagic(buf.Type), nil
 }
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
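With Mounted available in the graphdriver package, a driver can now answer "is this path currently mounted as my filesystem?" from kernel state (the Statfs magic) rather than from an in-memory counter, which is what makes the checks restart-safe. A small hedged usage sketch (the unmountIfMounted helper is hypothetical, not part of this commit):

// unmountIfMounted unmounts mountpoint only when the kernel still reports
// an aufs filesystem there; a plain directory yields mounted == false.
func unmountIfMounted(mountpoint string) error {
	mounted, err := graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
	if err != nil || !mounted {
		return err // Statfs failed, or nothing is mounted: nothing to do
	}
	return syscall.Unmount(mountpoint, 0)
}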
@@ -88,21 +88,13 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
 // of that. This means all child images share file (but not directory)
 // data with the parent.
 
-// ActiveMount contains information about the count, path and whether is mounted or not.
-// This information is part of the Driver, that contains list of active mounts that are part of this overlay.
-type ActiveMount struct {
-	count   int
-	path    string
-	mounted bool
-}
-
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
 type Driver struct {
-	home       string
-	sync.Mutex // Protects concurrent modification to active
-	active     map[string]*ActiveMount
-	uidMaps    []idtools.IDMap
-	gidMaps    []idtools.IDMap
+	home          string
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
 }
 
 var backingFs = "<unknown>"
@@ -151,10 +143,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 	}
 
 	d := &Driver{
-		home:    home,
-		active:  make(map[string]*ActiveMount),
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
+		home:      home,
+		pathCache: make(map[string]string),
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
 	}
 
 	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
@@ -325,23 +317,14 @@ func (d *Driver) Remove(id string) error {
 	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
 		return err
 	}
+	d.pathCacheLock.Lock()
+	delete(d.pathCache, id)
+	d.pathCacheLock.Unlock()
 	return nil
 }
 
 // Get creates and mounts the required file system for the given id and returns the mount path.
 func (d *Driver) Get(id string, mountLabel string) (string, error) {
-	// Protect the d.active from concurrent access
-	d.Lock()
-	defer d.Unlock()
-
-	mount := d.active[id]
-	if mount != nil {
-		mount.count++
-		return mount.path, nil
-	}
-
-	mount = &ActiveMount{count: 1}
-
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
@@ -350,9 +333,10 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	// If id has a root, just return it
 	rootDir := path.Join(dir, "root")
 	if _, err := os.Stat(rootDir); err == nil {
-		mount.path = rootDir
-		d.active[id] = mount
-		return mount.path, nil
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = rootDir
+		d.pathCacheLock.Unlock()
+		return rootDir, nil
 	}
 
 	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
@@ -388,42 +372,38 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
 		return "", err
 	}
-	mount.path = mergedDir
-	mount.mounted = true
-	d.active[id] = mount
-
-	return mount.path, nil
+	d.pathCacheLock.Lock()
+	d.pathCache[id] = mergedDir
+	d.pathCacheLock.Unlock()
+
+	return mergedDir, nil
 }
 
+func (d *Driver) mounted(dir string) (bool, error) {
+	return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
+}
+
 // Put unmounts the mount path created for the give id.
 func (d *Driver) Put(id string) error {
-	// Protect the d.active from concurrent access
-	d.Lock()
-	defer d.Unlock()
+	d.pathCacheLock.Lock()
+	mountpoint, exists := d.pathCache[id]
+	d.pathCacheLock.Unlock()
 
-	mount := d.active[id]
-	if mount == nil {
+	if !exists {
 		logrus.Debugf("Put on a non-mounted device %s", id)
 		// but it might be still here
 		if d.Exists(id) {
-			mergedDir := path.Join(d.dir(id), "merged")
-			err := syscall.Unmount(mergedDir, 0)
-			if err != nil {
-				logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
-			}
+			mountpoint = path.Join(d.dir(id), "merged")
 		}
-		return nil
-	}
 
-	mount.count--
-	if mount.count > 0 {
-		return nil
-	}
-
-	defer delete(d.active, id)
-	if mount.mounted {
-		err := syscall.Unmount(mount.path, 0)
-		if err != nil {
+		d.pathCacheLock.Lock()
+		d.pathCache[id] = mountpoint
+		d.pathCacheLock.Unlock()
+	}
+
+	if mounted, err := d.mounted(mountpoint); mounted || err != nil {
+		if err = syscall.Unmount(mountpoint, 0); err != nil {
 			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
 		}
 		return err
@@ -13,7 +13,6 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
-	"sync"
 	"syscall"
 	"time"
 
@@ -47,10 +46,6 @@ const (
 type Driver struct {
 	// info stores the shim driver information
 	info hcsshim.DriverInfo
-	// Mutex protects concurrent modification to active
-	sync.Mutex
-	// active stores references to the activated layers
-	active map[string]int
 }
 
 var _ graphdriver.DiffGetterDriver = &Driver{}
@@ -63,7 +58,6 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
 			HomeDir: home,
 			Flavour: filterDriver,
 		},
-		active: make(map[string]int),
 	}
 	return d, nil
 }
@@ -76,7 +70,6 @@ func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (
 			HomeDir: home,
 			Flavour: diffDriver,
 		},
-		active: make(map[string]int),
 	}
 	return d, nil
 }
@@ -189,9 +182,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
 
-	d.Lock()
-	defer d.Unlock()
-
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return "", err
@@ -203,16 +193,14 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}
 
-	if d.active[rID] == 0 {
-		if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
-			return "", err
-		}
-		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
-			if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-				logrus.Warnf("Failed to Deactivate %s: %s", id, err)
-			}
-			return "", err
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		return "", err
+	}
+	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
 		}
+		return "", err
 	}
 
 	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
@@ -223,8 +211,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}
 
-	d.active[rID]++
-
 	// If the layer has a mount path, use that. Otherwise, use the
 	// folder path.
 	if mountPath != "" {
@@ -245,22 +231,10 @@ func (d *Driver) Put(id string) error {
 		return err
 	}
 
-	d.Lock()
-	defer d.Unlock()
-
-	if d.active[rID] > 1 {
-		d.active[rID]--
-	} else if d.active[rID] == 1 {
-		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
-			return err
-		}
-		if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-			return err
-		}
-		delete(d.active, rID)
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return err
 	}
 
-	return nil
+	return hcsshim.DeactivateLayer(d.info, rID)
 }
 
 // Cleanup ensures the information the driver stores is properly removed.
@@ -270,62 +244,40 @@ func (d *Driver) Cleanup() error {
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
+// The layer should be mounted when calling this function
 func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return
 	}
 
-	// Getting the layer paths must be done outside of the lock.
 	layerChain, err := d.getLayerChain(rID)
 	if err != nil {
 		return
 	}
 
-	var undo func()
-
-	d.Lock()
-
-	// To support export, a layer must be activated but not prepared.
-	if d.info.Flavour == filterDriver {
-		if d.active[rID] == 0 {
-			if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return
-			}
-			undo = func() {
-				if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-					logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
-				}
-			}
-		} else {
-			if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return
-			}
-			undo = func() {
-				if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
-					logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
-				}
-			}
-		}
+	// this is assuming that the layer is unmounted
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
 	}
 
-	d.Unlock()
+	defer func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
+		}
+	}()
 
 	arch, err := d.exportLayer(rID, layerChain)
 	if err != nil {
-		undo()
 		return
 	}
 	return ioutils.NewReadCloserWrapper(arch, func() error {
-		defer undo()
 		return arch.Close()
 	}), nil
 }
 
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should be mounted when calling this function
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 	rID, err := d.resolveID(id)
 	if err != nil {
@@ -336,31 +288,15 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 		return nil, err
 	}
 
-	d.Lock()
-	if d.info.Flavour == filterDriver {
-		if d.active[rID] == 0 {
-			if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return nil, err
-			}
-			defer func() {
-				if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
-					logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
-				}
-			}()
-		} else {
-			if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
-				d.Unlock()
-				return nil, err
-			}
-			defer func() {
-				if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
-					logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
-				}
-			}()
-		}
+	// this is assuming that the layer is unmounted
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
 	}
-	d.Unlock()
+	defer func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
+			logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
+		}
+	}()
 
 	r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
 	if err != nil {
@@ -391,6 +327,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
+// The layer should not be mounted when calling this function
 func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
 	rPId, err := d.resolveID(parent)
 	if err != nil {
@@ -22,12 +22,6 @@ import (
 	"github.com/opencontainers/runc/libcontainer/label"
 )
 
-type activeMount struct {
-	count   int
-	path    string
-	mounted bool
-}
-
 type zfsOptions struct {
 	fsName    string
 	mountPath string
@@ -109,7 +103,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
 		dataset:          rootDataset,
 		options:          options,
 		filesystemsCache: filesystemsCache,
-		active:           make(map[string]*activeMount),
 		uidMaps:          uidMaps,
 		gidMaps:          gidMaps,
 	}
@@ -166,7 +159,6 @@ type Driver struct {
 	options          zfsOptions
 	sync.Mutex       // protects filesystem cache against concurrent access
 	filesystemsCache map[string]bool
-	active           map[string]*activeMount
 	uidMaps          []idtools.IDMap
 	gidMaps          []idtools.IDMap
 }
@@ -302,17 +294,6 @@ func (d *Driver) Remove(id string) error {
 
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
-	d.Lock()
-	defer d.Unlock()
-
-	mnt := d.active[id]
-	if mnt != nil {
-		mnt.count++
-		return mnt.path, nil
-	}
-
-	mnt = &activeMount{count: 1}
-
 	mountpoint := d.mountPath(id)
 	filesystem := d.zfsPath(id)
 	options := label.FormatMountLabel("", mountLabel)
@@ -335,48 +316,29 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
 		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
 	}
-	mnt.path = mountpoint
-	mnt.mounted = true
-	d.active[id] = mnt
 
 	return mountpoint, nil
 }
 
 // Put removes the existing mountpoint for the given id if it exists.
 func (d *Driver) Put(id string) error {
-	d.Lock()
-	defer d.Unlock()
-
-	mnt := d.active[id]
-	if mnt == nil {
-		logrus.Debugf("[zfs] Put on a non-mounted device %s", id)
-		// but it might be still here
-		if d.Exists(id) {
-			err := mount.Unmount(d.mountPath(id))
-			if err != nil {
-				logrus.Debugf("[zfs] Failed to unmount %s zfs fs: %v", id, err)
-			}
-		}
-		return nil
+	mountpoint := d.mountPath(id)
+	mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
+	if err != nil || !mounted {
+		return err
 	}
 
-	mnt.count--
-	if mnt.count > 0 {
-		return nil
-	}
+	logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
 
-	defer delete(d.active, id)
-	if mnt.mounted {
-		logrus.Debugf(`[zfs] unmount("%s")`, mnt.path)
-
-		if err := mount.Unmount(mnt.path); err != nil {
-			return fmt.Errorf("error unmounting to %s: %v", mnt.path, err)
-		}
+	if err := mount.Unmount(mountpoint); err != nil {
+		return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
 	}
 	return nil
 }
 
 // Exists checks to see if the cache entry exists for the given id.
 func (d *Driver) Exists(id string) bool {
+	d.Lock()
+	defer d.Unlock()
 	return d.filesystemsCache[d.zfsPath(id)] == true
 }
@@ -0,0 +1,95 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/pkg/integration/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
+	maxConcurrency := runtime.GOMAXPROCS(0)
+	numIterations := c.N
+	outerGroup := &sync.WaitGroup{}
+	outerGroup.Add(maxConcurrency)
+	chErr := make(chan error, numIterations*2*maxConcurrency)
+
+	for i := 0; i < maxConcurrency; i++ {
+		go func() {
+			defer outerGroup.Done()
+			innerGroup := &sync.WaitGroup{}
+			innerGroup.Add(2)
+
+			go func() {
+				defer innerGroup.Done()
+				for i := 0; i < numIterations; i++ {
+					args := []string{"run", "-d", defaultSleepImage}
+					args = append(args, defaultSleepCommand...)
+					out, _, err := dockerCmdWithError(args...)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+						return
+					}
+
+					id := strings.TrimSpace(out)
+					tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
+					if err != nil {
+						chErr <- err
+						return
+					}
+					defer os.RemoveAll(tmpDir)
+					out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+						return
+					}
+
+					out, _, err = dockerCmdWithError("kill", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					out, _, err = dockerCmdWithError("start", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					out, _, err = dockerCmdWithError("kill", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+
+					// don't do an rm -f here since it can potentially ignore errors from the graphdriver
+					out, _, err = dockerCmdWithError("rm", id)
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+				}
+			}()
+
+			go func() {
+				defer innerGroup.Done()
+				for i := 0; i < numIterations; i++ {
+					out, _, err := dockerCmdWithError("ps")
+					if err != nil {
+						chErr <- fmt.Errorf(out)
+					}
+				}
+			}()
+
+			innerGroup.Wait()
+		}()
+	}
+
+	outerGroup.Wait()
+	close(chErr)
+
+	for err := range chErr {
+		c.Assert(err, checker.IsNil)
+	}
+}
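Assuming this benchmark is driven through go-check like the rest of the integration suite, it would presumably be run with gocheck's benchmark flags, along the lines of:

go test -check.b -check.f BenchmarkConcurrentContainerActions ./integration-cli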
@@ -49,6 +49,10 @@ var (
 	// to be created which would result in a layer depth
 	// greater than the 125 max.
 	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current platform
+	ErrNotSupported = errors.New("not support on this platform")
 )
 
 // ChainID is the content-addressable ID of a layer.
@@ -12,6 +12,7 @@ type mountedLayer struct {
 	mountID    string
 	initID     string
 	parent     *roLayer
+	path       string
 	layerStore *layerStore
 
 	references map[RWLayer]*referencedRWLayer
@@ -131,10 +132,21 @@ func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
 		return "", ErrLayerNotRetained
 	}
 
-	rl.activityCount++
-	return rl.mountedLayer.Mount(mountLabel)
+	if rl.activityCount > 0 {
+		rl.activityCount++
+		return rl.path, nil
+	}
+
+	m, err := rl.mountedLayer.Mount(mountLabel)
+	if err == nil {
+		rl.activityCount++
+		rl.path = m
+	}
+	return m, err
 }
 
 // Unmount decrements the activity count and unmounts the underlying layer
+// Callers should only call `Unmount` once per call to `Mount`, even on error.
 func (rl *referencedRWLayer) Unmount() error {
 	rl.activityL.Lock()
 	defer rl.activityL.Unlock()
@@ -145,7 +157,11 @@ func (rl *referencedRWLayer) Unmount() error {
 	if rl.activityCount == -1 {
 		return ErrLayerNotRetained
 	}
+
 	rl.activityCount--
+	if rl.activityCount > 0 {
+		return nil
+	}
 
 	return rl.mountedLayer.Unmount()
 }
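Since Unmount now only unmounts once the activity count drains to zero, every successful Mount must be balanced by exactly one Unmount. A hedged usage sketch (the withMounted helper is hypothetical, not part of this commit):

// withMounted mounts a retained RWLayer, runs fn against its root, and
// balances the Mount; the layer is only truly unmounted when the last
// outstanding reference calls Unmount.
func withMounted(l layer.RWLayer, label string, fn func(root string) error) error {
	root, err := l.Mount(label)
	if err != nil {
		return err
	}
	defer l.Unmount()
	return fn(root)
}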