The following pull request was submitted through Github.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/2942

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
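
The patches below serialize concurrent StoragePoolVolumeMount()/StoragePoolVolumeUmount() calls for custom volumes on the LVM and ZFS backends, reusing the lxdStorageMapLock/lxdStorageOngoingOperationMap wait-channel pattern already used for container mounts, keyed by the new getCustomMountLockID()/getCustomUmountLockID() helpers. As a rough standalone sketch of that pattern only (the serializeOp helper and the opLock/ongoing names are hypothetical and not part of this PR):

// Minimal sketch of the per-operation locking pattern used in these patches.
// serializeOp, opLock and ongoing are illustrative names, not code from the PR.
package main

import (
	"fmt"
	"sync"
)

var (
	opLock sync.Mutex
	// Maps an operation ID (e.g. "mount/custom/<pool>/<volume>") to a channel
	// that is closed once the in-flight operation has finished.
	ongoing = map[string]chan bool{}
)

// serializeOp runs fn unless the same operation is already in flight; in that
// case it waits for the in-flight operation and reports that it did not
// perform the work itself.
func serializeOp(id string, fn func() error) (bool, error) {
	opLock.Lock()
	if waitChannel, ok := ongoing[id]; ok {
		// Someone else is already (un)mounting this volume: wait for them
		// and give them the benefit of the doubt.
		opLock.Unlock()
		<-waitChannel
		return false, nil
	}
	ongoing[id] = make(chan bool)
	opLock.Unlock()

	err := fn()

	// Wake up any waiters and clear the marker.
	opLock.Lock()
	if waitChannel, ok := ongoing[id]; ok {
		close(waitChannel)
		delete(ongoing, id)
	}
	opLock.Unlock()

	if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	ours, err := serializeOp("mount/custom/pool1/vol1", func() error {
		fmt.Println("mounting volume")
		return nil
	})
	fmt.Println(ours, err)
}

In the actual patches the serialized work is tryMount()/tryUnmount() for LVM and zfsPoolVolumeMount()/zfsPoolVolumeUmount() for ZFS, guarded by a shared.IsMountPoint() check so that only the first caller reports having performed the (un)mount.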

From 933fc78102b697c31f020896c9ad8f1c90a668b9 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brau...@ubuntu.com>
Date: Thu, 23 Feb 2017 23:08:51 +0100
Subject: [PATCH 1/3] storage: lock StoragePoolVolume{M,Um}ount for lvm

Signed-off-by: Christian Brauner <christian.brau...@ubuntu.com>
---
 lxd/storage.go     |  8 ++++++
 lxd/storage_lvm.go | 81 +++++++++++++++++++++++++++++++++++++++++++-----------
 2 files changed, 73 insertions(+), 16 deletions(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index f6f36ba..9e7756d 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -55,6 +55,14 @@ func getContainerUmountLockID(poolName string, containerName string) string {
        return fmt.Sprintf("umount/container/%s/%s", poolName, containerName)
 }
 
+func getCustomMountLockID(poolName string, volumeName string) string {
+       return fmt.Sprintf("mount/custom/%s/%s", poolName, volumeName)
+}
+
+func getCustomUmountLockID(poolName string, volumeName string) string {
+       return fmt.Sprintf("umount/custom/%s/%s", poolName, volumeName)
+}
+
 // Filesystem magic numbers
 const (
        filesystemSuperMagicTmpfs = 0x01021994
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index 53f73b1..d319292 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -539,39 +539,88 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 
 func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-       if shared.IsMountPoint(customPoolVolumeMntPoint) {
-               return false, nil
-       }
-
+       poolName := s.getOnDiskPoolName()
+       mountOptions := s.getLvmBlockMountOptions()
        lvFsType := s.getLvmFilesystem()
        volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
        if err != nil {
                return false, err
        }
-
-       poolName := s.getOnDiskPoolName()
        lvmVolumePath := getLvmDevPath(poolName, volumeType, s.volume.Name)
-       mountOptions := s.getLvmBlockMountOptions()
-       err = tryMount(lvmVolumePath, customPoolVolumeMntPoint, lvFsType, 0, mountOptions)
-       if err != nil {
-               return false, err
+
+       customMountLockID := getCustomMountLockID(s.pool.Name, s.volume.Name)
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
+               lxdStorageMapLock.Unlock()
+               if _, ok := <-waitChannel; ok {
+                       s.log.Warn("Received value over semaphore. This should not have happened.")
+               }
+               // Give the benefit of the doubt and assume that the other
+               // thread actually succeeded in mounting the storage volume.
+               return false, nil
        }
 
-       return true, nil
+       lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
+       lxdStorageMapLock.Unlock()
+
+       var customerr error
+       ourMount := false
+       if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+               customerr = tryMount(lvmVolumePath, customPoolVolumeMntPoint, lvFsType, 0, mountOptions)
+               ourMount = true
+       }
+
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
+               close(waitChannel)
+               delete(lxdStorageOngoingOperationMap, customMountLockID)
+       }
+       lxdStorageMapLock.Unlock()
+
+       if customerr != nil {
+               return false, customerr
+       }
+
+       return ourMount, nil
 }
 
 func (s *storageLvm) StoragePoolVolumeUmount() (bool, error) {
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-       if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+
+       customUmountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
+               lxdStorageMapLock.Unlock()
+               if _, ok := <-waitChannel; ok {
+                       s.log.Warn("Received value over semaphore. This should not have happened.")
+               }
+               // Give the benefit of the doubt and assume that the other
+               // thread actually succeeded in unmounting the storage volume.
                return false, nil
        }
 
-       err := tryUnmount(customPoolVolumeMntPoint, 0)
-       if err != nil {
-               return false, err
+       lxdStorageOngoingOperationMap[customUmountLockID] = make(chan bool)
+       lxdStorageMapLock.Unlock()
+
+       var customerr error
+       ourUmount := false
+       if shared.IsMountPoint(customPoolVolumeMntPoint) {
+               customerr = tryUnmount(customPoolVolumeMntPoint, 0)
+               ourUmount = true
        }
 
-       return true, nil
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
+               close(waitChannel)
+               delete(lxdStorageOngoingOperationMap, customUmountLockID)
+       }
+       lxdStorageMapLock.Unlock()
+
+       if customerr != nil {
+               return false, customerr
+       }
+
+       return ourUmount, nil
 }
 
 func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {

From 861369854ffc60d9e32bd3bb8462a6cf37ab0c37 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brau...@ubuntu.com>
Date: Thu, 23 Feb 2017 23:14:12 +0100
Subject: [PATCH 2/3] storage: lock StoragePoolVolume{M,Um}ount for zfs

Signed-off-by: Christian Brauner <christian.brau...@ubuntu.com>
---
 lxd/storage_zfs.go | 68 ++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 58 insertions(+), 10 deletions(-)

diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 42d4605..6e8ed4a 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -207,32 +207,80 @@ func (s *storageZfs) StoragePoolVolumeMount() (bool, error) {
        fs := fmt.Sprintf("custom/%s", s.volume.Name)
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
 
-       if shared.IsMountPoint(customPoolVolumeMntPoint) {
+       customMountLockID := getCustomMountLockID(s.pool.Name, s.volume.Name)
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
+               lxdStorageMapLock.Unlock()
+               if _, ok := <-waitChannel; ok {
+                       s.log.Warn("Received value over semaphore. This should not have happened.")
+               }
+               // Give the benefit of the doubt and assume that the other
+               // thread actually succeeded in mounting the storage volume.
                return false, nil
        }
 
-       err := s.zfsPoolVolumeMount(fs)
-       if err != nil {
-               return false, err
+       lxdStorageOngoingOperationMap[customMountLockID] = make(chan bool)
+       lxdStorageMapLock.Unlock()
+
+       var customerr error
+       ourMount := false
+       if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+               customerr = s.zfsPoolVolumeMount(fs)
+               ourMount = true
        }
 
-       return true, nil
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customMountLockID]; ok {
+               close(waitChannel)
+               delete(lxdStorageOngoingOperationMap, customMountLockID)
+       }
+       lxdStorageMapLock.Unlock()
+
+       if customerr != nil {
+               return false, customerr
+       }
+
+       return ourMount, nil
 }
 
 func (s *storageZfs) StoragePoolVolumeUmount() (bool, error) {
        fs := fmt.Sprintf("custom/%s", s.volume.Name)
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
 
-       if !shared.IsMountPoint(customPoolVolumeMntPoint) {
+       customUmountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
+               lxdStorageMapLock.Unlock()
+               if _, ok := <-waitChannel; ok {
+                       s.log.Warn("Received value over semaphore. This should not have happened.")
+               }
+               // Give the benefit of the doubt and assume that the other
+               // thread actually succeeded in unmounting the storage volume.
                return false, nil
        }
 
-       err := s.zfsPoolVolumeUmount(fs)
-       if err != nil {
-               return false, err
+       lxdStorageOngoingOperationMap[customUmountLockID] = make(chan bool)
+       lxdStorageMapLock.Unlock()
+
+       var customerr error
+       ourUmount := false
+       if shared.IsMountPoint(customPoolVolumeMntPoint) {
+               customerr = s.zfsPoolVolumeUmount(fs)
+               ourUmount = true
        }
 
-       return true, nil
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[customUmountLockID]; ok {
+               close(waitChannel)
+               delete(lxdStorageOngoingOperationMap, customUmountLockID)
+       }
+       lxdStorageMapLock.Unlock()
+
+       if customerr != nil {
+               return false, customerr
+       }
+
+       return ourUmount, nil
 }
 
 func (s *storageZfs) GetStoragePoolWritable() api.StoragePoolPut {

From 4cf43bf57d84053d1ec0eb458486da90bd4ad6e0 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brau...@ubuntu.com>
Date: Thu, 23 Feb 2017 23:37:05 +0100
Subject: [PATCH 3/3] test: test custom storage volume creation

Signed-off-by: Christian Brauner <christian.brau...@ubuntu.com>
---
 test/suites/storage.sh | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index 3f36b8f..8f07508 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -105,6 +105,11 @@ test_storage() {
 
       lxc launch testimage c4pool2 -s "lxdtest-$(basename "${LXD_DIR}")-pool2"
       lxc list -c b c4pool2 | grep "lxdtest-$(basename "${LXD_DIR}")-pool2"
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2
     fi
 
     if which btrfs >/dev/null 2>&1; then
@@ -117,6 +122,11 @@ test_storage() {
       lxc list -c b c7pool3 | grep "lxdtest-$(basename "${LXD_DIR}")-pool3"
       lxc launch testimage c8pool4 -s "lxdtest-$(basename "${LXD_DIR}")-pool4"
       lxc list -c b c8pool4 | grep "lxdtest-$(basename "${LXD_DIR}")-pool4"
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4
     fi
 
     lxc init testimage c9pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5"
@@ -125,6 +135,9 @@ test_storage() {
     lxc launch testimage c11pool5 -s "lxdtest-$(basename "${LXD_DIR}")-pool5"
     lxc list -c b c11pool5 | grep "lxdtest-$(basename "${LXD_DIR}")-pool5"
 
+    lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5
+    lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5
+
     if which lvdisplay >/dev/null 2>&1; then
       lxc init testimage c10pool6 -s "lxdtest-$(basename "${LXD_DIR}")-pool6"
       lxc list -c b c10pool6 | grep "lxdtest-$(basename "${LXD_DIR}")-pool6"
@@ -149,6 +162,15 @@ test_storage() {
 
      lxc launch testimage c12pool13 -s "lxdtest-$(basename "${LXD_DIR}")-pool13"
       lxc list -c b c12pool13 | grep "lxdtest-$(basename "${LXD_DIR}")-pool13"
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13
     fi
 
     if which zfs >/dev/null 2>&1; then
@@ -160,6 +182,13 @@ test_storage() {
 
       lxc launch testimage c17pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9"
       lxc launch testimage c18pool9 -s "lxdtest-$(basename "${LXD_DIR}")-pool9"
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9
     fi
 
     if which zfs >/dev/null 2>&1; then
@@ -168,6 +197,11 @@ test_storage() {
 
       lxc delete -f c4pool2
       lxc delete -f c2pool2
+
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool1" c1pool1
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool1" c2pool2
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool2" c3pool1
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool2" c4pool2
     fi
 
     if which btrfs >/dev/null 2>&1; then
@@ -176,11 +210,19 @@ test_storage() {
 
       lxc delete -f c8pool4
       lxc delete -f c6pool4
+
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool3" c5pool3
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool4" c6pool4
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool3" c7pool3
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool4" c8pool4
     fi
 
     lxc delete -f c9pool5
     lxc delete -f c11pool5
 
+    lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool5" c9pool5
+    lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool5" c11pool5
+
     if which lvdisplay >/dev/null 2>&1; then
       lxc delete -f c10pool6
       lxc delete -f c12pool6
@@ -193,6 +235,15 @@ test_storage() {
 
       lxc delete -f c10pool13
       lxc delete -f c12pool13
+
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6" c12pool6
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool11" c12pool11
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool12" c10pool12
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13
     fi
 
     if which zfs >/dev/null 2>&1; then
@@ -204,6 +255,13 @@ test_storage() {
 
       lxc delete -f c17pool9
       lxc delete -f c18pool9
+
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool7" c13pool7
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool7" c14pool7
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool8" c15pool8
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool8" c16pool8
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool9" c17pool9
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool9" c18pool9
     fi
 
     lxc image delete testimage