The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/2985

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Closes https://github.com/lxc/lxd/issues/2983.
Addresses https://github.com/lxc/lxd/issues/2866.
From 36ced25d40ead29924bd28e74dc8150c4dfb9bc9 Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brau...@ubuntu.com>
Date: Wed, 1 Mar 2017 15:06:10 +0100
Subject: [PATCH 1/2] storage_cgo: add set_autoclear_loop_device()

Signed-off-by: Christian Brauner <christian.brau...@ubuntu.com>
---
 lxd/storage_cgo.go | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/lxd/storage_cgo.go b/lxd/storage_cgo.go
index d7a6ba3..c5922e6 100644
--- a/lxd/storage_cgo.go
+++ b/lxd/storage_cgo.go
@@ -28,6 +28,9 @@ package main
 #define LXD_MAX_LOOP_PATHLEN (2 * sizeof("loop/")) + LXD_NUMSTRLEN64 + sizeof("backing_file") + 1
 
 // If a loop file is already associated with a loop device, find it.
+// This looks at "/sys/block" to avoid having to parse all of "/dev". Also, this
+// allows to retrieve the full name of the backing file even if
+// strlen(backing file) > LO_NAME_SIZE.
 static int find_associated_loop_device(const char *loop_file,
                                       char *loop_dev_name)
 {
@@ -98,6 +101,7 @@ static int find_associated_loop_device(const char *loop_file,
                        continue;
                }
 
+               // Create path to loop device.
                ret = snprintf(loop_dev_name, LO_NAME_SIZE, "/dev/%s",
                               dp->d_name);
                if (ret < 0 || ret >= LO_NAME_SIZE) {
@@ -105,7 +109,14 @@ static int find_associated_loop_device(const char *loop_file,
                        fd = -1;
                        continue;
                }
+               close(fd);
 
+               // Open fd to loop device.
+               fd = open(loop_dev_name, O_RDWR);
+               if (fd < 0) {
+                       close(fd);
+                       fd = -1;
+               }
                break;
        }
 
@@ -239,6 +250,20 @@ on_error:
 
        return fd_loop;
 }
+
+// Note that this does not guarantee to clear the loop device in time so that
+// find_associated_loop_device() will not report that there still is a
+// configured device (udev and so on...). So don't call
+// find_associated_loop_device() after having called
+// set_autoclear_loop_device().
+int set_autoclear_loop_device(int fd_loop)
+{
+       struct loop_info64 lo64;
+
+       memset(&lo64, 0, sizeof(lo64));
+       lo64.lo_flags = LO_FLAGS_AUTOCLEAR;
+       return ioctl(fd_loop, LOOP_SET_STATUS64, &lo64);
+}
 */
 import "C"
 
@@ -277,3 +302,8 @@ func prepareLoopDev(source string, flags int) (*os.File, error) {
 
        return os.NewFile(uintptr(loopFd), C.GoString((*C.char)(cLoopDev))), nil
 }
+
+func setAutoclearOnLoopDev(loopFd int) error {
+       _, err := C.set_autoclear_loop_device(C.int(loopFd))
+       return err
+}
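
The set_autoclear_loop_device() helper added above boils down to a
LOOP_SET_STATUS64 ioctl with LO_FLAGS_AUTOCLEAR set, which tells the kernel to
detach the loop device once its last open file descriptor is closed. As a
rough, standalone sketch (not part of this patch), the same operation can be
expressed with the newer golang.org/x/sys/unix wrappers instead of cgo; the
"/dev/loop0" path is only a placeholder:

package main

import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
)

// setAutoclear asks the kernel to detach the loop device automatically once
// the last file descriptor referring to it is closed.
func setAutoclear(loopDevPath string) error {
        f, err := os.OpenFile(loopDevPath, os.O_RDWR, 0)
        if err != nil {
                return err
        }
        defer f.Close()

        // A zeroed struct with only LO_FLAGS_AUTOCLEAR set mirrors what the
        // cgo helper above does with memset() + LOOP_SET_STATUS64.
        info := unix.LoopInfo64{Flags: unix.LO_FLAGS_AUTOCLEAR}
        return unix.IoctlLoopSetStatus64(int(f.Fd()), &info)
}

func main() {
        if err := setAutoclear("/dev/loop0"); err != nil {
                fmt.Fprintln(os.Stderr, "failed to set autoclear:", err)
                os.Exit(1)
        }
}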

From 6d9dd00e2b225e8734b36b303f4b4afbeb48575c Mon Sep 17 00:00:00 2001
From: Christian Brauner <christian.brau...@ubuntu.com>
Date: Wed, 1 Mar 2017 15:06:27 +0100
Subject: [PATCH 2/2] lvm: allow loop-backed lvm storage pools

Signed-off-by: Christian Brauner <christian.brau...@ubuntu.com>
---
 lxd/storage_lvm.go          | 248 +++++++++++++++++++++++++++++++++++++++-----
 lxd/storage_pools_config.go |   2 +-
 test/suites/storage.sh      |  31 ++++++
 3 files changed, 252 insertions(+), 29 deletions(-)

diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index a9697f9..6bcf02d 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -211,6 +211,7 @@ func containerNameToLVName(containerName string) string {
 type storageLvm struct {
        vgName       string
        thinPoolName string
+       loopInfo     *os.File
        storageShared
 }
 
@@ -333,22 +334,14 @@ func (s *storageLvm) StoragePoolInit() error {
                s.vgName = s.pool.Config["lvm.vg_name"]
        }
 
-       if source == "" {
-               return fmt.Errorf("Loop backed lvm storage pools are not supported.")
-       } else {
-               if filepath.IsAbs(source) {
-                       if !shared.IsBlockdevPath(source) {
-                               return fmt.Errorf("Loop backed lvm storage pools are not supported.")
-                       }
-               } else {
-                       ok, err := storageVGExists(source)
-                       if err != nil {
-                               // Internal error.
-                               return err
-                       } else if !ok {
-                               // Volume group does not exist.
-                               return fmt.Errorf("The requested volume group \"%s\" does not exist.", source)
-                       }
+       if source != "" && !filepath.IsAbs(source) {
+               ok, err := storageVGExists(source)
+               if err != nil {
+                       // Internal error.
+                       return err
+               } else if !ok {
+                       // Volume group does not exist.
+                       return fmt.Errorf("The requested volume group \"%s\" does not exist.", source)
                }
        }
 
@@ -358,8 +351,16 @@ func (s *storageLvm) StoragePoolInit() error {
 func (s *storageLvm) StoragePoolCheck() error {
        shared.LogDebugf("Checking LVM storage pool \"%s\".", s.pool.Name)
 
+       _, err := s.StoragePoolMount()
+       if err != nil {
+               return err
+       }
+       if s.loopInfo != nil {
+               defer s.loopInfo.Close()
+       }
+
        poolName := s.getOnDiskPoolName()
-       err := storageVGActivate(poolName)
+       err = storageVGActivate(poolName)
        if err != nil {
                return err
        }
@@ -426,16 +427,77 @@ func (s *storageLvm) StoragePoolCreate() error {
                }
        }()
 
-       // Clear size as we're currently not using it for LVM.
-       s.pool.Config["size"] = ""
        poolName := s.getOnDiskPoolName()
        source := s.pool.Config["source"]
        if source == "" {
-               return fmt.Errorf("Loop backed lvm storage pools are not supported.")
+               source = filepath.Join(shared.VarPath("disks"), fmt.Sprintf("%s.img", s.pool.Name))
+               s.pool.Config["source"] = source
+
+               if s.pool.Config["lvm.vg_name"] == "" {
+                       s.pool.Config["lvm.vg_name"] = poolName
+               }
+
+               f, err := os.Create(source)
+               if err != nil {
+                       return fmt.Errorf("Failed to open %s: %s", source, err)
+               }
+               defer f.Close()
+
+               err = f.Chmod(0600)
+               if err != nil {
+                       return fmt.Errorf("Failed to chmod %s: %s", source, err)
+               }
+
+               size, err := shared.ParseByteSizeString(s.pool.Config["size"])
+               if err != nil {
+                       return err
+               }
+               err = f.Truncate(size)
+               if err != nil {
+                       return fmt.Errorf("Failed to create sparse file %s: %s", source, err)
+               }
+
+               _, err = s.StoragePoolMount()
+               if err != nil {
+                       return err
+               }
+               defer func() {
+                       if tryUndo {
+                               os.Remove(source)
+                       }
+               }()
+
+               // Check if the physical volume already exists.
+               loopDevicePath := s.loopInfo.Name()
+               defer s.loopInfo.Close()
+               ok, err := storagePVExists(loopDevicePath)
+               if err == nil && !ok {
+                       // Create a new lvm physical volume.
+                       output, err := exec.Command("pvcreate", loopDevicePath).CombinedOutput()
+                       if err != nil {
+                               return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
+                       }
+                       defer func() {
+                               if tryUndo {
+                                       exec.Command("pvremove", loopDevicePath).Run()
+                               }
+                       }()
+               }
+
+               // Check if the volume group already exists.
+               ok, err = storageVGExists(poolName)
+               if err == nil && !ok {
+                       // Create a volume group on the physical volume.
+                       output, err := exec.Command("vgcreate", poolName, loopDevicePath).CombinedOutput()
+                       if err != nil {
+                               return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
+                       }
+               }
        } else {
+               s.pool.Config["size"] = ""
                if filepath.IsAbs(source) {
                        if !shared.IsBlockdevPath(source) {
-                               return fmt.Errorf("Loop backed lvm storage pools are not supported.")
+                               return fmt.Errorf("Custom loop file locations are not supported.")
                        }
 
                        if s.pool.Config["lvm.vg_name"] == "" {
@@ -503,6 +565,18 @@ func (s *storageLvm) StoragePoolCreate() error {
 func (s *storageLvm) StoragePoolDelete() error {
        shared.LogInfof("Deleting LVM storage pool \"%s\".", s.pool.Name)
 
+       _, err := s.StoragePoolMount()
+       if err != nil {
+               return err
+       }
+       if s.loopInfo != nil {
+               err := setAutoclearOnLoopDev(int(s.loopInfo.Fd()))
+               if err != nil {
+                       shared.LogWarnf("Failed to set LO_FLAGS_AUTOCLEAR on loop device: %s. Manual cleanup needed.", err)
+               }
+               defer s.loopInfo.Close()
+       }
+
        source := s.pool.Config["source"]
        if source == "" {
                return fmt.Errorf("No \"source\" property found for the storage pool.")
@@ -515,6 +589,15 @@ func (s *storageLvm) StoragePoolDelete() error {
                return fmt.Errorf("Failed to destroy the volume group for the lvm storage pool: %s.", output)
        }
 
+       if filepath.IsAbs(source) {
+               // This is a loop file so deconfigure the associated loop
+               // device.
+               err = os.Remove(source)
+               if err != nil {
+                       return err
+               }
+       }
+
        // Delete the mountpoint for the storage pool.
        poolMntPoint := getStoragePoolMountPoint(s.pool.Name)
        err = os.RemoveAll(poolMntPoint)
@@ -526,7 +609,66 @@ func (s *storageLvm) StoragePoolDelete() error {
        return nil
 }
 
+// Currently only used for loop-backed LVM storage pools. Can be called without
+// overhead since it is essentially a noop for non-loop-backed LVM storage
+// pools.
 func (s *storageLvm) StoragePoolMount() (bool, error) {
+       source := s.pool.Config["source"]
+       if source == "" {
+               return false, fmt.Errorf("No \"source\" property found for the storage pool.")
+       }
+
+       if !filepath.IsAbs(source) {
+               return true, nil
+       }
+
+       poolMountLockID := getPoolMountLockID(s.pool.Name)
+       lxdStorageMapLock.Lock()
+       if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
+               lxdStorageMapLock.Unlock()
+               if _, ok := <-waitChannel; ok {
+                       shared.LogWarnf("Received value over semaphore. This should not have happened.")
+               }
+               // Give the benefit of the doubt and assume that the other
+               // thread actually succeeded in mounting the storage pool.
+               return false, nil
+       }
+
+       lxdStorageOngoingOperationMap[poolMountLockID] = make(chan bool)
+       lxdStorageMapLock.Unlock()
+
+       removeLockFromMap := func() {
+               lxdStorageMapLock.Lock()
+               if waitChannel, ok := lxdStorageOngoingOperationMap[poolMountLockID]; ok {
+                       close(waitChannel)
+                       delete(lxdStorageOngoingOperationMap, poolMountLockID)
+               }
+               lxdStorageMapLock.Unlock()
+       }
+
+       defer removeLockFromMap()
+
+       if filepath.IsAbs(source) && !shared.IsBlockdevPath(source) {
+               loopF, err := prepareLoopDev(source, 0)
+               if err != nil {
+                       return false, fmt.Errorf("Could not prepare loop device: %s", err)
+               }
+               s.loopInfo = loopF
+
+               // Force rescan, since LVM is not working nicely with loop
+               // devices.
+               output, err := tryExec("pvscan")
+               if err != nil {
+                       shared.LogWarnf("pvscan failed: %s.", string(output))
+               }
+
+               // See comment above.
+               output, err = tryExec("vgscan")
+               if err != nil {
+                       shared.LogWarnf("vgscan failed: %s.", string(output))
+               }
+       }
+
        return true, nil
 }
 
@@ -588,8 +730,13 @@ func (s *storageLvm) StoragePoolVolumeCreate() error {
 func (s *storageLvm) StoragePoolVolumeDelete() error {
        shared.LogInfof("Deleting LVM storage volume \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return err
+       }
+
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
-       _, err := s.StoragePoolVolumeUmount()
+       _, err = s.StoragePoolVolumeUmount()
        if err != nil {
                return err
        }
@@ -619,6 +766,11 @@ func (s *storageLvm) StoragePoolVolumeDelete() error {
 func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
        shared.LogDebugf("Mounting LVM storage volume \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return false, err
+       }
+
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
        poolName := s.getOnDiskPoolName()
        mountOptions := s.getLvmBlockMountOptions()
@@ -672,6 +824,11 @@ func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
 func (s *storageLvm) StoragePoolVolumeUmount() (bool, error) {
        shared.LogDebugf("Unmounting LVM storage volume \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return false, err
+       }
+
        customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
 
        customUmountLockID := getCustomUmountLockID(s.pool.Name, s.volume.Name)
@@ -1049,6 +1206,11 @@ func (s *storageLvm) ContainerCanRestore(container container, sourceContainer co
 func (s *storageLvm) ContainerDelete(container container) error {
        shared.LogDebugf("Deleting LVM storage volume for container \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return err
+       }
+
        containerName := container.Name()
        containerLvmName := containerNameToLVName(containerName)
        containerMntPoint := ""
@@ -1069,7 +1231,7 @@ func (s *storageLvm) ContainerDelete(container container) error {
        }
 
        poolName := s.getOnDiskPoolName()
-       err := s.removeLV(poolName, storagePoolVolumeApiEndpointContainers, containerLvmName)
+       err = s.removeLV(poolName, storagePoolVolumeApiEndpointContainers, containerLvmName)
        if err != nil {
                return err
        }
@@ -1227,6 +1389,11 @@ func (s *storageLvm) ContainerMount(name string, path string) (bool, error) {
 func (s *storageLvm) ContainerUmount(name string, path string) (bool, error) {
        shared.LogDebugf("Unmounting LVM storage volume for container \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return false, err
+       }
+
        containerMntPoint := getContainerMountPoint(s.pool.Name, name)
 
        containerUmountLockID := getContainerUmountLockID(s.pool.Name, name)
@@ -1355,7 +1522,12 @@ func (s *storageLvm) ContainerRename(container container, newContainerName strin
 func (s *storageLvm) ContainerRestore(container container, sourceContainer container) error {
        shared.LogDebugf("Restoring LVM storage volume for container \"%s\" from %s -> %s.", s.volume.Name, sourceContainer.Name(), container.Name())
 
-       err := sourceContainer.StorageStart()
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return err
+       }
+
+       err = sourceContainer.StorageStart()
        if err != nil {
                return err
        }
@@ -1406,7 +1578,12 @@ func (s *storageLvm) ContainerGetUsage(container container) (int64, error) {
 func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
        shared.LogDebugf("Creating LVM storage volume for snapshot \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
-       err := s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return err
+       }
+
+       err = s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
        if err != nil {
                return err
        }
@@ -1467,7 +1644,12 @@ func (s *storageLvm) createSnapshotContainer(snapshotContainer container, source
 func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer container) error {
        shared.LogDebugf("Deleting LVM storage volume for snapshot \"%s\" on storage pool \"%s\".", s.volume.Name, s.pool.Name)
 
-       err := s.ContainerDelete(snapshotContainer)
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return err
+       }
+
+       err = s.ContainerDelete(snapshotContainer)
        if err != nil {
                return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
        }
@@ -1713,6 +1895,11 @@ func (s *storageLvm) ImageDelete(fingerprint string) error {
 func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
        shared.LogDebugf("Mounting LVM storage volume for image \"%s\" on storage pool \"%s\".", fingerprint, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return false, err
+       }
+
        imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
        if shared.IsMountPoint(imageMntPoint) {
                return false, nil
@@ -1727,7 +1914,7 @@ func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
        poolName := s.getOnDiskPoolName()
        lvmVolumePath := getLvmDevPath(poolName, storagePoolVolumeApiEndpointImages, fingerprint)
        lvmMountOptions := s.getLvmBlockMountOptions()
-       err := tryMount(lvmVolumePath, imageMntPoint, lvmFstype, 0, lvmMountOptions)
+       err = tryMount(lvmVolumePath, imageMntPoint, lvmFstype, 0, lvmMountOptions)
        if err != nil {
                shared.LogErrorf(fmt.Sprintf("Error mounting image LV for unpacking: %s", err))
                return false, fmt.Errorf("Error mounting image LV: %v", err)
@@ -1740,12 +1927,17 @@ func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
 func (s *storageLvm) ImageUmount(fingerprint string) (bool, error) {
        shared.LogDebugf("Unmounting LVM storage volume for image \"%s\" on storage pool \"%s\".", fingerprint, s.pool.Name)
 
+       err := s.StoragePoolCheck()
+       if err != nil {
+               return false, err
+       }
+
        imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
        if !shared.IsMountPoint(imageMntPoint) {
                return false, nil
        }
 
-       err := tryUnmount(imageMntPoint, 0)
+       err = tryUnmount(imageMntPoint, 0)
        if err != nil {
                return false, err
        }
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index 9d88738..4be75a4 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -101,7 +101,7 @@ func storagePoolValidateConfig(name string, driver string, config map[string]str
 
 func storagePoolFillDefault(name string, driver string, config map[string]string) error {
        if driver != "dir" {
-               if driver != "lvm" && config["size"] == "" {
+               if config["size"] == "" {
                        st := syscall.Statfs_t{}
                        err := syscall.Statfs(shared.VarPath(), &st)
                        if err != nil {
diff --git a/test/suites/storage.sh b/test/suites/storage.sh
index 50d86e2..f01362e 100644
--- a/test/suites/storage.sh
+++ b/test/suites/storage.sh
@@ -84,6 +84,8 @@ test_storage() {
       pvcreate "${loop_device_8}"
       # Create new volume group "dummy_vg_4" on existing physical volume.
       lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool13" lvm source="${loop_device_8}" lvm.vg_name="lxdtest-$(basename "${LXD_DIR}")-pool13-dummy_vg_4" volume.size=25MB
+
+      lxc storage create "lxdtest-$(basename "${LXD_DIR}")-pool14" lvm
     fi
 
     # Set default storage pool for image import.
@@ -235,6 +237,12 @@ test_storage() {
       lxc launch testimage c12pool13 -s "lxdtest-$(basename "${LXD_DIR}")-pool13"
       lxc list -c b c12pool13 | grep "lxdtest-$(basename "${LXD_DIR}")-pool13"
 
+      lxc init testimage c10pool14 -s "lxdtest-$(basename "${LXD_DIR}")-pool14"
+      lxc list -c b c10pool14 | grep "lxdtest-$(basename "${LXD_DIR}")-pool14"
+
+      lxc launch testimage c12pool14 -s "lxdtest-$(basename "${LXD_DIR}")-pool14"
+      lxc list -c b c12pool14 | grep "lxdtest-$(basename "${LXD_DIR}")-pool14"
+
       lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
       lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice /opt
       ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6 c10pool6 testDevice2 /opt
@@ -298,6 +306,22 @@ test_storage() {
       lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c12pool13 c12pool13 testDevice /opt
       ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool13" custom/c12pool13 c12pool13 testDevice2 /opt
       lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13 c12pool13 testDevice
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14
+      lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice /opt
+      ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice2 /opt
+      lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice
+      lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c10pool14 c10pool14 testDevice /opt
+      ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c10pool14 c10pool14 testDevice2 /opt
+      lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14 c10pool14 testDevice
+
+      lxc storage volume create "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14
+      lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice /opt
+      ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice2 /opt
+      lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice
+      lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c12pool14 c12pool14 testDevice /opt
+      ! lxc storage volume attach "lxdtest-$(basename "${LXD_DIR}")-pool14" custom/c12pool14 c12pool14 testDevice2 /opt
+      lxc storage volume detach "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14 c12pool14 testDevice
     fi
 
     if which zfs >/dev/null 2>&1; then
@@ -404,6 +428,9 @@ test_storage() {
       lxc delete -f c10pool13
       lxc delete -f c12pool13
 
+      lxc delete -f c10pool14
+      lxc delete -f c12pool14
+
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6" c10pool6
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool6"  c12pool6
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool11" c10pool11
@@ -412,6 +439,8 @@ test_storage() {
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool12" c12pool12
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c10pool13
       lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool13" c12pool13
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool14" c10pool14
+      lxc storage volume delete "lxdtest-$(basename "${LXD_DIR}")-pool14" c12pool14
     fi
 
     if which zfs >/dev/null 2>&1; then
@@ -473,6 +502,8 @@ test_storage() {
       lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool13"
       # shellcheck disable=SC2154
       deconfigure_lvm_loop_device "${loop_file_8}" "${loop_device_8}"
+
+      lxc storage delete "lxdtest-$(basename "${LXD_DIR}")-pool14"
     fi
   )
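
The StoragePoolMount() change above serializes concurrent mount attempts per
pool: lxdStorageMapLock guards lxdStorageOngoingOperationMap, the first caller
stores a channel there and does the actual work, and later callers simply wait
for that channel to be closed. A simplified, standalone sketch of that pattern
follows; the names and demo values here are illustrative, not LXD's own API:

package main

import (
        "fmt"
        "sync"
        "time"
)

var (
        opLock  sync.Mutex
        // One channel per key; closed when the operation holding the key finishes.
        ongoing = map[string]chan struct{}{}
)

// runOnce runs fn for key unless another goroutine is already running it for
// the same key; in that case it just waits for that goroutine to finish.
func runOnce(key string, fn func()) {
        opLock.Lock()
        if wait, ok := ongoing[key]; ok {
                opLock.Unlock()
                <-wait // another caller is doing the work; wait for the channel to close
                return
        }
        ongoing[key] = make(chan struct{})
        opLock.Unlock()

        defer func() {
                opLock.Lock()
                close(ongoing[key])
                delete(ongoing, key)
                opLock.Unlock()
        }()

        fn()
}

func main() {
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
                wg.Add(1)
                go func() {
                        defer wg.Done()
                        runOnce("mount/pool14", func() {
                                fmt.Println("mounting the pool once")
                                time.Sleep(100 * time.Millisecond)
                        })
                }()
        }
        wg.Wait()
}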
 
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel
