The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/3722

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
Closes #3716.

Signed-off-by: Christian Brauner <[email protected]>
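
In short (summarizing the patch below): LXD marks a ceph OSD pool as in
use by creating a zero-byte RBD marker volume in it. With this change the
marker is keyed to the OSD pool name rather than the LXD pool name, and
creating a storage pool on top of an OSD pool that already carries the
marker now fails unless the new "ceph.osd.force_reuse" property is set to
true, e.g. something along the lines of "lxc storage create pool1 ceph
ceph.osd.pool_name=existing-pool ceph.osd.force_reuse=true" (the command
line is illustrative; only the property name comes from the patch).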
From 040927aaf43e2f5ea0b5cd56d2f4f91ff055b0c7 Mon Sep 17 00:00:00 2001
From: Christian Brauner <[email protected]>
Date: Sat, 26 Aug 2017 19:47:26 +0200
Subject: [PATCH] ceph: add "ceph.osd.force_reuse" property

Closes #3716.

Signed-off-by: Christian Brauner <[email protected]>
---
 lxd/storage_ceph.go         | 16 ++++++++++++++--
 lxd/storage_pools_config.go |  5 +++--
 2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 849ce2a38..c0410a385 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -225,7 +225,7 @@ func (s *storageCeph) StoragePoolCreate() error {
                }
        }()
 
-       ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, s.pool.Name,
+       ok := cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, s.OSDPoolName,
                "lxd", s.UserName)
        s.pool.Config["volatile.pool.pristine"] = "false"
        if !ok {
@@ -234,7 +234,7 @@ func (s *storageCeph) StoragePoolCreate() error {
                // this to detect whether this osd pool is already in use by
                // another LXD instance.
                err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
-                       s.pool.Name, "lxd", "0", s.UserName)
+                       s.OSDPoolName, "lxd", "0", s.UserName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume `+
                                `"%s" on storage pool "%s": %s`, s.pool.Name,
@@ -243,6 +243,18 @@ func (s *storageCeph) StoragePoolCreate() error {
                }
                logger.Debugf(`Created RBD storage volume "%s" on storage `+
                        `pool "%s"`, s.pool.Name, s.pool.Name)
+       } else {
+               msg := fmt.Sprintf(`CEPH OSD storage pool "%s" in cluster `+
+                       `"%s" seems to be in use by another LXD instance`,
+                       s.pool.Name, s.ClusterName)
+               if s.pool.Config["ceph.osd.force_reuse"] == "" ||
+                       !shared.IsTrue(s.pool.Config["ceph.osd.force_reuse"]) {
+                       msg += `. Set "ceph.osd.force_reuse=true" to force ` +
+                               `LXD to reuse the pool`
+                       logger.Errorf(msg)
+                       return fmt.Errorf(msg)
+               }
+               logger.Warnf(msg)
        }
 
        logger.Infof(`Created CEPH OSD storage pool "%s" in cluster "%s"`,
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index cc429186a..948cf93de 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -18,8 +18,9 @@ var storagePoolConfigKeys = map[string]func(value string) error{
        "btrfs.mount_options": shared.IsAny,
 
        // valid drivers: ceph
-       "ceph.cluster_name":  shared.IsAny,
-       "ceph.osd.pool_name": shared.IsAny,
+       "ceph.cluster_name":    shared.IsAny,
+       "ceph.osd.force_reuse": shared.IsBool,
+       "ceph.osd.pool_name":   shared.IsAny,
        "ceph.osd.pg_num": func(value string) error {
                if value == "" {
                        return nil
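
For reviewers who want the new behaviour at a glance, here is a minimal,
self-contained Go sketch of the gate the first hunk introduces. It is
illustrative only: markerExists stands in for the result of
cephRBDVolumeExists, and isTrue is a simplified stand-in for
shared.IsTrue (the set of accepted truthy values shown is an assumption,
not taken from the patch):

    package main

    import (
        "fmt"
        "strings"
    )

    // isTrue is a simplified stand-in for shared.IsTrue; the accepted
    // values here are an assumption for this sketch.
    func isTrue(value string) bool {
        switch strings.ToLower(value) {
        case "true", "1", "yes", "on":
            return true
        default:
            return false
        }
    }

    // checkOSDPoolReuse mirrors the gate added to StoragePoolCreate.
    // markerExists stands in for cephRBDVolumeExists reporting that the
    // zero-byte marker volume already exists in the OSD pool.
    func checkOSDPoolReuse(config map[string]string, markerExists bool,
        poolName string, clusterName string) error {
        if !markerExists {
            // Pristine pool: the real code creates the marker volume here.
            return nil
        }

        msg := fmt.Sprintf(`CEPH OSD storage pool "%s" in cluster "%s" `+
            `seems to be in use by another LXD instance`, poolName,
            clusterName)
        if !isTrue(config["ceph.osd.force_reuse"]) {
            return fmt.Errorf(`%s. Set "ceph.osd.force_reuse=true" to `+
                `force LXD to reuse the pool`, msg)
        }

        // Reuse was explicitly requested: warn and carry on.
        fmt.Println("warning:", msg)
        return nil
    }

    func main() {
        cfg := map[string]string{"ceph.osd.force_reuse": "true"}
        if err := checkOSDPoolReuse(cfg, true, "my-osd-pool", "ceph"); err != nil {
            fmt.Println(err)
        }
    }

One design note: the patch's condition also tests
config["ceph.osd.force_reuse"] == "" before calling shared.IsTrue, which
is presumably redundant (IsTrue should return false for the empty
string); the sketch folds both into the single isTrue check.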