The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/6297

This e-mail was sent by the LXC bot; direct replies will not reach the author
unless they happen to be subscribed to this list.

=== Description (from pull-request) ===
It's now possible to create an LXD storage pool that's backed by a Ceph erasure-coded pool. To do so, supply both `ceph.osd.pool_name` (used to store the metadata about the RBD image) and `ceph.osd.data_pool_name` (where the actual data will be stored).

For example: `lxc storage create erasure-pool ceph ceph.osd.pool_name=rbd ceph.osd.data_pool_name=k2m1-pool`
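To make the plumbing easier to follow, here is a minimal, self-contained Go sketch of how a configured data pool name could be threaded into the underlying `rbd create` invocation. The helper name `buildRBDCreateArgs` is hypothetical, and the "only pass `--data-pool` when set" check is an illustrative assumption rather than something this patch does; the flags themselves mirror the `cephRBDVolumeCreate` change in the patch below.

```go
// Illustrative sketch only, not part of this patch: shows how a configured
// data pool name could end up on the "rbd create" command line.
package main

import "fmt"

// buildRBDCreateArgs (hypothetical helper) assembles the argument list for
// "rbd ... create", optionally placing image data in a separate
// (e.g. erasure-coded) data pool while the metadata stays in --pool.
func buildRBDCreateArgs(clusterName, poolName, dataPoolName, volumeType,
	volumeName, size, userName string) []string {
	args := []string{
		"--id", userName,
		"--image-feature", "layering",
		"--cluster", clusterName,
		"--pool", poolName,
	}
	if dataPoolName != "" {
		// Assumption for illustration: only pass --data-pool when configured.
		args = append(args, "--data-pool", dataPoolName)
	}
	return append(args,
		"--size", size,
		"create",
		fmt.Sprintf("%s_%s", volumeType, volumeName))
}

func main() {
	// Mirrors the example above: metadata in "rbd", data in "k2m1-pool".
	fmt.Println(buildRBDCreateArgs("ceph", "rbd", "k2m1-pool",
		"custom", "vol1", "10GB", "admin"))
}
```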


From cac6aeb3d7d39de2d140b6317f1c4d1f27dfd9e1 Mon Sep 17 00:00:00 2001
From: Maran <ma...@protonmail.com>
Date: Wed, 9 Oct 2019 13:41:29 +0200
Subject: [PATCH] lxd/storage/ceph: Implement --data-pool argument.

It's now possible to create an LXD storage pool that's backed by a Ceph
erasure-coded pool. To do so, supply both ceph.osd.pool_name (used to store
the metadata about the RBD image) and ceph.osd.data_pool_name (where the
actual data will be stored). E.g. 'lxc storage create erasure-pool ceph
ceph.osd.pool_name=rbd ceph.osd.data_pool_name=k2m1-pool'

Signed-off-by: Maran <ma...@protonmail.com>
---
 doc/storage.md              |  1 +
 lxd/storage_ceph.go         | 30 ++++++++++++++++++------------
 lxd/storage_ceph_utils.go   | 14 ++++++++------
 lxd/storage_pools_config.go |  7 ++++---
 scripts/bash/lxd-client     |  2 +-
 5 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/doc/storage.md b/doc/storage.md
index ec13e5a3ab..8e40dc7310 100644
--- a/doc/storage.md
+++ b/doc/storage.md
@@ -14,6 +14,7 @@ ceph.cluster\_name              | string    | ceph driver
 ceph.osd.force\_reuse           | bool      | ceph driver                       | false                      | storage\_ceph\_force\_osd\_reuse   | Force using an osd storage pool that is already in use by another LXD instance.
 ceph.osd.pg\_num                | string    | ceph driver                       | 32                         | storage\_driver\_ceph              | Number of placement groups for the osd storage pool.
 ceph.osd.pool\_name             | string    | ceph driver                       | name of the pool           | storage\_driver\_ceph              | Name of the osd storage pool.
+ceph.osd.data\_pool\_name       | string    | ceph driver                       | -                          | storage\_driver\_ceph              | Name of the osd data pool.
 ceph.rbd.clone\_copy            | string    | ceph driver                       | true                       | storage\_driver\_ceph              | Whether to use RBD lightweight clones rather than full dataset copies.
 ceph.user.name                  | string    | ceph driver                       | admin                      | storage\_ceph\_user\_name          | The ceph user to use when creating storage pools and volumes.
 cephfs.cluster\_name            | string    | cephfs driver                     | ceph                       | storage\_driver\_cephfs            | Name of the ceph cluster in which to create new storage pools.
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 708b73d3c6..e68ca09261 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -27,10 +27,11 @@ import (
 )
 
 type storageCeph struct {
-       ClusterName string
-       OSDPoolName string
-       UserName    string
-       PGNum       string
+       ClusterName     string
+       OSDPoolName     string
+       OSDDataPoolName string
+       UserName        string
+       PGNum           string
        storageShared
 }
 
@@ -79,6 +80,11 @@ func (s *storageCeph) StoragePoolInit() error {
                s.OSDPoolName = s.pool.Config["ceph.osd.pool_name"]
        }
 
+       // set osd data pool name
+       if s.pool.Config["ceph.osd.data_pool_name"] != "" {
+               s.OSDDataPoolName = s.pool.Config["ceph.osd.data_pool_name"]
+       }
+
        // set ceph user name
        if s.pool.Config["ceph.user.name"] != "" {
                s.UserName = s.pool.Config["ceph.user.name"]
@@ -159,7 +165,7 @@ func (s *storageCeph) StoragePoolCreate() error {
                }()
 
                // Create dummy storage volume. Other LXD instances will use this to detect whether this osd pool is already in use by another LXD instance.
-               err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, s.OSDPoolName, "lxd", "0", s.UserName)
+               err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, s.OSDPoolName, "lxd", "0", s.UserName, s.OSDDataPoolName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.pool.Name, s.pool.Name, err)
                        return err
@@ -320,7 +326,7 @@ func (s *storageCeph) StoragePoolVolumeCreate() error {
 
        // create volume
        err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, s.volume.Name,
-               storagePoolVolumeTypeNameCustom, RBDSize, s.UserName)
+               storagePoolVolumeTypeNameCustom, RBDSize, s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
                return err
@@ -852,7 +858,7 @@ func (s *storageCeph) ContainerCreateFromImage(container Instance, fingerprint s
        volumeName := project.Prefix(container.Project(), containerName)
        err := cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, fingerprint,
                storagePoolVolumeTypeNameImage, "readonly", s.OSDPoolName,
-               volumeName, storagePoolVolumeTypeNameContainer, s.UserName)
+               volumeName, storagePoolVolumeTypeNameContainer, s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf(`Failed to clone new RBD storage volume for container "%s": %s`, containerName, err)
                return err
@@ -1164,7 +1170,7 @@ func (s *storageCeph) ContainerCopy(target Instance, source Instance, containerO
                // create empty dummy volume
                err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
                        project.Prefix(target.Project(), targetContainerName), storagePoolVolumeTypeNameContainer,
-                       "0", s.UserName)
+                       "0", s.UserName, s.OSDDataPoolName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, targetContainerName, s.pool.Name, err)
                        return err
@@ -1758,7 +1764,7 @@ func (s *storageCeph) ContainerSnapshotStart(c Instance) (bool, error) {
        err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName,
                containerOnlyName, storagePoolVolumeTypeNameContainer,
                prefixedSnapOnlyName, s.OSDPoolName, cloneName, "snapshots",
-               s.UserName)
+               s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf(`Failed to create clone of RBD storage volume for container "%s" on storage pool "%s": %s`, containerName, s.pool.Name, err)
                return false, err
@@ -2062,7 +2068,7 @@ func (s *storageCeph) ImageCreate(fingerprint string, tracker *ioprogress.Progre
                // create volume
                err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
                        fingerprint, storagePoolVolumeTypeNameImage, RBDSize,
-                       s.UserName)
+                       s.UserName, s.OSDDataPoolName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
                        return err
@@ -2570,7 +2576,7 @@ func (s *storageCeph) StoragePoolVolumeCopy(source *api.StorageVolumeSource) err
                // create empty dummy volume
                err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName,
                        s.volume.Name, storagePoolVolumeTypeNameCustom,
-                       "0", s.UserName)
+                       "0", s.UserName, s.OSDDataPoolName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume "%s" on storage pool "%s": %s`, s.volume.Name, s.pool.Name, err)
                        return err
@@ -2907,7 +2913,7 @@ func (s *storageCeph) MigrationSink(conn *websocket.Conn, op *operations.Operati
        // that's actually correct.
        instanceName := args.Instance.Name()
        if !cephRBDVolumeExists(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, s.UserName) {
-               err := cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, "0", s.UserName)
+               err := cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, project.Prefix(args.Instance.Project(), instanceName), storagePoolVolumeTypeNameContainer, "0", s.UserName, s.OSDDataPoolName)
                if err != nil {
                        logger.Errorf(`Failed to create RBD storage volume "%s" for cluster "%s" in OSD pool "%s" on storage pool "%s": %s`, instanceName, s.ClusterName, s.OSDPoolName, s.pool.Name, err)
                        return err
diff --git a/lxd/storage_ceph_utils.go b/lxd/storage_ceph_utils.go
index a23ed4352a..cca0ecc2ed 100644
--- a/lxd/storage_ceph_utils.go
+++ b/lxd/storage_ceph_utils.go
@@ -72,13 +72,14 @@ func cephOSDPoolDestroy(clusterName string, poolName string, userName string) er
 // library and the kernel module are minimized. Otherwise random panics might
 // occur.
 func cephRBDVolumeCreate(clusterName string, poolName string, volumeName string,
-       volumeType string, size string, userName string) error {
+       volumeType string, size string, userName string, dataPoolName string) error {
        _, err := shared.RunCommand(
                "rbd",
                "--id", userName,
                "--image-feature", "layering,",
                "--cluster", clusterName,
                "--pool", poolName,
+               "--data-pool", dataPoolName,
                "--size", size,
                "create",
                fmt.Sprintf("%s_%s", volumeType, volumeName))
@@ -364,12 +365,13 @@ func cephRBDCloneCreate(sourceClusterName string, sourcePoolName string,
        sourceVolumeName string, sourceVolumeType string,
        sourceSnapshotName string, targetPoolName string,
        targetVolumeName string, targetVolumeType string,
-       userName string) error {
+       userName string, targetDataPoolName string) error {
        _, err := shared.RunCommand(
                "rbd",
                "--id", userName,
                "--cluster", sourceClusterName,
                "--image-feature", "layering",
+               "--data-pool", targetDataPoolName,
                "clone",
                fmt.Sprintf("%s/%s_%s@%s", sourcePoolName, sourceVolumeType,
                        sourceVolumeName, sourceSnapshotName),
@@ -833,7 +835,7 @@ func (s *storageCeph) copyWithoutSnapshotsSparse(target Instance, source Instanc
        err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName,
                sourceContainerOnlyName, storagePoolVolumeTypeNameContainer,
                snapshotName, s.OSDPoolName, targetContainerName,
-               storagePoolVolumeTypeNameContainer, s.UserName)
+               storagePoolVolumeTypeNameContainer, s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf(`Failed to clone new RBD storage volume for container "%s": %s`, targetContainerName, err)
                return err
@@ -1631,7 +1633,7 @@ func (s *storageCeph) cephRBDVolumeBackupCreate(tmpPath string, backup backup, s
 
        // Create a new volume from the snapshot
        cloneName := uuid.NewRandom().String()
-       err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.OSDPoolName, cloneName, "backup", s.UserName)
+       err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceContainerOnlyName, storagePoolVolumeTypeNameContainer, snapshotName, s.OSDPoolName, cloneName, "backup", s.UserName, s.OSDDataPoolName)
        if err != nil {
                return err
        }
@@ -1714,7 +1716,7 @@ func (s *storageCeph) doContainerCreate(projectName, name string, privileged boo
 
        // create volume
        volumeName := project.Prefix(projectName, name)
-       err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, RBDSize, s.UserName)
+       err = cephRBDVolumeCreate(s.ClusterName, s.OSDPoolName, volumeName, storagePoolVolumeTypeNameContainer, RBDSize, s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf(`Failed to create RBD storage volume for container "%s" on storage pool "%s": %s`, name, s.pool.Name, err)
                return err
@@ -2026,7 +2028,7 @@ func (s *storageCeph) copyVolumeWithoutSnapshotsSparse(source *api.StorageVolume
        }
 
        // create new clone
-       err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotOnlyName, s.OSDPoolName, s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName)
+       err = cephRBDCloneCreate(s.ClusterName, s.OSDPoolName, sourceOnlyName, storagePoolVolumeTypeNameCustom, snapshotOnlyName, s.OSDPoolName, s.volume.Name, storagePoolVolumeTypeNameCustom, s.UserName, s.OSDDataPoolName)
        if err != nil {
                logger.Errorf("Failed to clone RBD storage volume \"%s\" on storage pool \"%s\": %s", source.Name, source.Pool, err)
                return err
diff --git a/lxd/storage_pools_config.go b/lxd/storage_pools_config.go
index b44f0659df..de12a2c089 100644
--- a/lxd/storage_pools_config.go
+++ b/lxd/storage_pools_config.go
@@ -55,9 +55,10 @@ var storagePoolConfigKeys = map[string]func(value string) error{
        "btrfs.mount_options": shared.IsAny,
 
        // valid drivers: ceph
-       "ceph.cluster_name":    shared.IsAny,
-       "ceph.osd.force_reuse": shared.IsBool,
-       "ceph.osd.pool_name":   shared.IsAny,
+       "ceph.cluster_name":       shared.IsAny,
+       "ceph.osd.force_reuse":    shared.IsBool,
+       "ceph.osd.pool_name":      shared.IsAny,
+       "ceph.osd.data_pool_name": shared.IsAny,
        "ceph.osd.pg_num": func(value string) error {
                if value == "" {
                        return nil
diff --git a/scripts/bash/lxd-client b/scripts/bash/lxd-client
index 6a78584688..2b001daf63 100644
--- a/scripts/bash/lxd-client
+++ b/scripts/bash/lxd-client
@@ -119,7 +119,7 @@ _have lxc && {
       ipv6.routes ipv6.routing raw.dnsmasq"
 
     storage_pool_keys="source size btrfs.mount_options ceph.cluster_name \
-      ceph.osd.force_reuse ceph.osd.pg_num ceph.osd.pool_name \
+      ceph.osd.force_reuse ceph.osd.pg_num ceph.osd.pool_name ceph.osd.data_pool_name \
       ceph.rbd.clone_copy ceph.user.name cephfs.cluster_name cephfs.path \
       cephfs.vg_name lvm.thinpool_name lvm.use_thinpool \
       lvm.vg_name rsync.bwlimit volatile.initial_source \
_______________________________________________
lxc-devel mailing list
lxc-devel@lists.linuxcontainers.org
http://lists.linuxcontainers.org/listinfo/lxc-devel
