The following pull request was submitted through GitHub.
It can be accessed and reviewed at: https://github.com/lxc/lxd/pull/7226

This e-mail was sent by the LXC bot; direct replies will not reach the author unless they happen to be subscribed to this list.

=== Description (from pull-request) ===

From ccc072d6d8e8880f7af3ffb1305b94f6940fdeb6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 20 Apr 2020 10:36:07 +0100
Subject: [PATCH 1/2] lxd/device/device/utils/disk: Comment on diskCephfsOptions

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/device/device_utils_disk.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lxd/device/device_utils_disk.go b/lxd/device/device_utils_disk.go
index 8490bb144f..9047d62446 100644
--- a/lxd/device/device_utils_disk.go
+++ b/lxd/device/device_utils_disk.go
@@ -245,6 +245,7 @@ func cephFsConfig(clusterName string, userName string) ([]string, string, error)
        return cephMon, cephSecret, nil
 }
 
+// diskCephfsOptions returns the mntSrcPath and fsOptions to use for mounting a cephfs share.
 func diskCephfsOptions(clusterName string, userName string, fsName string, fsPath string) (string, string, error) {
        // Get the credentials and host
        monAddresses, secret, err := cephFsConfig(clusterName, userName)

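For context on the new comment: it documents that diskCephfsOptions returns the
mount source path and filesystem options used for a cephfs mount. A rough,
standalone Go sketch of that shape is below; it assumes the conventional kernel
cephfs source format "mon1:port,mon2:port:/path" and a name/secret/mds_namespace
options string. The helper name buildCephfsMount and the fixed port 6789 are
illustrative assumptions, not code from this patch.

package main

import (
	"fmt"
	"strings"
)

// buildCephfsMount is a hypothetical helper (not from the patch) showing the
// kind of values diskCephfsOptions is documented to return: a cephfs mount
// source string and a filesystem options string.
func buildCephfsMount(monAddresses []string, userName string, secret string, fsName string, fsPath string) (string, string) {
	// Join the monitor addresses into "mon1:6789,mon2:6789" form.
	mons := make([]string, 0, len(monAddresses))
	for _, mon := range monAddresses {
		mons = append(mons, fmt.Sprintf("%s:6789", mon))
	}

	// Mount source of the form "mon1:6789,mon2:6789:/some/path".
	mntSrcPath := fmt.Sprintf("%s:/%s", strings.Join(mons, ","), fsPath)

	// Options carrying the Ceph user, its secret and the filesystem name.
	fsOptions := fmt.Sprintf("name=%s,secret=%s,mds_namespace=%s", userName, secret, fsName)

	return mntSrcPath, fsOptions
}

func main() {
	src, opts := buildCephfsMount([]string{"10.0.0.1", "10.0.0.2"}, "admin", "<secret>", "myfs", "some/path")
	fmt.Println(src)  // 10.0.0.1:6789,10.0.0.2:6789:/some/path
	fmt.Println(opts) // name=admin,secret=<secret>,mds_namespace=myfs
}
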
From 6ad54f8e56bc4583cd861c6cbf80272b2b1872c3 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parr...@canonical.com>
Date: Mon, 20 Apr 2020 10:36:37 +0100
Subject: [PATCH 2/2] lxd/device/disk: Adds cephfs support for VMs

Fixes #7184

Signed-off-by: Thomas Parrott <thomas.parr...@canonical.com>
---
 lxd/device/disk.go | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/lxd/device/disk.go b/lxd/device/disk.go
index 3e25b09ffb..39a4ec9401 100644
--- a/lxd/device/disk.go
+++ b/lxd/device/disk.go
@@ -426,10 +426,10 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) {
                        }
                } else {
                        srcPath := shared.HostPath(d.config["source"])
+                       var err error
 
                        // Mount the pool volume and update srcPath to mount path.
                        if d.config["pool"] != "" {
-                               var err error
                                srcPath, err = d.mountPoolVolume(revert)
                                if err != nil {
                                        if !isRequired {
@@ -439,6 +439,13 @@ func (d *disk) startVM() (*deviceConfig.RunConfig, error) {
                                                return nil, err
                                        }
                                }
+                       } else if strings.HasPrefix(d.config["source"], "cephfs:") {
+                               // Mount the cephfs directory on the host and then treat as a normal directory to
+                               // share with the VM using 9p below.
+                               srcPath, err = d.createDevice()
+                               if err != nil {
+                                       return nil, err
+                               }
                        }
 
                        if !shared.PathExists(srcPath) {
@@ -1139,8 +1146,8 @@ func (d *disk) postStop() error {
        // Clean any existing entry.
        if shared.PathExists(devPath) {
                // Unmount the host side if not already.
-               // Don't check for errors here as this is just to catch any existing mounts that
-               // we not unmounted on the host after device was started.
+               // Don't check for errors here as this is just to catch any existing mounts that we have not
+               // unmounted on the host after device was started (such as when using cephfs with VM 9p share).
                unix.Unmount(devPath, unix.MNT_DETACH)
 
                // Remove the host side.
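
The postStop hunk above relies on a lazy-unmount cleanup pattern: detach
whatever may still be mounted at the device path, deliberately ignoring the
error, then remove the path itself. A minimal standalone sketch of that pattern
follows; the cleanupHostMount helper and the example path are illustrative
assumptions, not code from this patch.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// cleanupHostMount lazily detaches any mount still present at devPath and then
// removes the path. The unmount error is ignored on purpose, mirroring the
// comment in the hunk above: the mount may already be gone.
func cleanupHostMount(devPath string) error {
	if _, err := os.Stat(devPath); err == nil {
		// MNT_DETACH performs a lazy unmount: the mount point is detached
		// immediately and cleaned up once it is no longer busy.
		_ = unix.Unmount(devPath, unix.MNT_DETACH)

		if err := os.Remove(devPath); err != nil {
			return fmt.Errorf("failed to remove %q: %w", devPath, err)
		}
	}

	return nil
}

func main() {
	// Example path only; real device paths are managed by LXD itself.
	if err := cleanupHostMount("/tmp/example-disk-share"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Because MNT_DETACH makes the unmount lazy, ignoring the error is safe here: the
call is only a best-effort catch for mounts that were not already unmounted
while the device was running.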