some general remarks from a quick run-through:
- backup does not seem to work, probably requires
  qemu-server/pve-container changes? [1]
- parts of this could be merged with RBDPlugin code, either by extending
  it, or by refactoring to use a common helper module
- ceph-fuse and the kernel have a very different view of the same cephfs
  storage [2]

some comments inline

1: "TASK ERROR: could not get storage information for 'cephfs_ext':
can't use storage type 'cephfs' for backup"

2: $ pvesm status | grep cephfs
cephfs_ext           cephfs     active       402030592        49885184       
352145408   12.41%
cephfs_ext_fuse      cephfs     active       108322816          831488       
107491328    0.77%

On Thu, May 17, 2018 at 11:17:05AM +0200, Alwin Antreich wrote:
>  - ability to mount through kernel and fuse client
>  - allow mount options
>  - get MONs from ceph config if not in storage.cfg
>  - allow the use of ceph config with fuse client
> 
> Signed-off-by: Alwin Antreich <a.antre...@proxmox.com>
> ---
>  PVE/API2/Storage/Config.pm  |   2 +-
>  PVE/Storage.pm              |   2 +
>  PVE/Storage/CephFSPlugin.pm | 262 
> ++++++++++++++++++++++++++++++++++++++++++++
>  PVE/Storage/Makefile        |   2 +-
>  PVE/Storage/Plugin.pm       |   1 +
>  debian/control              |   2 +
>  6 files changed, 269 insertions(+), 2 deletions(-)
>  create mode 100644 PVE/Storage/CephFSPlugin.pm
> 
> diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
> index 3b38304..368a5c9 100755
> --- a/PVE/API2/Storage/Config.pm
> +++ b/PVE/API2/Storage/Config.pm
> @@ -171,7 +171,7 @@ __PACKAGE__->register_method ({
>                   PVE::Storage::activate_storage($cfg, $baseid);
>  
>                   PVE::Storage::LVMPlugin::lvm_create_volume_group($path, 
> $opts->{vgname}, $opts->{shared});
> -             } elsif ($type eq 'rbd' && !defined($opts->{monhost})) {
> +             } elsif (($type eq 'rbd' || $type eq 'cephfs') && 
> !defined($opts->{monhost})) {
>                   my $ceph_admin_keyring = 
> '/etc/pve/priv/ceph.client.admin.keyring';
>                   my $ceph_storage_keyring = 
> "/etc/pve/priv/ceph/${storeid}.keyring";

this does not work — we need to put the key/secret there, not the whole
keyring. maybe we can also name the files accordingly (.secret instead of
.keyring) to make it more obvious.

for external clusters, this needs to go into the documentation as well.

>  
> diff --git a/PVE/Storage.pm b/PVE/Storage.pm
> index d733380..f9732fe 100755
> --- a/PVE/Storage.pm
> +++ b/PVE/Storage.pm
> @@ -28,6 +28,7 @@ use PVE::Storage::NFSPlugin;
>  use PVE::Storage::CIFSPlugin;
>  use PVE::Storage::ISCSIPlugin;
>  use PVE::Storage::RBDPlugin;
> +use PVE::Storage::CephFSPlugin;
>  use PVE::Storage::SheepdogPlugin;
>  use PVE::Storage::ISCSIDirectPlugin;
>  use PVE::Storage::GlusterfsPlugin;
> @@ -46,6 +47,7 @@ PVE::Storage::NFSPlugin->register();
>  PVE::Storage::CIFSPlugin->register();
>  PVE::Storage::ISCSIPlugin->register();
>  PVE::Storage::RBDPlugin->register();
> +PVE::Storage::CephFSPlugin->register();
>  PVE::Storage::SheepdogPlugin->register();
>  PVE::Storage::ISCSIDirectPlugin->register();
>  PVE::Storage::GlusterfsPlugin->register();
> diff --git a/PVE/Storage/CephFSPlugin.pm b/PVE/Storage/CephFSPlugin.pm
> new file mode 100644
> index 0000000..a368c5b
> --- /dev/null
> +++ b/PVE/Storage/CephFSPlugin.pm
> @@ -0,0 +1,262 @@
> +package PVE::Storage::CephFSPlugin;
> +
> +use strict;
> +use warnings;
> +use IO::File;
> +use Net::IP;
> +use File::Path;
> +use PVE::Tools qw(run_command);
> +use PVE::ProcFSTools;
> +use PVE::Storage::Plugin;
> +use PVE::JSONSchema qw(get_standard_option);
> +
> +use base qw(PVE::Storage::Plugin);
> +
> +my $hostlist = sub {
> +    my ($list_text, $separator) = @_;
> +
> +    my @monhostlist = PVE::Tools::split_list($list_text);
> +    return join($separator, map {
> +     my ($host, $port) = PVE::Tools::parse_host_and_port($_);
> +     $port = defined($port) ? ":$port" : '';
> +     $host = "[$host]" if Net::IP::ip_is_ipv6($host);
> +     "${host}${port}"
> +    } @monhostlist);
> +};

probably a candidate for merging with RBDPlugin.pm

> +
> +my $parse_ceph_config = sub {
> +    my ($filename) = @_;
> +
> +    my $cfg = {};
> +
> +    return $cfg if ! -f $filename;
> +
> +    my $fh = IO::File->new($filename, "r") ||
> +     die "unable to open '$filename' - $!\n";
> +
> +    my $section;
> +
> +    while (defined(my $line = <$fh>)) {
> +     $line =~ s/[;#].*$//;
> +     $line =~ s/^\s+//;
> +     $line =~ s/\s+$//;
> +     next if !$line;
> +
> +     $section = $1 if $line =~ m/^\[(\S+)\]$/;
> +     if (!$section) {
> +         warn "no section - skip: $line\n";
> +         next;
> +     }
> +
> +     if ($line =~ m/^(.*?\S)\s*=\s*(\S.*)$/) {
> +         $cfg->{$section}->{$1} = $2;
> +     }
> +
> +    }
> +
> +    return $cfg;
> +};

this is only used once, see below

> +
> +my $get_monaddr_list = sub {
> +    my ($scfg, $configfile) = @_;
> +
> +    my $server;
> +    my $no_mon = !defined($scfg->{monhost});
> +
> +    if (($no_mon) && defined($configfile)) {
> +     my $config = $parse_ceph_config->($configfile);

I'd prefer to get the monitor list from ceph instead of attempting to
parse the config. e.g., the following should work:

ceph [...] mon dump -f json

or the equivalent librados command?

> +     $server = join(',', sort { $a cmp $b }
> +         map { $config->{$_}->{'mon addr'} } grep {/mon/} %{$config});
> +    }else {
> +     $server = $hostlist->($scfg->{monhost}, ',');
> +    }
> +
> +    return $server;
> +};
> +
> +my $get_configfile = sub {
> +    my ($storeid) = @_;
> +
> +    my $configfile;
> +    my $pve_cephconfig = '/etc/pve/ceph.conf';
> +    my $storeid_cephconfig = "/etc/pve/priv/ceph/${storeid}.conf";
> +
> +    if (-e $pve_cephconfig) {
> +     if (-e $storeid_cephconfig) {
> +         warn "ignoring custom ceph config for storage '$storeid', 'monhost' 
> is not set (assuming pveceph managed cluster)!\n";
> +     }
> +     $configfile = $pve_cephconfig;
> +    } elsif (-e $storeid_cephconfig) {
> +     $configfile = $storeid_cephconfig;
> +    } else {
> +     die "Missing ceph config for ${storeid} storage\n";
> +    }
> +
> +    return $configfile;
> +};

$get_configfile is only called if no monhost is defined in the storage,
so it does not make much sense in its current form. IMHO it should be
called for external clusters as well though, similar to the behaviour in
RBDPlugin.pm (or even merged with it?)

> +
> +sub cephfs_is_mounted {
> +    my ($scfg, $storeid, $mountdata) = @_;
> +
> +    my $no_mon = !defined($scfg->{monhost});
> +
> +    my $configfile = $get_configfile->($storeid) if ($no_mon);

see above

> +    my $server = $get_monaddr_list->($scfg, $configfile);
> +
> +    my $subdir = $scfg->{subdir} ? $scfg->{subdir} : '/';
> +    my $mountpoint = $scfg->{path};
> +    my $source = "$server:$subdir";
> +
> +    $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
> +    return $mountpoint if grep {
> +     $_->[2] =~ /^ceph|fuse\.ceph-fuse/ &&
> +     $_->[0] =~ m#^\Q$source\E|ceph-fuse$# &&
> +     $_->[1] eq $mountpoint

the matching is inconsistent, with // and m## ?

> +    } @$mountdata;
> +
> +    warn "A filesystem is still mounted on $mountpoint\n"
> +     if grep { $_->[1] eq $mountpoint } @$mountdata;
> +
> +    return undef;
> +}
> +
> +sub cephfs_mount {
> +    my ($scfg, $storeid) = @_;
> +
> +    my $cmd;
> +
> +    my $no_mon = !defined($scfg->{monhost});
> +
> +    my $keyfile = "/etc/pve/priv/ceph/${storeid}.keyring";
> +    my $username =  $scfg->{username} ? $scfg->{username} : 'admin';
> +    my $mountpoint = $scfg->{path};
> +    my $subdir = $scfg->{subdir} ? $scfg->{subdir} : '/';
> +
> +    my $configfile = $get_configfile->($storeid) if ($no_mon);

see above

> +    my $server = $get_monaddr_list->($scfg, $configfile);
> +
> +    # fuse -> client-enforced quotas (kernel doesn't), updates w/ ceph-fuse 
> pkg
> +    # kernel -> better performance, less frequent updates
> +    if ($scfg->{fuse}) {
> +         # FIXME: ceph-fuse client complains about missing ceph.conf or 
> keyring if
> +         # not provided on its default locations but still connects. Fix 
> upstream??

are these complaints visible somewhere?

> +         $cmd = ['/usr/bin/ceph-fuse', '-n', "client.$username", '-m', 
> $server];
> +         push @$cmd, '--keyfile', $keyfile if (-e $keyfile);
> +         push @$cmd, '-r', $subdir if !($subdir =~ m|^/$|);

$subdir ne '/'?                      ^^^^^^^^^^^^^^^^^^^^

> +         push @$cmd, $mountpoint;
> +         push @$cmd, '--conf', $configfile if defined($configfile);
> +    }else {
> +     my $source = "$server:$subdir";
> +     $cmd = ['/bin/mount', '-t', 'ceph', $source, $mountpoint, '-o', 
> "name=$username"];
> +     push @$cmd, '-o', "secretfile=$keyfile" if (-e $keyfile);

is there a way to pass a config to the kernel as well? e.g. for logging
and other options?

> +    }
> +
> +    if ($scfg->{options}) {
> +     push @$cmd, '-o', $scfg->{options};
> +    }
> +
> +    run_command($cmd, errmsg => "mount error");
> +}
> +
> +# Configuration
> +
> +sub type {
> +    return 'cephfs';
> +}
> +
> +sub plugindata {
> +    return {
> +     content => [ { vztmpl => 1, iso => 1, backup => 1},
> +                  { backup => 1 }],
> +     format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],

the format does not make sense if we don't allow images?

> +    };
> +}
> +
> +sub properties {
> +    return {
> +     fuse => {
> +         description => "Mount cephfs through fuse.",

CephFS / FUSE

> +         type => 'boolean',
> +     },
> +     subdir => {
> +         description => "Subdir to mount.",
> +         type => 'string', format => 'pve-storage-path',
> +     },
> +    };
> +}
> +
> +sub options {
> +    return {
> +     path => { fixed => 1 },
> +     monhost => { optional => 1},
> +     nodes => { optional => 1 },
> +     subdir => { optional => 1 },
> +     disable => { optional => 1 },
> +     options => { optional => 1 },
> +     username => { optional => 1 },
> +     content => { optional => 1 },
> +     format => { optional => 1 },
> +     mkdir => { optional => 1 },
> +     fuse => { optional => 1 },
> +     bwlimit => { optional => 1 },
> +    };
> +}
> +
> +sub check_config {
> +    my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
> +
> +    $config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
> +
> +    return $class->SUPER::check_config($sectionId, $config, $create, 
> $skipSchemaCheck);
> +}
> +
> +# Storage implementation
> +
> +sub status {
> +    my ($class, $storeid, $scfg, $cache) = @_;
> +
> +    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
> +     if !$cache->{mountdata};
> +
> +    return undef if !cephfs_is_mounted($scfg, $storeid, $cache->{mountdata});
> +
> +    return $class->SUPER::status($storeid, $scfg, $cache);
> +}
> +
> +sub activate_storage {
> +    my ($class, $storeid, $scfg, $cache) = @_;
> +
> +    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
> +     if !$cache->{mountdata};
> +
> +    my $path = $scfg->{path};
> +
> +    if (!cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
> +
> +     # NOTE: only call mkpath when not mounted (avoid hang
> +     # when cephfs is offline
> +
> +     mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});
> +
> +     die "unable to activate storage '$storeid' - " .
> +         "directory '$path' does not exist\n" if ! -d $path;
> +
> +     cephfs_mount($scfg, $storeid);
> +    }
> +
> +    $class->SUPER::activate_storage($storeid, $scfg, $cache);
> +}
> +
> +sub deactivate_storage {
> +    my ($class, $storeid, $scfg, $cache) = @_;
> +
> +    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
> +     if !$cache->{mountdata};
> +
> +    my $path = $scfg->{path};
> +
> +    if (cephfs_is_mounted($scfg, $storeid, $cache->{mountdata})) {
> +     my $cmd = ['/bin/umount', $path];
> +     run_command($cmd, errmsg => 'umount error'); 
> +    }
> +}
> diff --git a/PVE/Storage/Makefile b/PVE/Storage/Makefile
> index 7b168fa..ad69532 100644
> --- a/PVE/Storage/Makefile
> +++ b/PVE/Storage/Makefile
> @@ -1,4 +1,4 @@
> -SOURCES=Plugin.pm DirPlugin.pm LVMPlugin.pm NFSPlugin.pm CIFSPlugin.pm 
> ISCSIPlugin.pm RBDPlugin.pm SheepdogPlugin.pm ISCSIDirectPlugin.pm 
> GlusterfsPlugin.pm ZFSPoolPlugin.pm ZFSPlugin.pm DRBDPlugin.pm 
> LvmThinPlugin.pm
> +SOURCES=Plugin.pm DirPlugin.pm LVMPlugin.pm NFSPlugin.pm CIFSPlugin.pm 
> ISCSIPlugin.pm CephFSPlugin.pm RBDPlugin.pm SheepdogPlugin.pm 
> ISCSIDirectPlugin.pm GlusterfsPlugin.pm ZFSPoolPlugin.pm ZFSPlugin.pm 
> DRBDPlugin.pm LvmThinPlugin.pm
>  
>  .PHONY: install
>  install:
> diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
> index 163871d..3769e01 100644
> --- a/PVE/Storage/Plugin.pm
> +++ b/PVE/Storage/Plugin.pm
> @@ -24,6 +24,7 @@ our @SHARED_STORAGE = (
>      'nfs',
>      'cifs',
>      'rbd',
> +    'cephfs',
>      'sheepdog',
>      'iscsidirect',
>      'glusterfs',
> diff --git a/debian/control b/debian/control
> index 2cf585a..70b5e2d 100644
> --- a/debian/control
> +++ b/debian/control
> @@ -28,6 +28,8 @@ Depends: cstream,
>           udev,
>           smbclient,
>           cifs-utils,
> +         ceph-common,

what do we need ceph-common for in this series? (I think this could be
sent as separate one-line fix, since we actually do need it in
RBDPlugin.pm ;))

> +         ceph-fuse,
>           ${perl:Depends},
>  Description: Proxmox VE storage management library
>   This package contains the storage management library used by Proxmox VE.
> -- 
> 2.11.0
> 
> 
> _______________________________________________
> pve-devel mailing list
> pve-devel@pve.proxmox.com
> https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

_______________________________________________
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

Reply via email to