This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit 17f28563488ddaeb2daa60b53bd8dc19e25cddef
Author: Qian Zhang <zhq527...@gmail.com>
AuthorDate: Wed Aug 26 10:33:26 2020 +0800

    Enabled CSI volume access for non-root users.
    
    When a container is launched as a non-root user, the CSI volume
    isolator now changes the ownership of each CSI volume's mount point
    to that user before mounting it into the container. The ownership
    is left unchanged if the volume is currently in use by another
    container, so a shared volume is never chowned away from its
    existing users.
    
    Review: https://reviews.apache.org/r/72804
---
 .../mesos/isolators/volume/csi/isolator.cpp        | 40 ++++++++++++++++++++++
 .../mesos/isolators/volume/csi/isolator.hpp        |  2 ++
 2 files changed, 42 insertions(+)

diff --git a/src/slave/containerizer/mesos/isolators/volume/csi/isolator.cpp b/src/slave/containerizer/mesos/isolators/volume/csi/isolator.cpp
index 02ef1f2..d5d8835 100644
--- a/src/slave/containerizer/mesos/isolators/volume/csi/isolator.cpp
+++ b/src/slave/containerizer/mesos/isolators/volume/csi/isolator.cpp
@@ -356,6 +356,7 @@ Future<Option<ContainerLaunchInfo>> VolumeCSIIsolatorProcess::prepare(
 
     Mount mount;
     mount.csiVolume = csiVolume;
+    mount.volume = volume;
     mount.target = target;
     mount.volumeMode = _volume.mode();
 
@@ -398,6 +399,9 @@ Future<Option<ContainerLaunchInfo>> VolumeCSIIsolatorProcess::prepare(
         &VolumeCSIIsolatorProcess::_prepare,
         containerId,
         mounts,
+        containerConfig.has_user()
+          ? containerConfig.user()
+          : Option<string>::none(),
         lambda::_1));
 }
 
@@ -405,6 +409,7 @@ Future<Option<ContainerLaunchInfo>> VolumeCSIIsolatorProcess::prepare(
 Future<Option<ContainerLaunchInfo>> VolumeCSIIsolatorProcess::_prepare(
     const ContainerID& containerId,
     const vector<Mount>& mounts,
+    const Option<string>& user,
     const vector<Future<string>>& futures)
 {
 
@@ -432,6 +437,41 @@ Future<Option<ContainerLaunchInfo>> VolumeCSIIsolatorProcess::_prepare(
     const string& source = sources[i];
     const Mount& mount = mounts[i];
 
+    if (user.isSome() && user.get() != "root") {
+      bool isVolumeInUse = false;
+
+      // Check if the volume is currently used by another container.
+      foreachpair (const ContainerID& _containerId,
+                   const Owned<Info>& info,
+                   infos) {
+        // Skip self.
+        if (_containerId == containerId) {
+          continue;
+        }
+
+        if (info->volumes.contains(mount.volume)) {
+          isVolumeInUse = true;
+          break;
+        }
+      }
+
+      if (!isVolumeInUse) {
+        LOG(INFO) << "Changing the ownership of the CSI volume at '" << source
+                  << "' to user '" << user.get() << "' for container "
+                  << containerId;
+
+        Try<Nothing> chown = os::chown(user.get(), source, false);
+        if (chown.isError()) {
+          return Failure(
+              "Failed to set '" + user.get() + "' as the owner of the "
+              "CSI volume at '" + source + "': " + chown.error());
+        }
+      } else {
+        LOG(INFO) << "Leaving the ownership of the CSI volume at '"
+                  << source << "' unchanged because it is in use";
+      }
+    }
+
     LOG(INFO) << "Mounting CSI volume mount point '" << source
               << "' to '" << mount.target << "' for container " << containerId;
 
diff --git a/src/slave/containerizer/mesos/isolators/volume/csi/isolator.hpp b/src/slave/containerizer/mesos/isolators/volume/csi/isolator.hpp
index 373b629..4349acd 100644
--- a/src/slave/containerizer/mesos/isolators/volume/csi/isolator.hpp
+++ b/src/slave/containerizer/mesos/isolators/volume/csi/isolator.hpp
@@ -68,6 +68,7 @@ private:
   struct Mount
   {
     Volume::Source::CSIVolume csiVolume;
+    CSIVolume volume;
     std::string target;
     Volume::Mode volumeMode;
   };
@@ -92,6 +93,7 @@ private:
   process::Future<Option<mesos::slave::ContainerLaunchInfo>> _prepare(
       const ContainerID& containerId,
       const std::vector<Mount>& mounts,
+      const Option<std::string>& user,
       const std::vector<process::Future<std::string>>& futures);
 
   process::Future<Nothing> _cleanup(
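
For readers following along outside the Mesos tree, below is a minimal
standalone sketch of the ownership rule this patch adds: chown the
volume's mount point to the container's user only when no other
container is using the volume. It is an illustration under stated
assumptions, not the patch itself: a plain std::map stands in for the
isolator's 'infos' hashmap, string IDs stand in for ContainerID and
Volume::Source::CSIVolume, and POSIX getpwnam(3)/chown(2) replace
stout's os::chown().

#include <pwd.h>
#include <unistd.h>

#include <cstdio>
#include <iostream>
#include <map>
#include <set>
#include <string>

// Hypothetical stand-ins for ContainerID and Volume::Source::CSIVolume.
using ContainerId = std::string;
using VolumeId = std::string;

// Returns true if any container other than 'self' is using 'volume',
// mirroring the loop over 'infos' in the patch.
bool isVolumeInUse(
    const std::map<ContainerId, std::set<VolumeId>>& infos,
    const ContainerId& self,
    const VolumeId& volume)
{
  for (const auto& [containerId, volumes] : infos) {
    if (containerId == self) {
      continue; // Skip the container being prepared.
    }

    if (volumes.count(volume) > 0) {
      return true;
    }
  }

  return false;
}

// Changes the ownership of 'path' to 'user' only when the container runs
// as a non-root user and the volume is not shared with another container;
// shared volumes keep their existing ownership, as in the patch.
bool maybeChownVolume(
    const std::map<ContainerId, std::set<VolumeId>>& infos,
    const ContainerId& self,
    const VolumeId& volume,
    const std::string& path,
    const std::string& user)
{
  if (user == "root" || isVolumeInUse(infos, self, volume)) {
    return true; // Nothing to do for root or for in-use volumes.
  }

  passwd* pw = ::getpwnam(user.c_str());
  if (pw == nullptr) {
    std::cerr << "Unknown user '" << user << "'" << std::endl;
    return false;
  }

  // Non-recursive, like the patch's os::chown(user.get(), source, false).
  if (::chown(path.c_str(), pw->pw_uid, pw->pw_gid) != 0) {
    std::perror("chown");
    return false;
  }

  return true;
}

int main()
{
  std::map<ContainerId, std::set<VolumeId>> infos = {{"c1", {"vol-a"}}};

  // "vol-a" is in use by container "c1", so its ownership is left alone;
  // "vol-b" is unused, so it would be chowned to the hypothetical user.
  maybeChownVolume(infos, "c2", "vol-a", "/mnt/vol-a", "nobody");
  return maybeChownVolume(infos, "c2", "vol-b", "/mnt/vol-b", "nobody") ? 0 : 1;
}

The in-use check is what makes the chown safe: changing the ownership of
a volume already mounted into another container could break that
container's access to it.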
