smengcl commented on a change in pull request #2826:
URL: https://github.com/apache/ozone/pull/2826#discussion_r754838545



##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
##########
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.*;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
+import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test for Ozone Bucket Owner.
+ */
+public class TestBucketOwner {
+  @Rule public Timeout timeout = Timeout.seconds(120);
+
+  private static MiniOzoneCluster cluster;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestBucketOwner.class);
+  private static  UserGroupInformation adminUser =
+          UserGroupInformation.createUserForTesting("om",
+          new String[] {"ozone"});
+  private static  UserGroupInformation user1 = UserGroupInformation
+          .createUserForTesting("user1", new String[] {"test1"});
+  private static UserGroupInformation user2 = UserGroupInformation
+          .createUserForTesting("user2", new String[] {"test2"});
+  private static UserGroupInformation user3 = UserGroupInformation
+          .createUserForTesting("user3", new String[] {"test3"});
+  private static OzoneClient client;
+  private static ObjectStore objectStore;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    // loginUser is the user running this test.
+    UserGroupInformation.setLoginUser(adminUser);
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    String omId = UUID.randomUUID().toString();
+    conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, true,
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId)
+            .setScmId(scmId).setOmId(omId).build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    /* r = READ, w = WRITE, c = CREATE, d = DELETE
+       l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
+    String aclWorldAll = "world::a";
+    createVolumeWithOwnerAndAcl(objectStore, "volume1", "user2", aclWorldAll);
+    UserGroupInformation.setLoginUser(user1);
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume("volume1");
+    BucketArgs omBucketArgs = BucketArgs.newBuilder()
+            .setStorageType(StorageType.DISK).setOwner("user1").build();
+    volume.createBucket("bucket1", omBucketArgs);
+    volume.createBucket("bucket2", omBucketArgs);
+    volume.createBucket("bucket3", omBucketArgs);
+  }
+
+  @AfterClass
+  public static void stopCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testBucketOwner() throws Exception {
+    // Test Key Operations as Bucket Owner,  Non-Volume Owner
+    UserGroupInformation.setLoginUser(user1);
+    OzoneVolume volume = cluster.getClient().getObjectStore()
+            .getVolume("volume1");
+    OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+    //Key Create
+    createKey(ozoneBucket, "key1", 10, new byte[10]);
+    createKey(ozoneBucket, "key2", 10, new byte[10]);
+    //Key Delete
+    ozoneBucket.deleteKey("key1");
+    //Bucket Delete
+    volume.deleteBucket("bucket3");
+  }
+
+  @Test
+  public void testNonBucketNonVolumeOwner() throws Exception {
+    // Test Key Operations Non-Bucket Owner, Non-Volume Owner
+    //Key Create
+    UserGroupInformation.setLoginUser(user3);
+    OzoneBucket ozoneBucket;
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      createKey(ozoneBucket, "key3", 10, new byte[10]);
+      fail();
+    } catch (Exception ex) {
+      LOG.info(ex.getMessage());
+    }
+    //Key Delete - should fail
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      ozoneBucket.deleteKey("key2");
+      fail();

Review comment:
       ```suggestion
         fail("Delete key as non-volume and non-bucket owner should fail");
       ```

##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
##########
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.*;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
+import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test for Ozone Bucket Owner.
+ */
+public class TestBucketOwner {
+  @Rule public Timeout timeout = Timeout.seconds(120);
+
+  private static MiniOzoneCluster cluster;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestBucketOwner.class);
+  private static  UserGroupInformation adminUser =
+          UserGroupInformation.createUserForTesting("om",
+          new String[] {"ozone"});
+  private static  UserGroupInformation user1 = UserGroupInformation
+          .createUserForTesting("user1", new String[] {"test1"});
+  private static UserGroupInformation user2 = UserGroupInformation
+          .createUserForTesting("user2", new String[] {"test2"});
+  private static UserGroupInformation user3 = UserGroupInformation
+          .createUserForTesting("user3", new String[] {"test3"});
+  private static OzoneClient client;
+  private static ObjectStore objectStore;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    // loginUser is the user running this test.
+    UserGroupInformation.setLoginUser(adminUser);
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    String omId = UUID.randomUUID().toString();
+    conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, true,
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId)
+            .setScmId(scmId).setOmId(omId).build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    /* r = READ, w = WRITE, c = CREATE, d = DELETE
+       l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
+    String aclWorldAll = "world::a";
+    createVolumeWithOwnerAndAcl(objectStore, "volume1", "user2", aclWorldAll);
+    UserGroupInformation.setLoginUser(user1);
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume("volume1");
+    BucketArgs omBucketArgs = BucketArgs.newBuilder()
+            .setStorageType(StorageType.DISK).setOwner("user1").build();
+    volume.createBucket("bucket1", omBucketArgs);
+    volume.createBucket("bucket2", omBucketArgs);
+    volume.createBucket("bucket3", omBucketArgs);
+  }
+
+  @AfterClass
+  public static void stopCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testBucketOwner() throws Exception {
+    // Test Key Operations as Bucket Owner,  Non-Volume Owner
+    UserGroupInformation.setLoginUser(user1);
+    OzoneVolume volume = cluster.getClient().getObjectStore()
+            .getVolume("volume1");
+    OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+    //Key Create
+    createKey(ozoneBucket, "key1", 10, new byte[10]);
+    createKey(ozoneBucket, "key2", 10, new byte[10]);
+    //Key Delete
+    ozoneBucket.deleteKey("key1");
+    //Bucket Delete
+    volume.deleteBucket("bucket3");
+  }
+
+  @Test
+  public void testNonBucketNonVolumeOwner() throws Exception {
+    // Test Key Operations Non-Bucket Owner, Non-Volume Owner
+    //Key Create
+    UserGroupInformation.setLoginUser(user3);
+    OzoneBucket ozoneBucket;
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      createKey(ozoneBucket, "key3", 10, new byte[10]);
+      fail();

Review comment:
       ```suggestion
         fail("Create key as non-volume and non-bucket owner should fail");
       ```

##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
##########
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.*;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
+import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test for Ozone Bucket Owner.
+ */
+public class TestBucketOwner {
+  @Rule public Timeout timeout = Timeout.seconds(120);
+
+  private static MiniOzoneCluster cluster;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestBucketOwner.class);
+  private static  UserGroupInformation adminUser =
+          UserGroupInformation.createUserForTesting("om",
+          new String[] {"ozone"});
+  private static  UserGroupInformation user1 = UserGroupInformation
+          .createUserForTesting("user1", new String[] {"test1"});
+  private static UserGroupInformation user2 = UserGroupInformation
+          .createUserForTesting("user2", new String[] {"test2"});
+  private static UserGroupInformation user3 = UserGroupInformation
+          .createUserForTesting("user3", new String[] {"test3"});
+  private static OzoneClient client;
+  private static ObjectStore objectStore;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    // loginUser is the user running this test.
+    UserGroupInformation.setLoginUser(adminUser);
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    String omId = UUID.randomUUID().toString();
+    conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, true,
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId)
+            .setScmId(scmId).setOmId(omId).build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    /* r = READ, w = WRITE, c = CREATE, d = DELETE
+       l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
+    String aclWorldAll = "world::a";
+    createVolumeWithOwnerAndAcl(objectStore, "volume1", "user2", aclWorldAll);
+    UserGroupInformation.setLoginUser(user1);
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume("volume1");
+    BucketArgs omBucketArgs = BucketArgs.newBuilder()
+            .setStorageType(StorageType.DISK).setOwner("user1").build();
+    volume.createBucket("bucket1", omBucketArgs);
+    volume.createBucket("bucket2", omBucketArgs);
+    volume.createBucket("bucket3", omBucketArgs);
+  }
+
+  @AfterClass
+  public static void stopCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testBucketOwner() throws Exception {
+    // Test Key Operations as Bucket Owner,  Non-Volume Owner
+    UserGroupInformation.setLoginUser(user1);
+    OzoneVolume volume = cluster.getClient().getObjectStore()
+            .getVolume("volume1");
+    OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+    //Key Create
+    createKey(ozoneBucket, "key1", 10, new byte[10]);
+    createKey(ozoneBucket, "key2", 10, new byte[10]);
+    //Key Delete
+    ozoneBucket.deleteKey("key1");
+    //Bucket Delete
+    volume.deleteBucket("bucket3");
+  }
+
+  @Test
+  public void testNonBucketNonVolumeOwner() throws Exception {
+    // Test Key Operations Non-Bucket Owner, Non-Volume Owner
+    //Key Create
+    UserGroupInformation.setLoginUser(user3);
+    OzoneBucket ozoneBucket;
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      createKey(ozoneBucket, "key3", 10, new byte[10]);
+      fail();
+    } catch (Exception ex) {
+      LOG.info(ex.getMessage());
+    }
+    //Key Delete - should fail
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      ozoneBucket.deleteKey("key2");
+      fail();
+    } catch (Exception ex) {
+      LOG.info(ex.getMessage());
+    }
+    //Key Rename - should fail
+    try {
+      OzoneVolume volume = cluster.getClient().getObjectStore()
+              .getVolume("volume1");
+      ozoneBucket = volume.getBucket("bucket1");
+      ozoneBucket.renameKey("key2", "key4");
+      fail();

Review comment:
       ```suggestion
         fail("Rename key as non-volume and non-bucket owner should fail");
       ```




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to