linyiqun commented on a change in pull request #1764:
URL: https://github.com/apache/ozone/pull/1764#discussion_r554361583



##########
File path: 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
##########
@@ -32,12 +32,13 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for create file request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, OPEN_FILE_TABLE})

Review comment:
       The dir table and the open file table are updated here, so FILE_TABLE
should be DIRECTORY_TABLE.

##########
File path: 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
##########
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
+import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for CreateKey request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})

Review comment:
       This should also be cleanupTables = {OPEN_FILE_TABLE, DIRECTORY_TABLE}.

##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
##########
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+public class TestObjectStoreV1 {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(240000);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openKeyTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInFileTable(openKeyTable, file, dirPathC.getObjectID(), false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // after closing the file
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+

Review comment:
       Can we additionally add a key check for the openKeyTable? The open key
table should become empty after the file is closed.

##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
##########
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+public class TestObjectStoreV1 {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(240000);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openKeyTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInFileTable(openKeyTable, file, dirPathC.getObjectID(), false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // after closing the file
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+
+    ozoneBucket.deleteKey(key);
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+  }
+
+  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
+      String parentKey) throws Exception {
+    OMMetadataManager omMetadataManager =
+            cluster.getOzoneManager().getMetadataManager();
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(parentKey, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  private void verifyKeyInFileTable(Table<String, OmKeyInfo> keyTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = keyTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", keyTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", keyTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        Assert.assertTrue("Invalid Key: " + next.getKey(),
+                next.getKey().startsWith(parentID + "/" + fileName));

Review comment:
       It seems we could use the equals function to compare. There is only one
test file key in the key table/open key table in this test.

##########
File path: 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
##########
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+public class TestObjectStoreV1 {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(240000);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openKeyTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInFileTable(openKeyTable, file, dirPathC.getObjectID(), false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // after closing the file
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+
+    ozoneBucket.deleteKey(key);
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+  }
+
+  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
+      String parentKey) throws Exception {
+    OMMetadataManager omMetadataManager =
+            cluster.getOzoneManager().getMetadataManager();
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(parentKey, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  private void verifyKeyInFileTable(Table<String, OmKeyInfo> keyTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = keyTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", keyTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", keyTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        Assert.assertTrue("Invalid Key: " + next.getKey(),
+                next.getKey().startsWith(parentID + "/" + fileName));

Review comment:
       It seems we could use the equals function to compare. There is only one
test file key in the key table/open key table in this test.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to