adoroszlai commented on code in PR #5920:
URL: https://github.com/apache/ozone/pull/5920#discussion_r1449121624


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java:
##########
@@ -549,6 +555,134 @@ public void testOzoneAdminCmdList() throws 
UnsupportedEncodingException {
     execute(ozoneAdminShell, args);
   }
 
+  @Test
+  public void testOzoneAdminCmdListOpenFiles()
+      throws IOException, InterruptedException {
+
+    OzoneConfiguration conf = cluster.getConf();
+    final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId;
+
+    OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf);
+    clientConf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
+    FileSystem fs = FileSystem.get(clientConf);
+
+    assertNotEquals(fs.getConf().get(OZONE_FS_HSYNC_ENABLED),
+        "false", OZONE_FS_HSYNC_ENABLED + " is set to false " +
+            "by external force. Must be true to allow hsync to function");
+
+    final String volumeName = "volume-lof";
+    final String bucketName = "buck1";
+
+    String dir1 = hostPrefix + OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX +
+        bucketName + OM_KEY_PREFIX + "dir1";
+    // Create volume, bucket, dir
+    assertTrue(fs.mkdirs(new Path(dir1)));
+    String keyPrefix = OM_KEY_PREFIX + "key";
+
+    final int numKeys = 5;
+    String[] keys = new String[numKeys];
+
+    for (int i = 0; i < numKeys; i++) {
+      keys[i] = dir1 + keyPrefix + i;
+    }
+
+    int pageSize = 3;
+
+    FSDataOutputStream[] streams = new FSDataOutputStream[numKeys];
+    // Create multiple keys and hold them open
+    for (int i = 0; i < numKeys; i++) {
+      streams[i] = fs.create(new Path(keys[i]));
+      streams[i].write(1);
+    }

Review Comment:
   Move into the `try` block below.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java:
##########
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.admin.om;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Handler of ozone admin om list-open-files command.
+ */
+@CommandLine.Command(
+    name = "list-open-files",
+    aliases = {"list-open-keys", "lof", "lok"},
+    description = "Lists open files (keys) in Ozone Manager.",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class
+)
+public class ListOpenFilesSubCommand implements Callable<Void> {
+
+  @CommandLine.ParentCommand
+  private OMAdmin parent;
+
+  @CommandLine.Option(
+      names = {"-id", "--service-id"},
+      description = "Ozone Manager Service ID",
+      required = false
+  )
+  private String omServiceId;
+
+  @CommandLine.Option(
+      names = {"-host", "--service-host"},

Review Comment:
   Please do not add `-id` and `-host` (see HDDS-6736).



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java:
##########
@@ -1451,7 +1585,7 @@ public void testKeyDeleteWhenTrashEnableOBS()
   }
 
   @Test
-  public void testRecursiveBucketDelete()
+  public void testZRecursiveBucketDelete()

Review Comment:
   If you intend to run this last, renaming it may not achieve that, since 
default order is "obscure":
   
   > By default, test classes and methods will be ordered using an algorithm 
that is deterministic but intentionally nonobvious.
   
   Please use [JUnit5 
ordering](https://junit.org/junit5/docs/current/user-guide/#writing-tests-test-execution-order-methods).
   
   Example:
   
   
https://github.com/apache/ozone/blob/46b6f3def1d84ca769affb4d3f0d84dece6e8567/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java#L77-L78
   
   and
   
   
https://github.com/apache/ozone/blob/46b6f3def1d84ca769affb4d3f0d84dece6e8567/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java#L241-L243



##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java:
##########
@@ -558,6 +561,128 @@ public void testListKeysWithFewDeleteEntriesInCache() 
throws Exception {
 
   }
 
+  @Test
+  public void testListOpenFilesFSO() throws Exception {
+    testListOpenFiles(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+  }
+
+  @Test
+  public void testListOpenFilesOBS() throws Exception {
+    testListOpenFiles(BucketLayout.OBJECT_STORE);
+  }
+
+  @Test
+  public void testListOpenFilesLegacy() throws Exception {
+    // OBS and LEGACY should share the same internal structure for the most 
part
+    // still, testing both here for the sake of completeness
+    testListOpenFiles(BucketLayout.LEGACY);
+  }
+
+  /**
+   * Tests inner impl of listOpenFiles with different bucket types with and
+   * without pagination. NOTE: This UT does NOT test hsync in this since hsync
+   * status check is done purely on the client side.
+   * @param bucketLayout BucketLayout
+   */
+  public void testListOpenFiles(BucketLayout bucketLayout) throws Exception {

Review Comment:
   Let's use `@ParameterizedTest` for the same.
   
   ```suggestion
     /**
      * Tests inner impl of listOpenFiles with different bucket types with and
      * without pagination. NOTE: This UT does NOT test hsync in this since 
hsync
      * status check is done purely on the client side.
      */
     @ParameterizedTest
     @EnumSource
     void testListOpenFiles(BucketLayout bucketLayout) throws Exception {
   ```
   
   (additional import needed for `EnumSource`)



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java:
##########
@@ -549,6 +555,134 @@ public void testOzoneAdminCmdList() throws 
UnsupportedEncodingException {
     execute(ozoneAdminShell, args);
   }
 
+  @Test
+  public void testOzoneAdminCmdListOpenFiles()
+      throws IOException, InterruptedException {
+
+    OzoneConfiguration conf = cluster.getConf();
+    final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId;
+
+    OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf);
+    clientConf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
+    FileSystem fs = FileSystem.get(clientConf);
+
+    assertNotEquals(fs.getConf().get(OZONE_FS_HSYNC_ENABLED),
+        "false", OZONE_FS_HSYNC_ENABLED + " is set to false " +
+            "by external force. Must be true to allow hsync to function");
+
+    final String volumeName = "volume-lof";
+    final String bucketName = "buck1";
+
+    String dir1 = hostPrefix + OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX +
+        bucketName + OM_KEY_PREFIX + "dir1";
+    // Create volume, bucket, dir
+    assertTrue(fs.mkdirs(new Path(dir1)));
+    String keyPrefix = OM_KEY_PREFIX + "key";
+
+    final int numKeys = 5;
+    String[] keys = new String[numKeys];
+
+    for (int i = 0; i < numKeys; i++) {
+      keys[i] = dir1 + keyPrefix + i;
+    }
+
+    int pageSize = 3;
+
+    FSDataOutputStream[] streams = new FSDataOutputStream[numKeys];
+    // Create multiple keys and hold them open
+    for (int i = 0; i < numKeys; i++) {
+      streams[i] = fs.create(new Path(keys[i]));
+      streams[i].write(1);
+    }
+
+    String path = "/" +  volumeName + "/" + bucketName;
+    try {
+      // Wait for DB flush
+      cluster.getOzoneManager().awaitDoubleBufferFlush();
+
+      String[] args = new String[] {"om", "lof",
+          "-id", omServiceId,
+          "-l", String.valueOf(numKeys + 1),  // pagination
+          "-p", path};
+      // Run listopenfiles
+      execute(ozoneAdminShell, args);
+      String cmdRes = getStdOut();
+      // Should have retrieved all 5 open keys
+      for (int i = 0; i < numKeys; i++) {
+        assertTrue(cmdRes.contains(keyPrefix + i));
+      }
+
+      // Try pagination
+      args = new String[] {"om", "lof",
+          "-id", omServiceId,
+          "-l", String.valueOf(pageSize),  // pagination
+          "-p", path};
+      execute(ozoneAdminShell, args);
+      cmdRes = getStdOut();
+
+      // Should have retrieved the 1st page only (3 keys)
+      for (int i = 0; i < pageSize; i++) {
+        assertTrue(cmdRes.contains(keyPrefix + i));
+      }
+      for (int i = pageSize; i < numKeys; i++) {
+        assertFalse(cmdRes.contains(keyPrefix + i));
+      }
+      // No hsync'ed file/key at this point
+      assertFalse(cmdRes.contains("\tYes\t"));
+
+      // Get last line of the output which has the continuation token
+      String[] lines = cmdRes.split("\n");
+      String nextCmd = lines[lines.length - 1].trim();
+      String kw = "--start=";
+      String contToken =
+          nextCmd.substring(nextCmd.lastIndexOf(kw) + kw.length());
+
+      args = new String[] {"om", "lof",
+          "-id", omServiceId,
+          "-l", String.valueOf(pageSize),  // pagination
+          "-p", path,
+          "-s", contToken};
+      execute(ozoneAdminShell, args);
+      cmdRes = getStdOut();
+
+      // Should have retrieved the 2nd page only (2 keys)
+      for (int i = 0; i < pageSize - 1; i++) {
+        assertFalse(cmdRes.contains(keyPrefix + i));
+      }
+      // Note: key2 is shown in the continuation token prompt
+      for (int i = pageSize - 1; i < numKeys; i++) {
+        assertTrue(cmdRes.contains(keyPrefix + i));
+      }
+
+      // hsync last key
+      streams[numKeys - 1].hsync();
+      // Wait for flush
+      cluster.getOzoneManager().awaitDoubleBufferFlush();
+
+      execute(ozoneAdminShell, args);
+      cmdRes = getStdOut();
+
+      // Verify that only one key is hsync'ed
+      assertTrue(cmdRes.contains("\tYes\t"), "One key should be hsync'ed");
+      assertTrue(cmdRes.contains("\tNo\t"), "One key should not be hsync'ed");
+    } finally {
+      // Cleanup
+      for (int i = 0; i < numKeys; i++) {
+        streams[i].close();
+      }

Review Comment:
   If `streams[i]` assignment is moved into the `try` block as suggested above, 
we need to check for `null` element here.  Direct `close()` call can be 
replaced with `IOUtils.closeQuietly` for that.
   
   What's more, it can handle all streams in one call.
   
   ```suggestion
         IOUtils.closeQuietly(streams);
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to