rakeshadr commented on code in PR #3383:
URL: https://github.com/apache/ozone/pull/3383#discussion_r871201848


##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java:
##########
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import com.codahale.metrics.Timer;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Option;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+
+/**
+ * Abstract class for OmBucketReadWriteFileOps/KeyOps Freon class
+ * implementations.
+ */
+public abstract class AbstractOmBucketReadWriteOps extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractOmBucketReadWriteOps.class);
+
+  @Option(names = {"-g", "--size"},
+      description = "Generated data size (in bytes) of each key/file to be " +
+          "written.",
+      defaultValue = "256")
+  private long sizeInBytes;
+
+  @Option(names = {"--buffer"},
+      description = "Size of buffer used for generating the key/file content.",
+      defaultValue = "64")
+  private int bufferSize;
+
+  @Option(names = {"-l", "--name-len"},
+      description = "Length of the random name of path you want to create.",
+      defaultValue = "10")
+  private int length;
+
+  @Option(names = {"-c", "--total-thread-count"},
+      description = "Total number of threads to be executed.",
+      defaultValue = "100")
+  private int totalThreadCount;
+
+  @Option(names = {"-T", "--read-thread-percentage"},
+      description = "Percentage of the total number of threads to be " +
+          "allocated for read operations. The remaining percentage of " +
+          "threads will be allocated for write operations.",
+      defaultValue = "90")
+  private int readThreadPercentage;
+
+  @Option(names = {"-R", "--num-of-read-operations"},
+      description = "Number of read operations to be performed by each " +
+          "thread.",
+      defaultValue = "50")
+  private int numOfReadOperations;
+
+  @Option(names = {"-W", "--num-of-write-operations"},
+      description = "Number of write operations to be performed by each " +
+          "thread.",
+      defaultValue = "10")
+  private int numOfWriteOperations;
+
+  private OzoneConfiguration ozoneConfiguration;
+  private Timer timer;
+  private ContentGenerator contentGenerator;
+  private int readThreadCount;
+  private int writeThreadCount;
+
+  protected abstract void display();
+
+  protected abstract void initialize(OzoneConfiguration configuration)
+      throws Exception;
+
+  @Override
+  public Void call() throws Exception {
+    init();
+
+    readThreadCount = (readThreadPercentage * totalThreadCount) / 100;
+    writeThreadCount = totalThreadCount - readThreadCount;
+
+    display();
+    print("SizeInBytes: " + sizeInBytes);
+    print("bufferSize: " + bufferSize);
+    print("totalThreadCount: " + totalThreadCount);
+    print("readThreadPercentage: " + readThreadPercentage);
+    print("writeThreadPercentage: " + (100 - readThreadPercentage));
+    print("readThreadCount: " + readThreadCount);
+    print("writeThreadCount: " + writeThreadCount);
+    print("numOfReadOperations: " + numOfReadOperations);
+    print("numOfWriteOperations: " + numOfWriteOperations);
+
+    ozoneConfiguration = createOzoneConfiguration();
+    contentGenerator = new ContentGenerator(sizeInBytes, bufferSize);
+    timer = getMetrics().timer("om-bucket-read-write-ops");
+
+    initialize(ozoneConfiguration);
+
+    return null;
+  }
+
+  protected abstract String createPath(String path) throws IOException;
+
+  protected int readOperations(int keyCountForRead) throws Exception {
+
+    // Create keyCountForRead/fileCountForRead (defaultValue = 1000) keys/files
+    // under rootPath/readPath
+    String readPath = createPath("readPath");
+    create(readPath, keyCountForRead);
+
+    // Start readThreadCount (defaultValue = 90) concurrent read threads
+    // performing numOfReadOperations (defaultValue = 50) iterations
+    // of read operations (bucket.listKeys(readPath) or
+    // fileSystem.listStatus(rootPath/readPath))
+    ExecutorService readService =
+        Executors.newFixedThreadPool(readThreadCount);
+    CompletionService<Integer> readExecutorCompletionService =
+        new ExecutorCompletionService<>(readService);
+    List<Future<Integer>> readFutures = new ArrayList<>();
+    for (int i = 0; i < readThreadCount; i++) {
+      readFutures.add(readExecutorCompletionService.submit(() -> {
+        int readCount = 0;
+        try {
+          for (int j = 0; j < numOfReadOperations; j++) {
+            readCount = getReadCount(readCount, "readPath");
+          }
+        } catch (IOException e) {
+          LOG.warn("Exception while listing keys/files ", e);
+        }
+        return readCount;
+      }));
+    }
+
+    int readResult = 0;
+    for (int i = 0; i < readFutures.size(); i++) {
+      try {
+        readResult += readExecutorCompletionService.take().get();

Review Comment:
   @tanvipenumudy we don't need this try-catch block. Let's throw the exception back to the caller.
   
      ```
      } catch (InterruptedException e) {
           e.printStackTrace();
         } catch (ExecutionException e) {
           e.printStackTrace();
         }
   ```



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/AbstractOmBucketReadWriteOps.java:
##########
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import com.codahale.metrics.Timer;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Option;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+
+/**
+ * Abstract class for OmBucketReadWriteFileOps/KeyOps Freon class
+ * implementations.
+ */
+public abstract class AbstractOmBucketReadWriteOps extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractOmBucketReadWriteOps.class);
+
+  @Option(names = {"-g", "--size"},
+      description = "Generated data size (in bytes) of each key/file to be " +
+          "written.",
+      defaultValue = "256")
+  private long sizeInBytes;
+
+  @Option(names = {"--buffer"},
+      description = "Size of buffer used for generating the key/file content.",
+      defaultValue = "64")
+  private int bufferSize;
+
+  @Option(names = {"-l", "--name-len"},
+      description = "Length of the random name of path you want to create.",
+      defaultValue = "10")
+  private int length;
+
+  @Option(names = {"-c", "--total-thread-count"},
+      description = "Total number of threads to be executed.",
+      defaultValue = "100")
+  private int totalThreadCount;
+
+  @Option(names = {"-T", "--read-thread-percentage"},
+      description = "Percentage of the total number of threads to be " +
+          "allocated for read operations. The remaining percentage of " +
+          "threads will be allocated for write operations.",
+      defaultValue = "90")
+  private int readThreadPercentage;
+
+  @Option(names = {"-R", "--num-of-read-operations"},
+      description = "Number of read operations to be performed by each " +
+          "thread.",
+      defaultValue = "50")
+  private int numOfReadOperations;
+
+  @Option(names = {"-W", "--num-of-write-operations"},
+      description = "Number of write operations to be performed by each " +
+          "thread.",
+      defaultValue = "10")
+  private int numOfWriteOperations;
+
+  private OzoneConfiguration ozoneConfiguration;
+  private Timer timer;
+  private ContentGenerator contentGenerator;
+  private int readThreadCount;
+  private int writeThreadCount;
+
+  protected abstract void display();
+
+  protected abstract void initialize(OzoneConfiguration configuration)
+      throws Exception;
+
+  @Override
+  public Void call() throws Exception {
+    init();
+
+    readThreadCount = (readThreadPercentage * totalThreadCount) / 100;
+    writeThreadCount = totalThreadCount - readThreadCount;
+
+    display();
+    print("SizeInBytes: " + sizeInBytes);
+    print("bufferSize: " + bufferSize);
+    print("totalThreadCount: " + totalThreadCount);
+    print("readThreadPercentage: " + readThreadPercentage);
+    print("writeThreadPercentage: " + (100 - readThreadPercentage));
+    print("readThreadCount: " + readThreadCount);
+    print("writeThreadCount: " + writeThreadCount);
+    print("numOfReadOperations: " + numOfReadOperations);
+    print("numOfWriteOperations: " + numOfWriteOperations);
+
+    ozoneConfiguration = createOzoneConfiguration();
+    contentGenerator = new ContentGenerator(sizeInBytes, bufferSize);
+    timer = getMetrics().timer("om-bucket-read-write-ops");
+
+    initialize(ozoneConfiguration);
+
+    return null;
+  }
+
+  protected abstract String createPath(String path) throws IOException;
+
+  protected int readOperations(int keyCountForRead) throws Exception {
+
+    // Create keyCountForRead/fileCountForRead (defaultValue = 1000) keys/files
+    // under rootPath/readPath
+    String readPath = createPath("readPath");
+    create(readPath, keyCountForRead);
+
+    // Start readThreadCount (defaultValue = 90) concurrent read threads
+    // performing numOfReadOperations (defaultValue = 50) iterations
+    // of read operations (bucket.listKeys(readPath) or
+    // fileSystem.listStatus(rootPath/readPath))
+    ExecutorService readService =
+        Executors.newFixedThreadPool(readThreadCount);
+    CompletionService<Integer> readExecutorCompletionService =
+        new ExecutorCompletionService<>(readService);
+    List<Future<Integer>> readFutures = new ArrayList<>();
+    for (int i = 0; i < readThreadCount; i++) {
+      readFutures.add(readExecutorCompletionService.submit(() -> {
+        int readCount = 0;
+        try {
+          for (int j = 0; j < numOfReadOperations; j++) {
+            readCount = getReadCount(readCount, "readPath");
+          }
+        } catch (IOException e) {
+          LOG.warn("Exception while listing keys/files ", e);
+        }
+        return readCount;
+      }));
+    }
+
+    int readResult = 0;
+    for (int i = 0; i < readFutures.size(); i++) {
+      try {
+        readResult += readExecutorCompletionService.take().get();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      } catch (ExecutionException e) {
+        e.printStackTrace();
+      }
+    }
+    readService.shutdown();
+
+    return readResult;
+  }
+
+  protected abstract int getReadCount(int readCount, String readPath)
+      throws IOException;
+
+  protected int writeOperations(int keyCountForWrite) throws Exception {
+
+    // Start writeThreadCount (defaultValue = 10) concurrent write threads
+    // performing numOfWriteOperations (defaultValue = 10) iterations
+    // of write operations (createKeys(writePath) or
+    // createFiles(rootPath/writePath))
+    String writePath = createPath("writePath");
+
+    ExecutorService writeService =
+        Executors.newFixedThreadPool(writeThreadCount);
+    CompletionService<Integer> writeExecutorCompletionService =
+        new ExecutorCompletionService<>(writeService);
+    List<Future<Integer>> writeFutures = new ArrayList<>();
+    for (int i = 0; i < writeThreadCount; i++) {
+      writeFutures.add(writeExecutorCompletionService.submit(() -> {
+        int writeCount = 0;
+        try {
+          for (int j = 0; j < numOfWriteOperations; j++) {
+            create(writePath, keyCountForWrite);
+            writeCount++;
+          }
+        } catch (IOException e) {
+          LOG.warn("Exception while creating keys/files ", e);
+        }
+        return writeCount;
+      }));
+    }
+
+    int writeResult = 0;
+    for (int i = 0; i < writeFutures.size(); i++) {
+      try {
+        writeResult += writeExecutorCompletionService.take().get();
+      } catch (InterruptedException e) {

Review Comment:
   @tanvipenumudy we don't need this try-catch block. Let's throw the exception back to the caller.
   
   ```
   } catch (InterruptedException e) {
        e.printStackTrace();
      } catch (ExecutionException e) {
        e.printStackTrace();
      }
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to