ahmarsuhail commented on code in PR #6240:
URL: https://github.com/apache/hadoop/pull/6240#discussion_r1399090061


##########
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/BlockManagerParameters.java:
##########
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.fs.impl.prefetch;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
+
+/**
+ * This class is used to provide parameters to {@link BlockManager}.
+ */
+@InterfaceAudience.Private
+public final class BlockManagerParameters {
+
+  /**
+   * Asynchronous tasks are performed in this pool.
+   */
+  private ExecutorServiceFuturePool futurePool;
+
+  /**
+   * Information about each block of the underlying file.
+   */
+  private BlockData blockData;
+
+  /**
+   * Size of the in-memory cache in terms of number of blocks.
+   */
+  private int bufferPoolSize;
+
+  /**
+   * Statistics for the stream.
+   */
+  private PrefetchingStatistics prefetchingStatistics;
+
+  /**
+   * The configuration object.
+   */
+  private Configuration conf;
+
+  /**
+   * The local dir allocator instance.
+   */
+  private LocalDirAllocator localDirAllocator;
+
+  /**
+   * Max blocks count to be kept in cache at any time.
+   */
+  private int maxBlocksCount;
+
+  /**
+   * Tracker with statistics to update.
+   */
+  private DurationTrackerFactory trackerFactory;
+
+  public ExecutorServiceFuturePool getFuturePool() {
+    return futurePool;
+  }
+
+  public BlockData getBlockData() {
+    return blockData;
+  }
+
+  public int getBufferPoolSize() {
+    return bufferPoolSize;
+  }
+
+  public PrefetchingStatistics getPrefetchingStatistics() {
+    return prefetchingStatistics;
+  }
+
+  public Configuration getConf() {
+    return conf;
+  }
+
+  public LocalDirAllocator getLocalDirAllocator() {
+    return localDirAllocator;
+  }
+
+  public int getMaxBlocksCount() {
+    return maxBlocksCount;
+  }
+
+  public DurationTrackerFactory getTrackerFactory() {
+    return trackerFactory;
+  }
+
+  public BlockManagerParameters setFuturePool(

Review Comment:
   could you rename the setters to "withFuturePool" etc. instead of "set"? That keeps things uniform with the other builders in the project.
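   For illustration, a minimal sketch of what one renamed fluent setter could look like (the `futurePool` field and the return-`this` pattern are assumed from the existing `setFuturePool` in this class):

   ```java
     /**
      * Sets the future pool used for asynchronous prefetch tasks.
      *
      * @param pool the future pool to use.
      * @return this instance, for chaining.
      */
     public BlockManagerParameters withFuturePool(final ExecutorServiceFuturePool pool) {
       this.futurePool = pool;
       return this;
     }
   ```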



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/prefetch/TestS3ACachingBlockManager.java:
##########
@@ -61,50 +65,93 @@ public class TestS3ACachingBlockManager extends AbstractHadoopTestBase {
 
   private final BlockData blockData = new BlockData(FILE_SIZE, BLOCK_SIZE);
 
+  private static final Configuration CONF =
+      S3ATestUtils.prepareTestConfiguration(new Configuration());
+
   @Test
   public void testArgChecks() throws Exception {
     MockS3ARemoteObject s3File = new MockS3ARemoteObject(FILE_SIZE, false);
     S3ARemoteObjectReader reader = new S3ARemoteObjectReader(s3File);
 
     Configuration conf = new Configuration();
+    BlockManagerParameters blockManagerParamsBuilder1 =

Review Comment:
   with the builder, this test is getting really long now. How about we split it into separate tests? One test for the valid block manager and the assertions already done on it, and then a separate test for each of the illegal-argument cases, e.g. testNullFuturePool(), etc.
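   A rough sketch of one such split, assuming the `with*` builder naming suggested above, the test's existing `futurePool`/`POOL_SIZE` members, and a two-argument `S3ACachingBlockManager(params, reader)` constructor; the exact exception type should mirror whatever `testArgChecks` currently asserts:

   ```java
     @Test
     public void testNullFuturePool() throws Exception {
       MockS3ARemoteObject s3File = new MockS3ARemoteObject(FILE_SIZE, false);
       S3ARemoteObjectReader reader = new S3ARemoteObjectReader(s3File);
       // Everything populated except the future pool, which is deliberately left out.
       BlockManagerParameters params = new BlockManagerParameters()
           .withBlockData(blockData)
           .withBufferPoolSize(POOL_SIZE)
           .withConf(CONF);
       // Creating the block manager without a future pool should be rejected.
       LambdaTestUtils.intercept(IllegalArgumentException.class,
           () -> new S3ACachingBlockManager(params, reader));
     }
   ```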



##########
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/BlockManagerParameters.java:
##########
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.fs.impl.prefetch;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
+
+/**
+ * This class is used to provide parameters to {@link BlockManager}.
+ */
+@InterfaceAudience.Private
+public final class BlockManagerParameters {
+
+  /**
+   * Asynchronous tasks are performed in this pool.
+   */
+  private ExecutorServiceFuturePool futurePool;
+
+  /**
+   * Information about each block of the underlying file.
+   */
+  private BlockData blockData;
+
+  /**
+   * Size of the in-memory cache in terms of number of blocks.
+   */
+  private int bufferPoolSize;
+
+  /**
+   * Statistics for the stream.
+   */
+  private PrefetchingStatistics prefetchingStatistics;
+
+  /**
+   * The configuration object.
+   */
+  private Configuration conf;
+
+  /**
+   * The local dir allocator instance.
+   */
+  private LocalDirAllocator localDirAllocator;
+
+  /**
+   * Max blocks count to be kept in cache at any time.
+   */
+  private int maxBlocksCount;
+
+  /**
+   * Tracker with statistics to update.
+   */
+  private DurationTrackerFactory trackerFactory;
+
+  public ExecutorServiceFuturePool getFuturePool() {
+    return futurePool;
+  }
+
+  public BlockData getBlockData() {
+    return blockData;
+  }
+
+  public int getBufferPoolSize() {
+    return bufferPoolSize;
+  }
+
+  public PrefetchingStatistics getPrefetchingStatistics() {
+    return prefetchingStatistics;
+  }
+
+  public Configuration getConf() {
+    return conf;
+  }
+
+  public LocalDirAllocator getLocalDirAllocator() {
+    return localDirAllocator;
+  }
+
+  public int getMaxBlocksCount() {
+    return maxBlocksCount;
+  }
+
+  public DurationTrackerFactory getTrackerFactory() {
+    return trackerFactory;
+  }
+
+  public BlockManagerParameters setFuturePool(

Review Comment:
   Also, could you please add javadocs for the getters and setters?
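   Something along these lines for each accessor would do (wording is illustrative):

   ```java
     /**
      * The future pool in which asynchronous prefetch tasks run.
      *
      * @return the future pool.
      */
     public ExecutorServiceFuturePool getFuturePool() {
       return futurePool;
     }
   ```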



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/prefetch/TestS3ACachingBlockManager.java:
##########
@@ -244,9 +292,18 @@ private void testPrefetchHelper(boolean forcePrefetchFailure)
       throws IOException, InterruptedException {
     MockS3ARemoteObject s3File = new MockS3ARemoteObject(FILE_SIZE, false);
     S3ARemoteObjectReader reader = new S3ARemoteObjectReader(s3File);
+    BlockManagerParameters blockManagerParamsBuilder =

Review Comment:
   looks like the code to create `blockManagerParamsBuilder` is common across these three cases. Consider moving it to a shared method that returns the builder.
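   For example, the three tests could share something like the helper sketched below (the method name is illustrative, and the `with*` calls assume the builder naming and test fields discussed above):

   ```java
     // Illustrative helper: builds the parameter set common to the prefetch tests.
     private BlockManagerParameters createBlockManagerParameters() {
       return new BlockManagerParameters()
           .withFuturePool(futurePool)
           .withBlockData(blockData)
           .withBufferPoolSize(POOL_SIZE)
           .withConf(CONF);
     }
   ```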
   
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

