Apache9 commented on a change in pull request #1774:
URL: https://github.com/apache/hbase/pull/1774#discussion_r436477988



##########
File path: hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
##########
@@ -44,25 +51,51 @@
 @InterfaceAudience.Private
 class AsyncRegionLocator {
 
-  private static final Logger LOG = LoggerFactory.getLogger(AsyncRegionLocator.class);
+  @VisibleForTesting
+  static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
+    "hbase.client.meta.max.concurrent.locate.per.table";
+
+  private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
+
+  @VisibleForTesting
+  static final String MAX_CONCURRENT_LOCATE_META_REQUEST =
+    "hbase.client.meta.max.concurrent.locate";
+
+  @VisibleForTesting
+  static final String LOCATE_PREFETCH_LIMIT = "hbase.client.locate.prefetch.limit";
+
+  private static final int DEFAULT_LOCATE_PREFETCH_LIMIT = 10;
 
   private final HashedWheelTimer retryTimer;
 
   private final AsyncConnectionImpl conn;
 
-  private final AsyncMetaRegionLocator metaRegionLocator;
+  private final int maxConcurrentLocateRequestPerTable;
+
+  private final int maxConcurrentLocateMetaRequest;
+
+  private final int locatePrefetchLimit;
 
-  private final AsyncNonMetaRegionLocator nonMetaRegionLocator;
+  private final boolean useMetaReplicas;
 
-  AsyncRegionLocator(AsyncConnectionImpl conn, HashedWheelTimer retryTimer) {
+  private final ConcurrentMap<TableName, AbstractAsyncTableRegionLocator> table2Locator =

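As a side note on the new configuration keys above, here is a minimal
sketch of how a client could override them, assuming standard HBase
Configuration usage (the numeric values are arbitrary examples, not
recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LocatorTuningExample {
  public static void main(String[] args) {
    // Illustrative overrides of the new keys; the values are examples only.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.meta.max.concurrent.locate.per.table", 16);
    conf.setInt("hbase.client.meta.max.concurrent.locate", 4);
    conf.setInt("hbase.client.locate.prefetch.limit", 20);
  }
}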
Review comment:
      Every table will have a locator. The cache lives in the locator, and
the per-table limit on concurrent locate requests is also enforced there. We
made the class abstract because we use different mechanisms to locate meta
regions and the regions of other tables.



