This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.2 by this push:
new d60288a HBASE-22236 AsyncNonMetaRegionLocator should not cache HRegionLocation with null location
d60288a is described below
commit d60288a7b9711c82e1a5cfd17fa3fdf99241bde8
Author: zhangduo <[email protected]>
AuthorDate: Sun Apr 14 16:49:51 2019 +0800
HBASE-22236 AsyncNonMetaRegionLocator should not cache HRegionLocation with null location
---
.../org/apache/hadoop/hbase/RegionLocations.java | 17 +++++++++++-
.../hbase/client/AsyncNonMetaRegionLocator.java | 4 +++
.../hbase/client/AsyncRegionLocatorHelper.java | 3 +-
.../client/TestAsyncTableGetMultiThreaded.java | 32 ++++++++++++++++++----
4 files changed, 49 insertions(+), 7 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
index f98bf03..e119ebb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase;
import java.util.Collection;
-
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.util.Bytes;
@@ -186,6 +185,22 @@ public class RegionLocations {
}
/**
+ * Set the element to null if its getServerName method returns null. Returns null if all the
+ * elements are removed.
+ */
+ public RegionLocations removeElementsWithNullLocation() {
+ HRegionLocation[] newLocations = new HRegionLocation[locations.length];
+ boolean hasNonNullElement = false;
+ for (int i = 0; i < locations.length; i++) {
+ if (locations[i] != null && locations[i].getServerName() != null) {
+ hasNonNullElement = true;
+ newLocations[i] = locations[i];
+ }
+ }
+ return hasNonNullElement ? new RegionLocations(newLocations) : null;
+ }
+
+ /**
* Merges this RegionLocations list with the given list assuming
* same range, and keeping the most up to date version of the
* HRegionLocation entries from either list according to seqNum. If seqNums
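For context, the new RegionLocations#removeElementsWithNullLocation() builds a filtered copy rather than mutating the existing array, and returns null once no slot has a server. A minimal sketch of the expected behaviour; the table, region and server values below are made up for illustration and are not part of this patch:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class RemoveNullLocationSketch {
      public static void main(String[] args) {
        RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        RegionInfo replicaOne = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        // Replica 0 has a server; replica 1 came back from meta with no server assigned.
        HRegionLocation withServer =
            new HRegionLocation(primary, ServerName.valueOf("rs1.example.org", 16020, 1L));
        HRegionLocation withoutServer = new HRegionLocation(replicaOne, null);

        RegionLocations locs = new RegionLocations(withServer, withoutServer);
        RegionLocations filtered = locs.removeElementsWithNullLocation();
        // filtered keeps the replica-0 entry and leaves the replica-1 slot null.
        System.out.println(filtered.getDefaultRegionLocation());

        // When no entry has a server at all, the method returns null instead of
        // an all-null RegionLocations.
        RegionLocations allNull = new RegionLocations(new HRegionLocation(primary, null));
        System.out.println(allNull.removeElementsWithNullLocation()); // null
      }
    }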
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 447d073..4a9b7dc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -321,6 +321,10 @@ class AsyncNonMetaRegionLocator {
LOG.debug("The fetched location of '{}', row='{}', locateType={} is {}", tableName,
  Bytes.toStringBinary(req.row), req.locateType, locs);
}
+ // remove HRegionLocation with null location, i.e., getServerName returns null.
+ if (locs != null) {
+ locs = locs.removeElementsWithNullLocation();
+ }
// the default region location should always be presented when fetching from meta, otherwise
// let's fail the request.
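The guard added above runs before the existing default-replica check, so a meta row whose server cell is empty is filtered out rather than cached. Roughly, the resulting flow looks like the sketch below; the future and cache consumer are hypothetical stand-ins for the locator's internal plumbing, and only removeElementsWithNullLocation() and getDefaultRegionLocation() are real API here:

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.RegionLocations;

    final class LocateFlowSketch {
      // Paraphrase of the post-fetch handling after this change; not the class's real method.
      static void onMetaRowFetched(CompletableFuture<HRegionLocation> future,
          Consumer<RegionLocations> addToCache, RegionLocations locs) {
        if (locs != null) {
          // New in this patch: drop entries whose getServerName() is null so that
          // a location without a server can never end up in the cache.
          locs = locs.removeElementsWithNullLocation();
        }
        if (locs == null || locs.getDefaultRegionLocation() == null) {
          // Without a usable default replica the locate request fails instead of
          // caching a bogus entry.
          future.completeExceptionally(new IOException("No location found for default replica"));
        } else {
          addToCache.accept(locs);
          future.complete(locs.getDefaultRegionLocation());
        }
      }
    }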
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
index 2836e4b..5f4bc9f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java
@@ -76,7 +76,8 @@ final class AsyncRegionLocatorHelper {
RegionMovedException rme = (RegionMovedException) cause;
HRegionLocation newLoc =
new HRegionLocation(loc.getRegion(), rme.getServerName(), rme.getLocationSeqNum());
- LOG.debug("Try updating {} with the new location {} constructed by {}", loc, newLoc, rme);
+ LOG.debug("Try updating {} with the new location {} constructed by {}", loc, newLoc,
+ rme.toString());
addToCache.accept(newLoc);
} else {
LOG.debug("Try removing {} from cache", loc);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 48aba06..75800ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -126,11 +126,14 @@ public class TestAsyncTableGetMultiThreaded {
assertEquals(i, Bytes.toInt(TABLE.get(new Get(Bytes.toBytes(String.format("%03d", i))))
.get().getValue(FAMILY, QUALIFIER)));
}
+ // sleep a bit so we do not add too much load to the test machine as we have 20 threads here
+ Thread.sleep(10);
}
}
@Test
public void test() throws Exception {
+ LOG.info("====== Test started ======");
int numThreads = 20;
AtomicBoolean stop = new AtomicBoolean(false);
ExecutorService executor =
@@ -140,10 +143,13 @@ public class TestAsyncTableGetMultiThreaded {
run(stop);
return null;
})));
+ LOG.info("====== Scheduled {} read threads ======", numThreads);
Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
Admin admin = TEST_UTIL.getAdmin();
for (byte[] splitPoint : SPLIT_KEYS) {
int oldRegionCount = admin.getRegions(TABLE_NAME).size();
+ LOG.info("====== Splitting at {} ======, region count before splitting is {}",
+ Bytes.toStringBinary(splitPoint), oldRegionCount);
admin.split(TABLE_NAME, splitPoint);
TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
@Override
@@ -156,12 +162,16 @@ public class TestAsyncTableGetMultiThreaded {
return "Split has not finished yet";
}
});
-
- for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
+ List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME);
+ LOG.info("====== Split at {} ======, region count after splitting is {}",
+ Bytes.toStringBinary(splitPoint), regions.size());
+ for (HRegion region : regions) {
+ LOG.info("====== Compact {} ======", region.getRegionInfo());
region.compact(true);
}
- for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
+ for (HRegion region : regions) {
// Waiting for compaction to complete and references are cleaned up
+ LOG.info("====== Waiting for compaction on {} ======", region.getRegionInfo());
RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
for (;;) {
try {
@@ -178,22 +188,34 @@ public class TestAsyncTableGetMultiThreaded {
}
retrier.sleepUntilNextRetry();
}
+ LOG.info("====== Compaction on {} finished, close and archive compacted files ======",
+ region.getRegionInfo());
region.getStores().get(0).closeAndArchiveCompactedFiles();
+ LOG.info("====== Close and archive compacted files on {} done ======",
+ region.getRegionInfo());
}
Thread.sleep(5000);
+ LOG.info("====== Balancing cluster ======");
admin.balance(true);
+ LOG.info("====== Balance cluster done ======");
Thread.sleep(5000);
ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
  .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer))
.findAny().get();
+ LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer);
admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer);
+ LOG.info("====== Move meta done ======");
Thread.sleep(5000);
}
+ LOG.info("====== Read test finished, shutdown thread pool ======");
stop.set(true);
executor.shutdown();
- for (Future<?> future : futures) {
- future.get();
+ for (int i = 0; i < numThreads; i++) {
+ LOG.info("====== Waiting for {} threads to finish, remaining {} ======", numThreads,
+ numThreads - i);
+ futures.get(i).get();
}
+ LOG.info("====== Test finished ======");
}
}
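For reference, the wait loop the test uses while compacted files are being cleaned up is essentially the standard RetryCounter pattern: check, and if the condition does not hold yet, sleep and retry up to the attempt limit. A self-contained sketch of that shape; the BooleanSupplier predicate is a placeholder, not the test's actual store-file check:

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.RetryCounter;

    final class RetryWaitSketch {
      // Retry up to 30 times, sleeping 1 second between attempts, mirroring the
      // new RetryCounter(30, 1, TimeUnit.SECONDS) used in the test above.
      static void waitFor(BooleanSupplier condition) throws InterruptedException {
        RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
        while (!condition.getAsBoolean()) {
          if (!retrier.shouldRetry()) {
            throw new AssertionError("condition not met after 30 attempts");
          }
          retrier.sleepUntilNextRetry();
        }
      }
    }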