[ https://issues.apache.org/jira/browse/HBASE-20697?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16510816#comment-16510816 ]
Hadoop QA commented on HBASE-20697: ----------------------------------- | (x) *{color:red}-1 overall{color}* | \\ \\ || Vote || Subsystem || Runtime || Comment || | {color:blue}0{color} | {color:blue} reexec {color} | {color:blue} 1m 9s{color} | {color:blue} Docker mode activated. {color} | || || || || {color:brown} Prechecks {color} || | {color:blue}0{color} | {color:blue} findbugs {color} | {color:blue} 0m 1s{color} | {color:blue} Findbugs executables are not available. {color} | | {color:green}+1{color} | {color:green} hbaseanti {color} | {color:green} 0m 0s{color} | {color:green} Patch does not have any anti-patterns. {color} | | {color:green}+1{color} | {color:green} @author {color} | {color:green} 0m 0s{color} | {color:green} The patch does not contain any @author tags. {color} | | {color:green}+1{color} | {color:green} test4tests {color} | {color:green} 0m 0s{color} | {color:green} The patch appears to include 1 new or modified test files. {color} | || || || || {color:brown} branch-1.2 Compile Tests {color} || | {color:blue}0{color} | {color:blue} mvndep {color} | {color:blue} 1m 27s{color} | {color:blue} Maven dependency ordering for branch {color} | | {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 7m 48s{color} | {color:green} branch-1.2 passed {color} | | {color:green}+1{color} | {color:green} compile {color} | {color:green} 0m 46s{color} | {color:green} branch-1.2 passed with JDK v1.8.0_172 {color} | | {color:green}+1{color} | {color:green} compile {color} | {color:green} 0m 52s{color} | {color:green} branch-1.2 passed with JDK v1.7.0_181 {color} | | {color:green}+1{color} | {color:green} checkstyle {color} | {color:green} 1m 45s{color} | {color:green} branch-1.2 passed {color} | | {color:green}+1{color} | {color:green} shadedjars {color} | {color:green} 2m 24s{color} | {color:green} branch has no errors when building our shaded downstream artifacts. 
{color} | | {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 0m 41s{color} | {color:green} branch-1.2 passed with JDK v1.8.0_172 {color} | | {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 0m 53s{color} | {color:green} branch-1.2 passed with JDK v1.7.0_181 {color} | || || || || {color:brown} Patch Compile Tests {color} || | {color:blue}0{color} | {color:blue} mvndep {color} | {color:blue} 0m 11s{color} | {color:blue} Maven dependency ordering for patch {color} | | {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 1m 25s{color} | {color:green} the patch passed {color} | | {color:green}+1{color} | {color:green} compile {color} | {color:green} 0m 44s{color} | {color:green} the patch passed with JDK v1.8.0_172 {color} | | {color:green}+1{color} | {color:green} javac {color} | {color:green} 0m 44s{color} | {color:green} the patch passed {color} | | {color:green}+1{color} | {color:green} compile {color} | {color:green} 0m 51s{color} | {color:green} the patch passed with JDK v1.7.0_181 {color} | | {color:green}+1{color} | {color:green} javac {color} | {color:green} 0m 51s{color} | {color:green} the patch passed {color} | | {color:red}-1{color} | {color:red} checkstyle {color} | {color:red} 0m 27s{color} | {color:red} hbase-client: The patch generated 3 new + 2 unchanged - 0 fixed = 5 total (was 2) {color} | | {color:green}+1{color} | {color:green} whitespace {color} | {color:green} 0m 0s{color} | {color:green} The patch has no whitespace issues. {color} | | {color:green}+1{color} | {color:green} shadedjars {color} | {color:green} 2m 21s{color} | {color:green} patch has no errors when building our shaded downstream artifacts. {color} | | {color:green}+1{color} | {color:green} hadoopcheck {color} | {color:green} 8m 16s{color} | {color:green} Patch does not cause any errors with Hadoop 2.4.1 2.5.2 2.6.5 2.7.4. 
{color} | | {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 0m 38s{color} | {color:green} the patch passed with JDK v1.8.0_172 {color} | | {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 0m 54s{color} | {color:green} the patch passed with JDK v1.7.0_181 {color} | || || || || {color:brown} Other Tests {color} || | {color:green}+1{color} | {color:green} unit {color} | {color:green} 1m 51s{color} | {color:green} hbase-client in the patch passed. {color} | | {color:red}-1{color} | {color:red} unit {color} | {color:red} 88m 14s{color} | {color:red} hbase-server in the patch failed. {color} | | {color:green}+1{color} | {color:green} asflicense {color} | {color:green} 0m 31s{color} | {color:green} The patch does not generate ASF License warnings. {color} | | {color:black}{color} | {color:black} {color} | {color:black}125m 55s{color} | {color:black} {color} | \\ \\ || Reason || Tests || | Failed junit tests | hadoop.hbase.mapreduce.TestMultiTableSnapshotInputFormat | \\ \\ || Subsystem || Report/Notes || | Docker | Client=17.05.0-ce Server=17.05.0-ce Image:yetus/hbase:13ca256 | | JIRA Issue | HBASE-20697 | | JIRA Patch URL | https://issues.apache.org/jira/secure/attachment/12927595/HBASE-20697.branch-1.2.004.patch | | Optional Tests | asflicense javac javadoc unit findbugs shadedjars hadoopcheck hbaseanti checkstyle compile | | uname | Linux 9e5bbf171592 3.13.0-137-generic #186-Ubuntu SMP Mon Dec 4 19:09:19 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux | | Build tool | maven | | Personality | /home/jenkins/jenkins-slave/workspace/PreCommit-HBASE-Build/component/dev-support/hbase-personality.sh | | git revision | branch-1.2 / 595307f | | maven | version: Apache Maven 3.0.5 | | Default Java | 1.7.0_181 | | Multi-JDK versions | /usr/lib/jvm/java-8-openjdk-amd64:1.8.0_172 /usr/lib/jvm/java-7-openjdk-amd64:1.7.0_181 | | checkstyle | 
https://builds.apache.org/job/PreCommit-HBASE-Build/13220/artifact/patchprocess/diff-checkstyle-hbase-client.txt | | unit | https://builds.apache.org/job/PreCommit-HBASE-Build/13220/artifact/patchprocess/patch-unit-hbase-server.txt | | Test Results | https://builds.apache.org/job/PreCommit-HBASE-Build/13220/testReport/ | | Max. process+thread count | 3635 (vs. ulimit of 10000) | | modules | C: hbase-client hbase-server U: . | | Console output | https://builds.apache.org/job/PreCommit-HBASE-Build/13220/console | | Powered by | Apache Yetus 0.7.0 http://yetus.apache.org | This message was automatically generated. > Can't cache All region locations of the specify table by calling > table.getRegionLocator().getAllRegionLocations() > ----------------------------------------------------------------------------------------------------------------- > > Key: HBASE-20697 > URL: https://issues.apache.org/jira/browse/HBASE-20697 > Project: HBase > Issue Type: Bug > Affects Versions: 1.3.1, 1.2.6 > Reporter: zhaoyuan > Assignee: zhaoyuan > Priority: Minor > Fix For: 1.2.7, 1.3.3 > > Attachments: HBASE-20697.branch-1.2.001.patch, > HBASE-20697.branch-1.2.002.patch, HBASE-20697.branch-1.2.003.patch, > HBASE-20697.branch-1.2.004.patch > > > When we upgrade and restart a new version application which will read and > write to HBase, we will get some operation timeouts. The timeout is expected > because when the application restarts, it will not hold any region locations > cache and must communicate with zk and the meta regionserver to get region > locations. > We want to avoid these timeouts so we do warmup work and as far as I am > concerned, the method table.getRegionLocator().getAllRegionLocations() will > fetch all region locations and cache them. However, it didn't work well. > There are still a lot of timeouts, so it confused me.
> I dig into the source code and find something below > {code:java} > // code placeholder > public List<HRegionLocation> getAllRegionLocations() throws IOException { > TableName tableName = getName(); > NavigableMap<HRegionInfo, ServerName> locations = > MetaScanner.allTableRegions(this.connection, tableName); > ArrayList<HRegionLocation> regions = new ArrayList<>(locations.size()); > for (Entry<HRegionInfo, ServerName> entry : locations.entrySet()) { > regions.add(new HRegionLocation(entry.getKey(), entry.getValue())); > } > if (regions.size() > 0) { > connection.cacheLocation(tableName, new RegionLocations(regions)); > } > return regions; > } > In MetaCache > public void cacheLocation(final TableName tableName, final RegionLocations > locations) { > byte [] startKey = > locations.getRegionLocation().getRegionInfo().getStartKey(); > ConcurrentMap<byte[], RegionLocations> tableLocations = > getTableLocations(tableName); > RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, > locations); > boolean isNewCacheEntry = (oldLocation == null); > if (isNewCacheEntry) { > if (LOG.isTraceEnabled()) { > LOG.trace("Cached location: " + locations); > } > addToCachedServers(locations); > return; > } > {code} > It will collect all regions into one RegionLocations object and only cache > the first not null region location and then when we put or get to hbase, we > do getCacheLocation() > {code:java} > // code placeholder > public RegionLocations getCachedLocation(final TableName tableName, final > byte [] row) { > ConcurrentNavigableMap<byte[], RegionLocations> tableLocations = > getTableLocations(tableName); > Entry<byte[], RegionLocations> e = tableLocations.floorEntry(row); > if (e == null) { > if (metrics!= null) metrics.incrMetaCacheMiss(); > return null; > } > RegionLocations possibleRegion = e.getValue(); > // make sure that the end key is greater than the row we're looking > // for, otherwise the row actually belongs in the next region, not > // this one. 
the exception case is when the endkey is > // HConstants.EMPTY_END_ROW, signifying that the region we're > // checking is actually the last region in the table. > byte[] endKey = > possibleRegion.getRegionLocation().getRegionInfo().getEndKey(); > if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || > getRowComparator(tableName).compareRows( > endKey, 0, endKey.length, row, 0, row.length) > 0) { > if (metrics != null) metrics.incrMetaCacheHit(); > return possibleRegion; > } > // Passed all the way through, so we got nothing - complete cache miss > if (metrics != null) metrics.incrMetaCacheMiss(); > return null; > } > {code} > It will choose the first location to be possibleRegion, and possibly it will > be a mismatch. > So did I forget something or may be wrong somewhere? If this is indeed a bug > I think it can be fixed without much difficulty. > Hope committers and PMC review this! > > -- This message was sent by Atlassian JIRA (v7.6.3#76005)