[03/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
index f3c245a..0572f45 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":9,"i3":10,"i4":10,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
+var methods = 
{"i0":9,"i1":10,"i2":9,"i3":10,"i4":10,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestSnapshotFromMaster
+public class TestSnapshotFromMaster
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Test the master-related aspects of a snapshot
 
@@ -250,19 +250,23 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-testDeleteSnapshot()
+testAsyncSnapshotWillNotBlockSnapshotHFileCleaner()
 
 
 void
-testGetCompletedSnapshots()
+testDeleteSnapshot()
 
 
 void
+testGetCompletedSnapshots()
+
+
+void
 testIsDoneContract()
 Test that the contract from the master for checking on a 
snapshot are valid.
 
 
-
+
 void
 testSnapshotHFileArchiving()
 Test that the snapshot hfile archive cleaner works 
correctly.
@@ -296,7 +300,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 CLASS_RULE
-public static finalHBaseClassTestRule CLASS_RULE
+public static finalHBaseClassTestRule CLASS_RULE
 
 
 
@@ -305,7 +309,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -314,7 +318,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 UTIL
-private static finalHBaseTestingUtility UTIL
+private static finalHBaseTestingUtility UTIL
 
 
 
@@ -323,7 +327,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 NUM_RS
-private static finalint NUM_RS
+private static finalint NUM_RS
 
 See Also:
 Constant
 Field Values
@@ -336,7 +340,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 rootDir
-private staticorg.apache.hadoop.fs.Path rootDir
+private staticorg.apache.hadoop.fs.Path rootDir
 
 
 
@@ -345,7 +349,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 fs
-private staticorg.apache.hadoop.fs.FileSystem fs
+private staticorg.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -354,7 +358,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 master
-private staticorg.apache.hadoop.hbase.master.HMaster master
+private staticorg.apache.hadoop.hbase.master.HMaster master
 
 
 
@@ -363,7 +367,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 archiveDir
-private staticorg.apache.hadoop.fs.Path archiveDir
+private staticorg.apache.hadoop.fs.Path archiveDir
 
 
 
@@ -372,7 +376,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TEST_FAM
-private static finalbyte[] TEST_FAM
+private static finalbyte[] TEST_FAM
 
 
 
@@ -381,7 +385,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TABLE_NAME
-private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
+private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -390,7 +394,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 cacheRefreshPeriod
-private static finallong cacheRefreshPeriod
+private static finallong cacheRefreshPeriod
 
 See Also:
 Constant
 Field Values
@@ -403,7 +407,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 blockingStoreFiles
-private static finalint blockingStoreFiles
+private static finalint blockingStoreFiles
 
 See Also:
 Constant
 Field Values
@@ -424,7 +428,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TestSnapshotFromMaster
-publicTestSnapshotFromMaster()
+publicTestSnapshotFromMaster()
 
 
 
@@ -441,7 +445,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setupCluster
-public staticvoidsetupCluster()
+public staticvoidsetupCluster()
  throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or 

[09/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 1815591..ead5e90 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.quotas.QuotaType
-org.apache.hadoop.hbase.quotas.ThrottlingException.Type
 org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
-org.apache.hadoop.hbase.quotas.RpcThrottlingException.Type
-org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 org.apache.hadoop.hbase.quotas.ThrottleType
+org.apache.hadoop.hbase.quotas.QuotaScope
+org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.RpcThrottlingException.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index 06b0a56..55d821f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
@@ -107,7 +107,7 @@
 
 
 
-static interface MemStoreFlusher.FlushQueueEntry
+static interface MemStoreFlusher.FlushQueueEntry
 extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Delayed.html?is-external=true;
 title="class or interface in java.util.concurrent">Delayed
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index bad1317..f6ec963 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class MemStoreFlusher.FlushRegionEntry
+static class MemStoreFlusher.FlushRegionEntry
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements MemStoreFlusher.FlushQueueEntry
 Datastructure used in the flush queue.  Holds region and 
retry count.
@@ -270,7 +270,7 @@ implements 
 
 region
-private finalHRegion region
+private finalHRegion region
 
 
 
@@ -279,7 +279,7 @@ implements 
 
 createTime
-private finallong createTime
+private finallong createTime
 
 
 
@@ -288,7 +288,7 @@ implements 
 
 whenToExpire
-privatelong whenToExpire
+privatelong whenToExpire
 
 
 
@@ -297,7 +297,7 @@ implements 
 
 requeueCount
-privateint requeueCount
+privateint requeueCount
 
 
 
@@ -306,7 +306,7 @@ implements 
 
 forceFlushAllStores
-private finalboolean forceFlushAllStores
+private finalboolean forceFlushAllStores
 
 
 
@@ -315,7 +315,7 @@ implements 
 
 tracker
-private finalFlushLifeCycleTracker tracker
+private finalFlushLifeCycleTracker tracker
 
 
 
@@ -332,7 +332,7 @@ implements 
 
 FlushRegionEntry
-FlushRegionEntry(HRegionr,
+FlushRegionEntry(HRegionr,
  booleanforceFlushAllStores,
  FlushLifeCycleTrackertracker)
 
@@ -351,7 +351,7 @@ implements 
 
 isMaximumWait
-publicbooleanisMaximumWait(longmaximumWait)
+publicbooleanisMaximumWait(longmaximumWait)
 
 Parameters:
 maximumWait - 
@@ -366,7 +366,7 @@ implements 
 
 getRequeueCount
-publicintgetRequeueCount()
+publicintgetRequeueCount()
 
 Returns:
 Count of times requeue(long)
 was called; i.e this is
@@ -380,7 +380,7 @@ implements 
 
 isForceFlushAllStores
-publicbooleanisForceFlushAllStores()
+publicbooleanisForceFlushAllStores()
 
 Returns:
 whether we need to flush all stores.
@@ -393,7 +393,7 @@ implements 
 
 getTracker

[10/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
index 0c1525b..b483901 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
@@ -125,7 +125,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Configuration")
  @InterfaceStability.Unstable
-public class SnapshotManager
+public class SnapshotManager
 extends MasterProcedureManager
 implements Stoppable
 This class manages the procedure of taking and restoring 
snapshots. There is only one
@@ -168,31 +168,43 @@ implements 
 
 
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS
+Wait time before removing a finished sentinel from the 
in-progress map
+
+ NOTE: This is used as a safety auto cleanup.
+
+
+
 private boolean
 isSnapshotSupported
 
-
+
 private static org.slf4j.Logger
 LOG
 
-
+
 private MasterServices
 master
 
-
+
 static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION
 Name of the operation to use in the controller
 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
 restoreTableToProcIdMap
 
-
+
 private org.apache.hadoop.fs.Path
 rootDir
 
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ScheduledExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledExecutorService
+scheduleThreadPool
+
 
 private static int
 SNAPSHOT_POOL_THREADS_DEFAULT
@@ -206,12 +218,8 @@ implements 
 
 
-private static int
-SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT
-Wait time before removing a finished sentinel from the 
in-progress map
-
- NOTE: This is used as a safety auto cleanup.
-
+static long
+SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT
 
 
 private static int
@@ -227,14 +235,18 @@ implements 
 
 
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ScheduledFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledFuture?
+snapshotHandlerChoreCleanerTask
+
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SnapshotSentinel
 snapshotHandlers
 
-
+
 private boolean
 stopped
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true;
 title="class or interface in 
java.util.concurrent.locks">ReentrantReadWriteLock
 takingSnapshotLock
 Read write lock between taking snapshot and snapshot HFile 
cleaner.
@@ -258,10 +270,10 @@ implements SnapshotManager()
 
 
-SnapshotManager(MasterServicesmaster,
-   MetricsMastermetricsMaster,
+SnapshotManager(MasterServicesmaster,
ProcedureCoordinatorcoordinator,
-   ExecutorServicepool)
+   ExecutorServicepool,
+   intsentinelCleanInterval)
 Fully specify all necessary components of a snapshot 
manager.
 
 
@@ -449,7 +461,7 @@ implements 
 
 
-(package private) void
+private void
 resetTempDir()
 Cleans up any snapshots in the snapshot/.tmp directory that 
were left from failed
  snapshot attempts.
@@ -571,7 +583,7 @@ implements 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -580,7 +592,7 @@ implements 
 
 SNAPSHOT_WAKE_MILLIS_DEFAULT
-private static finalint SNAPSHOT_WAKE_MILLIS_DEFAULT
+private static finalint SNAPSHOT_WAKE_MILLIS_DEFAULT
 By default, check to see if the snapshot is complete every 
WAKE MILLIS (ms)
 
 See Also:
@@ -588,13 +600,13 @@ implements 
 
 
-
+
 
 
 
 
-SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT
-private static finalint SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT
+HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS
+public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS
 Wait time before removing a finished sentinel from the 
in-progress map
 
  NOTE: This is used as a safety auto cleanup.
@@ -606,7 +618,20 @@ implements 
 
 See Also:
-Constant
 Field Values
+Constant
 Field Values
+
+
+
+
+
+
+
+
+SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT
+public static 

[13/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c727a708
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c727a708
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c727a708

Branch: refs/heads/asf-site
Commit: c727a708d68c52b23c6725c191877a280e3a0516
Parents: e281236
Author: jenkins 
Authored: Thu Dec 13 14:52:47 2018 +
Committer: jenkins 
Committed: Thu Dec 13 14:52:47 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 15232 -
 checkstyle.rss  | 4 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |29 +-
 devapidocs/index-all.html   |20 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../executor/class-use/ExecutorService.html | 6 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 8 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hbase/master/class-use/MasterServices.html  | 6 +-
 .../hbase/master/class-use/MetricsMaster.html   |16 -
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../hbase/master/procedure/package-tree.html| 4 +-
 .../hbase/master/snapshot/SnapshotManager.html  |   216 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../class-use/ProcedureCoordinator.html | 6 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 .../MemStoreFlusher.FlushQueueEntry.html| 2 +-
 .../MemStoreFlusher.FlushRegionEntry.html   |36 +-
 .../hbase/regionserver/MemStoreFlusher.html |12 +-
 .../hadoop/hbase/regionserver/package-tree.html |18 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../SnapshotDoesNotExistException.html  |44 +-
 .../hadoop/hbase/snapshot/package-use.html  | 5 -
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 8 +-
 .../org/apache/hadoop/hbase/Version.html| 4 +-
 .../hbase/master/snapshot/SnapshotManager.html  |  2334 +--
 .../MemStoreFlusher.FlushHandler.html   |   476 +-
 .../MemStoreFlusher.FlushQueueEntry.html|   476 +-
 .../MemStoreFlusher.FlushRegionEntry.html   |   476 +-
 .../hbase/regionserver/MemStoreFlusher.html |   476 +-
 downloads.html  | 4 +-
 export_control.html | 4 +-
 index.html  | 4 +-
 integration.html| 4 +-
 issue-tracking.html | 4 +-
 license.html| 4 +-
 mail-lists.html | 4 +-
 metrics.html| 4 +-
 old_news.html   | 4 +-
 plugin-management.html  | 4 +-
 plugins.html| 4 +-
 poweredbyhbase.html | 4 +-
 project-info.html   | 4 +-
 project-reports.html| 4 +-
 project-summary.html| 4 +-
 pseudo-distributed.html | 4 +-
 replication.html| 4 +-
 resources.html  | 4 +-
 source-repository.html  | 4 +-
 sponsors.html   | 4 +-
 supportingprojects.html | 4 +-
 team-list.html  | 4 +-
 testdevapidocs/index-all.html   | 8 +-
 .../master/cleaner/TestSnapshotFromMaster.html  |80 +-
 

[12/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index c3aefbb..b825b03 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -294,7 +294,7 @@
 3819
 0
 0
-14729
+14728
 
 Files
 
@@ -5177,7 +5177,7 @@
 org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 0
 0
-16
+15
 
 org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 0
@@ -9664,7 +9664,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly;>LeftCurly
-183
+182
 Error
 
 
@@ -9736,7 +9736,7 @@
 sortStaticImportsAlphabetically: true
 groups: 
*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded
 option: top
-1129
+1130
 Error
 
 
@@ -9766,12 +9766,12 @@
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-729
+728
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3416
+3417
 Error
 
 misc
@@ -9789,7 +9789,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-1425
+1424
 Error
 
 
@@ -18765,7 +18765,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 0 has parse error. Details: no viable 
alternative at input '   *' while parsing JAVADOC_TAG
 117
 
@@ -65696,7 +65696,7 @@
 sizes
 LineLength
 Line is longer than 100 characters (found 111).
-402
+409
 
 org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
 
@@ -66692,122 +66692,116 @@
 Line
 
 Error
+imports
+ImportOrder
+Wrong order for 
'org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting' 
import.
+98
+
+Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-218
-
+234
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-289
-
-Error
-sizes
-LineLength
-Line is longer than 100 characters (found 110).
-293
+305
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-331
+347
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 102).
-406
+422
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-568
+584
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-666
+682
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-669
+685
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-719
+735
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-788
+804
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-790
+806
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-841
+857
 
 Error
 blocks
-LeftCurly
-'{' at column 7 should be on the previous line.
-999
-
-Error
-blocks
 NeedBraces
 'if' construct must use '{}'s.
-1027
-
+1043
+
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1082
-
+1100
+
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1086
+1104
 
 org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
 68
-
+
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 212
-
+
 Error
 blocks
 NeedBraces
@@ -66816,31 +66810,31 @@
 
 org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
 92
-
+
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
 125
-
+
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 104).
 137
-
+
 Error
 sizes
 LineLength
@@ -66849,28 +66843,28 @@
 
 org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 indentation
 Indentation
 'method def modifier' has incorrect indentation level 3, expected level 
should be 2.
-78
+76
 
 org/apache/hadoop/hbase/mob/CachedMobFile.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 blocks
 NeedBraces
@@ -66879,55 +66873,55 @@
 
 org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.KeyValue' import.
 31
-
+
 Error
 indentation
 Indentation
 'method def modifier' has incorrect 

[11/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 75dae16..4cdf785 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2018 The Apache Software Foundation
 
   File: 3819,
- Errors: 14729,
+ Errors: 14728,
  Warnings: 0,
  Infos: 0
   
@@ -25283,7 +25283,7 @@ under the License.
   0
 
 
-  16
+  15
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/coc.html
--
diff --git a/coc.html b/coc.html
index a7d4493..17ee4bc 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -385,7 +385,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-12
+  Last Published: 
2018-12-13
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 7c585aa..9cd4449 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -450,7 +450,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-12
+  Last Published: 
2018-12-13
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 69dd69e..70d3224 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -680,7 +680,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-12
+  Last Published: 
2018-12-13
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index cb723c6..672080f 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -323,7 +323,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-12
+  Last Published: 
2018-12-13
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 5c34a11..473f9fc 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -1009,7 +1009,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-12
+  Last Published: 
2018-12-13
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index a9fff1f..2044dc9 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3831,7 +3831,7 @@
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 date
-"Wed Dec 12 14:43:53 UTC 2018"
+"Thu Dec 13 14:43:55 UTC 2018"
 
 
 
@@ -3845,7 +3845,7 @@
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 srcChecksum

[08/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
index de5c849..8959499 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
@@ -36,1192 +36,1212 @@
 028import java.util.Map;
 029import java.util.Set;
 030import 
java.util.concurrent.ConcurrentHashMap;
-031import 
java.util.concurrent.ThreadPoolExecutor;
-032import 
java.util.concurrent.locks.ReadWriteLock;
-033import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-034
-035import 
org.apache.hadoop.conf.Configuration;
-036import 
org.apache.hadoop.fs.FSDataInputStream;
-037import org.apache.hadoop.fs.FileStatus;
-038import org.apache.hadoop.fs.FileSystem;
-039import org.apache.hadoop.fs.Path;
-040import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.MetaTableAccessor;
-043import 
org.apache.hadoop.hbase.Stoppable;
-044import 
org.apache.hadoop.hbase.TableName;
-045import 
org.apache.hadoop.hbase.client.TableDescriptor;
-046import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-047import 
org.apache.hadoop.hbase.client.TableState;
-048import 
org.apache.hadoop.hbase.errorhandling.ForeignException;
-049import 
org.apache.hadoop.hbase.executor.ExecutorService;
-050import 
org.apache.hadoop.hbase.ipc.RpcServer;
-051import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-052import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-053import 
org.apache.hadoop.hbase.master.MasterServices;
-054import 
org.apache.hadoop.hbase.master.MetricsMaster;
-055import 
org.apache.hadoop.hbase.master.SnapshotSentinel;
-056import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-057import 
org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
-058import 
org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure;
-059import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-060import 
org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure;
-061import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-062import 
org.apache.hadoop.hbase.procedure.Procedure;
-063import 
org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
-064import 
org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
-065import 
org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
-066import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-067import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-068import 
org.apache.hadoop.hbase.security.User;
-069import 
org.apache.hadoop.hbase.security.access.AccessChecker;
-070import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-071import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-072import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-073import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-074import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-075import 
org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
-076import 
org.apache.hadoop.hbase.snapshot.SnapshotExistsException;
-077import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-078import 
org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-079import 
org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
-080import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-081import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-082import 
org.apache.hadoop.hbase.util.FSUtils;
-083import 
org.apache.hadoop.hbase.util.NonceKey;
-084import 
org.apache.yetus.audience.InterfaceAudience;
-085import 
org.apache.yetus.audience.InterfaceStability;
-086import 
org.apache.zookeeper.KeeperException;
-087import org.slf4j.Logger;
-088import org.slf4j.LoggerFactory;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type;
-094
-095/**
-096 * This class manages the procedure of 
taking and restoring snapshots. There is only one
-097 * SnapshotManager for the master.
-098 * p
-099 * The class provides methods for 
monitoring in-progress snapshot actions.
-100 * p
-101 * Note: Currently there can only be one 
snapshot being taken at a time over the cluster. This is a
-102 * 

[07/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index 42dc187..6557d2d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -711,247 +711,251 @@
 703if (flushType != FlushType.NORMAL) 
{
 704  
TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
 705  long start = 
EnvironmentEdgeManager.currentTime();
-706  synchronized (this.blockSignal) {
-707boolean blocked = false;
-708long startTime = 0;
-709boolean interrupted = false;
-710try {
-711  flushType = 
isAboveHighWaterMark();
-712  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
-713
server.cacheFlusher.setFlushType(flushType);
-714if (!blocked) {
-715  startTime = 
EnvironmentEdgeManager.currentTime();
-716  if 
(!server.getRegionServerAccounting().isOffheap()) {
-717logMsg("global memstore 
heapsize",
-718
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-719
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-720  } else {
-721switch (flushType) {
-722case 
ABOVE_OFFHEAP_HIGHER_MARK:
-723  logMsg("the global 
offheap memstore datasize",
-724  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
-725  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-726  break;
-727case 
ABOVE_ONHEAP_HIGHER_MARK:
-728  logMsg("global memstore 
heapsize",
-729  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-730  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
-731  break;
-732default:
-733  break;
-734}
-735  }
-736}
-737blocked = true;
-738wakeupFlushThread();
-739try {
-740  // we should be able to 
wait forever, but we've seen a bug where
-741  // we miss a notify, so put 
a 5 second bound on it at least.
-742  blockSignal.wait(5 * 
1000);
-743} catch (InterruptedException 
ie) {
-744  LOG.warn("Interrupted while 
waiting");
-745  interrupted = true;
-746}
-747long took = 
EnvironmentEdgeManager.currentTime() - start;
-748LOG.warn("Memstore is above 
high water mark and block " + took + "ms");
-749flushType = 
isAboveHighWaterMark();
-750  }
-751} finally {
-752  if (interrupted) {
-753
Thread.currentThread().interrupt();
+706  long nextLogTimeMs = start;
+707  synchronized (this.blockSignal) {
+708boolean blocked = false;
+709long startTime = 0;
+710boolean interrupted = false;
+711try {
+712  flushType = 
isAboveHighWaterMark();
+713  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
+714
server.cacheFlusher.setFlushType(flushType);
+715if (!blocked) {
+716  startTime = 
EnvironmentEdgeManager.currentTime();
+717  if 
(!server.getRegionServerAccounting().isOffheap()) {
+718logMsg("global memstore 
heapsize",
+719
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+720
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+721  } else {
+722switch (flushType) {
+723case 
ABOVE_OFFHEAP_HIGHER_MARK:
+724  logMsg("the global 
offheap memstore datasize",
+725  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
+726  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+727  break;
+728case 
ABOVE_ONHEAP_HIGHER_MARK:
+729  logMsg("global memstore 
heapsize",
+730  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+731  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
+732  break;
+733default:
+734  break;
+735}
+736  }
+737}
+738blocked = true;
+739

[04/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
index 42dc187..6557d2d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
@@ -711,247 +711,251 @@
 703if (flushType != FlushType.NORMAL) 
{
 704  
TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
 705  long start = 
EnvironmentEdgeManager.currentTime();
-706  synchronized (this.blockSignal) {
-707boolean blocked = false;
-708long startTime = 0;
-709boolean interrupted = false;
-710try {
-711  flushType = 
isAboveHighWaterMark();
-712  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
-713
server.cacheFlusher.setFlushType(flushType);
-714if (!blocked) {
-715  startTime = 
EnvironmentEdgeManager.currentTime();
-716  if 
(!server.getRegionServerAccounting().isOffheap()) {
-717logMsg("global memstore 
heapsize",
-718
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-719
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-720  } else {
-721switch (flushType) {
-722case 
ABOVE_OFFHEAP_HIGHER_MARK:
-723  logMsg("the global 
offheap memstore datasize",
-724  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
-725  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-726  break;
-727case 
ABOVE_ONHEAP_HIGHER_MARK:
-728  logMsg("global memstore 
heapsize",
-729  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-730  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
-731  break;
-732default:
-733  break;
-734}
-735  }
-736}
-737blocked = true;
-738wakeupFlushThread();
-739try {
-740  // we should be able to 
wait forever, but we've seen a bug where
-741  // we miss a notify, so put 
a 5 second bound on it at least.
-742  blockSignal.wait(5 * 
1000);
-743} catch (InterruptedException 
ie) {
-744  LOG.warn("Interrupted while 
waiting");
-745  interrupted = true;
-746}
-747long took = 
EnvironmentEdgeManager.currentTime() - start;
-748LOG.warn("Memstore is above 
high water mark and block " + took + "ms");
-749flushType = 
isAboveHighWaterMark();
-750  }
-751} finally {
-752  if (interrupted) {
-753
Thread.currentThread().interrupt();
+706  long nextLogTimeMs = start;
+707  synchronized (this.blockSignal) {
+708boolean blocked = false;
+709long startTime = 0;
+710boolean interrupted = false;
+711try {
+712  flushType = 
isAboveHighWaterMark();
+713  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
+714
server.cacheFlusher.setFlushType(flushType);
+715if (!blocked) {
+716  startTime = 
EnvironmentEdgeManager.currentTime();
+717  if 
(!server.getRegionServerAccounting().isOffheap()) {
+718logMsg("global memstore 
heapsize",
+719
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+720
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+721  } else {
+722switch (flushType) {
+723case 
ABOVE_OFFHEAP_HIGHER_MARK:
+724  logMsg("the global 
offheap memstore datasize",
+725  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
+726  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+727  break;
+728case 
ABOVE_ONHEAP_HIGHER_MARK:
+729  logMsg("global memstore 
heapsize",
+730  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+731  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
+732  break;
+733default:
+734  break;
+735}
+736  }
+737}
+738blocked = true;
+739wakeupFlushThread();
+740try {
+741  // 

[01/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e28123691 -> c727a708d


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/testdevapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.html
index 49c9649..1294010 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.html
@@ -39,145 +39,173 @@
 031import 
org.apache.hadoop.hbase.executor.ExecutorService;
 032import 
org.apache.hadoop.hbase.master.MasterFileSystem;
 033import 
org.apache.hadoop.hbase.master.MasterServices;
-034import 
org.apache.hadoop.hbase.master.MetricsMaster;
-035import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-036import 
org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
-037import 
org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
-038import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-039import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-040import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-041import 
org.apache.zookeeper.KeeperException;
-042import org.junit.ClassRule;
-043import org.junit.Rule;
-044import org.junit.Test;
-045import 
org.junit.experimental.categories.Category;
-046import org.junit.rules.TestName;
-047import org.mockito.Mockito;
-048
-049/**
-050 * Test basic snapshot manager 
functionality
-051 */
-052@Category({MasterTests.class, 
SmallTests.class})
-053public class TestSnapshotManager {
-054
-055  @ClassRule
-056  public static final HBaseClassTestRule 
CLASS_RULE =
-057  
HBaseClassTestRule.forClass(TestSnapshotManager.class);
-058
-059  private static final 
HBaseTestingUtility UTIL = new HBaseTestingUtility();
-060
-061  @Rule
-062  public TestName name = new 
TestName();
-063
-064  MasterServices services = 
Mockito.mock(MasterServices.class);
-065  MetricsMaster metrics = 
Mockito.mock(MetricsMaster.class);
-066  ProcedureCoordinator coordinator = 
Mockito.mock(ProcedureCoordinator.class);
-067  ExecutorService pool = 
Mockito.mock(ExecutorService.class);
-068  MasterFileSystem mfs = 
Mockito.mock(MasterFileSystem.class);
-069  FileSystem fs;
-070  {
-071try {
-072  fs = UTIL.getTestFileSystem();
-073} catch (IOException e) {
-074  throw new 
RuntimeException("Couldn't get test filesystem", e);
-075}
-076  }
-077
-078   private SnapshotManager 
getNewManager() throws IOException, KeeperException {
-079return 
getNewManager(UTIL.getConfiguration());
-080  }
-081
-082  private SnapshotManager 
getNewManager(final Configuration conf)
-083  throws IOException, KeeperException 
{
-084Mockito.reset(services);
-085
Mockito.when(services.getConfiguration()).thenReturn(conf);
-086
Mockito.when(services.getMasterFileSystem()).thenReturn(mfs);
-087
Mockito.when(mfs.getFileSystem()).thenReturn(fs);
-088
Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir());
-089return new SnapshotManager(services, 
metrics, coordinator, pool);
-090  }
-091
-092  @Test
-093  public void testInProcess() throws 
KeeperException, IOException {
-094final TableName tableName = 
TableName.valueOf(name.getMethodName());
-095SnapshotManager manager = 
getNewManager();
-096TakeSnapshotHandler handler = 
Mockito.mock(TakeSnapshotHandler.class);
-097assertFalse("Manager is in process 
when there is no current handler",
-098
manager.isTakingSnapshot(tableName));
-099
manager.setSnapshotHandlerForTesting(tableName, handler);
-100
Mockito.when(handler.isFinished()).thenReturn(false);
-101assertTrue("Manager isn't in process 
when handler is running",
-102
manager.isTakingSnapshot(tableName));
-103
Mockito.when(handler.isFinished()).thenReturn(true);
-104assertFalse("Manager is process when 
handler isn't running",
-105
manager.isTakingSnapshot(tableName));
-106  }
-107
-108  /**
-109   * Verify the snapshot support based on 
the configuration.
-110   */
-111  @Test
-112  public void 
testSnapshotSupportConfiguration() throws Exception {
-113// No configuration (no cleaners, not 
enabled): snapshot feature disabled
-114Configuration conf = new 
Configuration();
-115SnapshotManager manager = 
getNewManager(conf);
-116assertFalse("Snapshot should be 
disabled with no configuration", isSnapshotSupported(manager));
-117
-118// force snapshot feature to be 
enabled
-119conf = new Configuration();
-120
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
-121manager = getNewManager(conf);
-122assertTrue("Snapshot should be 
enabled", isSnapshotSupported(manager));
-123
-124 

hbase-site git commit: INFRA-10751 Empty commit

2018-12-13 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c727a708d -> c41320576


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c4132057
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c4132057
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c4132057

Branch: refs/heads/asf-site
Commit: c413205768639e13cda9eac2387a01cdfb776e42
Parents: c727a70
Author: jenkins 
Authored: Thu Dec 13 14:53:06 2018 +
Committer: jenkins 
Committed: Thu Dec 13 14:53:06 2018 +

--

--




[05/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index 42dc187..6557d2d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -711,247 +711,251 @@
 703if (flushType != FlushType.NORMAL) 
{
 704  
TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
 705  long start = 
EnvironmentEdgeManager.currentTime();
-706  synchronized (this.blockSignal) {
-707boolean blocked = false;
-708long startTime = 0;
-709boolean interrupted = false;
-710try {
-711  flushType = 
isAboveHighWaterMark();
-712  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
-713
server.cacheFlusher.setFlushType(flushType);
-714if (!blocked) {
-715  startTime = 
EnvironmentEdgeManager.currentTime();
-716  if 
(!server.getRegionServerAccounting().isOffheap()) {
-717logMsg("global memstore 
heapsize",
-718
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-719
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-720  } else {
-721switch (flushType) {
-722case 
ABOVE_OFFHEAP_HIGHER_MARK:
-723  logMsg("the global 
offheap memstore datasize",
-724  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
-725  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
-726  break;
-727case 
ABOVE_ONHEAP_HIGHER_MARK:
-728  logMsg("global memstore 
heapsize",
-729  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
-730  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
-731  break;
-732default:
-733  break;
-734}
-735  }
-736}
-737blocked = true;
-738wakeupFlushThread();
-739try {
-740  // we should be able to 
wait forever, but we've seen a bug where
-741  // we miss a notify, so put 
a 5 second bound on it at least.
-742  blockSignal.wait(5 * 
1000);
-743} catch (InterruptedException 
ie) {
-744  LOG.warn("Interrupted while 
waiting");
-745  interrupted = true;
-746}
-747long took = 
EnvironmentEdgeManager.currentTime() - start;
-748LOG.warn("Memstore is above 
high water mark and block " + took + "ms");
-749flushType = 
isAboveHighWaterMark();
-750  }
-751} finally {
-752  if (interrupted) {
-753
Thread.currentThread().interrupt();
+706  long nextLogTimeMs = start;
+707  synchronized (this.blockSignal) {
+708boolean blocked = false;
+709long startTime = 0;
+710boolean interrupted = false;
+711try {
+712  flushType = 
isAboveHighWaterMark();
+713  while (flushType != 
FlushType.NORMAL  !server.isStopped()) {
+714
server.cacheFlusher.setFlushType(flushType);
+715if (!blocked) {
+716  startTime = 
EnvironmentEdgeManager.currentTime();
+717  if 
(!server.getRegionServerAccounting().isOffheap()) {
+718logMsg("global memstore 
heapsize",
+719
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+720
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+721  } else {
+722switch (flushType) {
+723case 
ABOVE_OFFHEAP_HIGHER_MARK:
+724  logMsg("the global 
offheap memstore datasize",
+725  
server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(),
+726  
server.getRegionServerAccounting().getGlobalMemStoreLimit());
+727  break;
+728case 
ABOVE_ONHEAP_HIGHER_MARK:
+729  logMsg("global memstore 
heapsize",
+730  
server.getRegionServerAccounting().getGlobalMemStoreHeapSize(),
+731  
server.getRegionServerAccounting().getGlobalOnHeapMemStoreLimit());
+732  break;
+733default:
+734  break;
+735}
+736  }
+737}
+738blocked = 

[02/13] hbase-site git commit: Published site at f32d2618430f70e1b0db92785294b2c7892cc02b.

2018-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c727a708/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
index ae144fb..42b6912 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.html
@@ -26,408 +26,433 @@
 018package 
org.apache.hadoop.hbase.master.cleaner;
 019
 020import static 
org.junit.Assert.assertEquals;
-021import static 
org.junit.Assert.assertTrue;
-022import static org.junit.Assert.fail;
-023
-024import java.io.IOException;
-025import java.util.Collection;
-026import java.util.List;
-027import java.util.Set;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-032import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.TableName;
-035import 
org.apache.hadoop.hbase.client.Admin;
-036import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-037import 
org.apache.hadoop.hbase.client.TableDescriptor;
-038import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-039import 
org.apache.hadoop.hbase.master.HMaster;
-040import 
org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
-041import 
org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
-042import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-043import 
org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
-044import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-045import 
org.apache.hadoop.hbase.regionserver.HRegion;
-046import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-047import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-048import 
org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-049import 
org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-050import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-051import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-052import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-053import 
org.apache.hadoop.hbase.util.Bytes;
-054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-057import org.junit.After;
-058import org.junit.AfterClass;
-059import org.junit.Before;
-060import org.junit.BeforeClass;
-061import org.junit.ClassRule;
-062import org.junit.Test;
-063import 
org.junit.experimental.categories.Category;
-064import org.mockito.Mockito;
-065import org.slf4j.Logger;
-066import org.slf4j.LoggerFactory;
-067
-068import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-069
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-071import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-072import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+021import static 
org.junit.Assert.assertFalse;
+022import static 
org.junit.Assert.assertTrue;
+023import static org.junit.Assert.fail;
+024
+025import java.io.IOException;
+026import java.util.Collection;
+027import java.util.List;
+028import java.util.Set;
+029import java.util.regex.Pattern;
+030
+031import 
org.apache.hadoop.conf.Configuration;
+032import org.apache.hadoop.fs.FileSystem;
+033import org.apache.hadoop.fs.Path;
+034import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+035import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+036import 
org.apache.hadoop.hbase.HConstants;
+037import 
org.apache.hadoop.hbase.TableName;
+038import org.apache.hadoop.hbase.Waiter;
+039import 
org.apache.hadoop.hbase.client.Admin;
+040import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+041import 
org.apache.hadoop.hbase.client.Put;
+042import 
org.apache.hadoop.hbase.client.SnapshotType;
+043import 
org.apache.hadoop.hbase.client.Table;
+044import 
org.apache.hadoop.hbase.client.TableDescriptor;
+045import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+046import 
org.apache.hadoop.hbase.master.HMaster;
+047import 

[15/50] [abbrv] hbase git commit: HBASE-18248 Warn if monitored RPC task has been tied up beyond a configurable threshold

2018-12-13 Thread apurtell
HBASE-18248 Warn if monitored RPC task has been tied up beyond a configurable 
threshold


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac241dcd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac241dcd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac241dcd

Branch: refs/heads/branch-1.3
Commit: ac241dcde6850df351d6dbe8b00a319488fb465a
Parents: c35fa2a
Author: Andrew Purtell 
Authored: Wed Aug 9 18:11:28 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../monitoring/MonitoredRPCHandlerImpl.java |  8 +-
 .../hadoop/hbase/monitoring/MonitoredTask.java  |  2 +
 .../hbase/monitoring/MonitoredTaskImpl.java | 16 +++-
 .../hadoop/hbase/monitoring/TaskMonitor.java| 90 +---
 .../hbase/monitoring/TestTaskMonitor.java   | 29 ++-
 5 files changed, 128 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac241dcd/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
index a29595b..08c8c9f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -251,6 +251,12 @@ public class MonitoredRPCHandlerImpl extends 
MonitoredTaskImpl
 if (getState() != State.RUNNING) {
   return super.toString();
 }
-return super.toString() + ", rpcMethod=" + getRPC();
+return super.toString()
++ ", queuetimems=" + getRPCQueueTime()
++ ", starttimems=" + getRPCStartTime()
++ ", clientaddress=" + clientAddress
++ ", remoteport=" + remotePort
++ ", packetlength=" + getRPCPacketLength()
++ ", rpcMethod=" + getRPC();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac241dcd/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
index 34fd8ce..5ba0969 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
@@ -46,6 +46,7 @@ public interface MonitoredTask extends Cloneable {
   State getState();
   long getStateTime();
   long getCompletionTimestamp();
+  long getWarnTime();
 
   void markComplete(String msg);
   void pause(String msg);
@@ -55,6 +56,7 @@ public interface MonitoredTask extends Cloneable {
 
   void setStatus(String status);
   void setDescription(String description);
+  void setWarnTime(final long t);
 
   List getStatusJournal();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac241dcd/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
index 5270e7d..ed04212 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
@@ -34,7 +34,8 @@ class MonitoredTaskImpl implements MonitoredTask {
   private long startTime;
   private long statusTime;
   private long stateTime;
-  
+  private long warnTime;
+
   private volatile String status;
   private volatile String description;
   
@@ -49,6 +50,7 @@ class MonitoredTaskImpl implements MonitoredTask {
 startTime = System.currentTimeMillis();
 statusTime = startTime;
 stateTime = startTime;
+warnTime = startTime;
   }
 
   private static class StatusJournalEntryImpl implements StatusJournalEntry {
@@ -118,7 +120,12 @@ class MonitoredTaskImpl implements MonitoredTask {
   public long getStateTime() {
 return stateTime;
   }
-  
+
+  @Override
+  public long getWarnTime() {
+return warnTime;
+  }
+
   @Override
   public long getCompletionTimestamp() {
 if (state == State.COMPLETE || state == State.ABORTED) {
@@ -171,6 +178,11 @@ class MonitoredTaskImpl implements MonitoredTask {
   }
 
   @Override
+  public void setWarnTime(long t) {
+this.warnTime = t;
+  }
+
+  @Override
   public void 

[22/50] [abbrv] hbase git commit: HBASE-20047 AuthenticationTokenIdentifier should provide a toString

2018-12-13 Thread apurtell
HBASE-20047 AuthenticationTokenIdentifier should provide a toString

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/35e94c98
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/35e94c98
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/35e94c98

Branch: refs/heads/branch-1.3
Commit: 35e94c98785095f83c7dd0c7b97b04678fb9c875
Parents: ecfa9a8
Author: maoling 
Authored: Mon Mar 12 22:01:16 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../hbase/security/token/AuthenticationTokenIdentifier.java   | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/35e94c98/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
index 4299003..568ace7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
@@ -186,4 +186,11 @@ public class AuthenticationTokenIdentifier extends 
TokenIdentifier {
   public int hashCode() {
 return (int)sequenceNumber;
   }
+
+  @Override
+  public String toString() {
+return "(username=" + username + ", keyId="
++ keyId + ", issueDate=" + issueDate
++ ", expirationDate=" + expirationDate + ", sequenceNumber=" + 
sequenceNumber + ")";
+  }
 }



[19/50] [abbrv] hbase git commit: HBASE-19391 Calling HRegion#initializeRegionInternals from a region replica can still re-create a region directory

2018-12-13 Thread apurtell
HBASE-19391 Calling HRegion#initializeRegionInternals from a region replica can 
still re-create a region directory

HBASE-19391 Calling HRegion#initializeRegionInternals from a region replica can 
still re-create a region directory; ADDEDNDUM to fix TestRegionOpen failure

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/453935f3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/453935f3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/453935f3

Branch: refs/heads/branch-1.3
Commit: 453935f31d3cd56e0db54fce34fdd3078d0b7981
Parents: 8a4b2b5
Author: Esteban Gutierrez 
Authored: Wed Feb 21 15:28:50 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../hbase/regionserver/HRegionFileSystem.java   | 29 ++--
 .../hbase/regionserver/TestRegionOpen.java  |  4 ++-
 2 files changed, 18 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/453935f3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 619358c..7672204 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -889,28 +889,29 @@ public class HRegionFileSystem {
   public static HRegionFileSystem createRegionOnFileSystem(final Configuration 
conf,
   final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) 
throws IOException {
 HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, 
regionInfo);
-Path regionDir = regionFs.getRegionDir();
 
-if (fs.exists(regionDir)) {
-  LOG.warn("Trying to create a region that already exists on disk: " + 
regionDir);
-  throw new IOException("The specified region already exists on disk: " + 
regionDir);
-}
+// We only create a .regioninfo and the region directory if this is the 
default region replica
+if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+  Path regionDir = regionFs.getRegionDir();
+  if (fs.exists(regionDir)) {
+LOG.warn("Trying to create a region that already exists on disk: " + 
regionDir);
+throw new IOException("The specified region already exists on disk: " 
+ regionDir);
+  }
 
-// Create the region directory
-if (!createDirOnFileSystem(fs, conf, regionDir)) {
-  LOG.warn("Unable to create the region directory: " + regionDir);
-  throw new IOException("Unable to create region directory: " + regionDir);
-}
+  // Create the region directory
+  if (!createDirOnFileSystem(fs, conf, regionDir)) {
+LOG.warn("Unable to create the region directory: " + regionDir);
+throw new IOException("Unable to create region directory: " + 
regionDir);
+  }
 
-// Write HRI to a file in case we need to recover hbase:meta
-// Only primary replicas should write region info
-if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+  // Write HRI to a file in case we need to recover hbase:meta
   regionFs.writeRegionInfoOnFilesystem(false);
+  return regionFs;
 } else {
   if (LOG.isDebugEnabled())
 LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
 }
-return regionFs;
+return null;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/453935f3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
index edb1d52..62aabce 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -97,7 +98,8 @@ public class TestRegionOpen {
 assertEquals(2, exec.getCompletedTaskCount());
   }
 
-  @Test(timeout = 6)
+  @Ignore // Needs rewrite since HBASE-19391 which returns null out of 
createRegionOnFileSystem
+  

[10/50] [abbrv] hbase git commit: HBASE-17118 StoreScanner leaked in KeyValueHeap (binlijin)

2018-12-13 Thread apurtell
HBASE-17118 StoreScanner leaked in KeyValueHeap (binlijin)

HBASE-17118 StoreScanner leaked in KeyValueHeap (addendum)

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25135dd0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25135dd0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25135dd0

Branch: refs/heads/branch-1.3
Commit: 25135dd0a13538e4dd4ffdd34dcb3120a010be3b
Parents: 286ade8
Author: tedyu 
Authored: Thu Nov 17 08:46:11 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../hadoop/hbase/regionserver/KeyValueHeap.java | 68 
 1 file changed, 42 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25135dd0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
index ac76bfd..5073c7e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
@@ -24,6 +24,8 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.PriorityQueue;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -44,6 +46,7 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 @InterfaceAudience.Private
 public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
 implements KeyValueScanner, InternalScanner {
+  private static final Log LOG = LogFactory.getLog(KeyValueHeap.class);
   protected PriorityQueue heap = null;
 
   /**
@@ -289,35 +292,48 @@ public class KeyValueHeap extends 
NonReversedNonLazyKeyValueScanner
 heap.add(current);
 current = null;
 
-KeyValueScanner scanner;
-while ((scanner = heap.poll()) != null) {
-  Cell topKey = scanner.peek();
-  if (comparator.getComparator().compare(seekKey, topKey) <= 0) {
-// Top KeyValue is at-or-after Seek KeyValue. We only know that all
-// scanners are at or after seekKey (because fake keys of
-// scanners where a lazy-seek operation has been done are not greater
-// than their real next keys) but we still need to enforce our
-// invariant that the top scanner has done a real seek. This way
-// StoreScanner and RegionScanner do not have to worry about fake keys.
-heap.add(scanner);
-current = pollRealKV();
-return current != null;
-  }
+KeyValueScanner scanner = null;
+try {
+  while ((scanner = heap.poll()) != null) {
+Cell topKey = scanner.peek();
+if (comparator.getComparator().compare(seekKey, topKey) <= 0) {
+  // Top KeyValue is at-or-after Seek KeyValue. We only know that all
+  // scanners are at or after seekKey (because fake keys of
+  // scanners where a lazy-seek operation has been done are not greater
+  // than their real next keys) but we still need to enforce our
+  // invariant that the top scanner has done a real seek. This way
+  // StoreScanner and RegionScanner do not have to worry about fake
+  // keys.
+  heap.add(scanner);
+  scanner = null;
+  current = pollRealKV();
+  return current != null;
+}
 
-  boolean seekResult;
-  if (isLazy && heap.size() > 0) {
-// If there is only one scanner left, we don't do lazy seek.
-seekResult = scanner.requestSeek(seekKey, forward, useBloom);
-  } else {
-seekResult = NonLazyKeyValueScanner.doRealSeek(
-scanner, seekKey, forward);
-  }
+boolean seekResult;
+if (isLazy && heap.size() > 0) {
+  // If there is only one scanner left, we don't do lazy seek.
+  seekResult = scanner.requestSeek(seekKey, forward, useBloom);
+} else {
+  seekResult = NonLazyKeyValueScanner.doRealSeek(scanner, seekKey,
+  forward);
+}
 
-  if (!seekResult) {
-scanner.close();
-  } else {
-heap.add(scanner);
+if (!seekResult) {
+  scanner.close();
+} else {
+  heap.add(scanner);
+}
+  }
+} catch (Exception e) {
+  if (scanner != null) {
+try {
+  scanner.close();
+} catch (Exception ce) {
+  LOG.warn("close KeyValueScanner 

[24/50] [abbrv] hbase git commit: HBASE-19553 Old replica regions should be cleared from AM memory after primary region split or merge

2018-12-13 Thread apurtell
HBASE-19553 Old replica regions should be cleared from AM memory after primary 
region split or merge

Signed-off-by: Ted Yu 
Signed-off-by: Huaxiang Sun 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c799c18
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c799c18
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c799c18

Branch: refs/heads/branch-1.3
Commit: 5c799c18f684121352b1f071676a442a8efdc8a1
Parents: b3f911c
Author: Pankaj Kumar 
Authored: Thu Jan 25 13:25:22 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  |  17 
 .../apache/hadoop/hbase/client/TestAdmin1.java  | 102 +--
 2 files changed, 111 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c799c18/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 63ef5ff..79fe596 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -4256,6 +4256,8 @@ public class AssignmentManager extends ZooKeeperListener {
   LOG.warn("Couldn't assign all replica(s) of region " + mergedHri+ " 
because of " +
 ie.getMessage());
 }
+// Remove merged region's replica from AM's memory
+clearReplicaRegions(c);
   }
 
   private void doSplittingOfReplicas(final HRegionInfo parentHri, final 
HRegionInfo hri_a,
@@ -4299,6 +4301,21 @@ public class AssignmentManager extends ZooKeeperListener 
{
 } catch (InterruptedException e) {
   LOG.warn("Caught exception " + e + " while trying to assign replica(s) 
of daughter(s)");
 }
+// Remove parent region's replica from AM's memory
+clearReplicaRegions(c);
+  }
+
+  /*
+   * Clear the replica regions after region split or merge.
+   */
+  private void clearReplicaRegions(Collection> regionInfos) {
+for (List regionInfoList : regionInfos) {
+  for (HRegionInfo regionInfo : regionInfoList) {
+if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+  regionStates.deleteRegion(regionInfo);
+}
+  }
+}
   }
 
   private void prepareDaughterReplicaForAssignment(HRegionInfo daughterHri, 
HRegionInfo parentHri,

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c799c18/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index d153a85..e9fa3e3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -56,7 +57,9 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
@@ -1168,15 +1171,8 @@ public class TestAdmin1 {
 // regions.
 // Set up a table with 3 regions and replication set to 3
 TableName tableName = 
TableName.valueOf("testSplitAndMergeWithReplicaTable");
-HTableDescriptor desc = new HTableDescriptor(tableName);
-desc.setRegionReplication(3);
 byte[] cf = "f".getBytes();
-HColumnDescriptor hcd = new HColumnDescriptor(cf);
-desc.addFamily(hcd);
-byte[][] splitRows = new byte[2][];
-splitRows[0] = new byte[]{(byte)'4'};
-splitRows[1] = new byte[]{(byte)'7'};
-TEST_UTIL.getHBaseAdmin().createTable(desc, 

[48/50] [abbrv] hbase git commit: HBASE-21275 - Disable TRACE HTTP method for thrift http server (branch 1 only)

2018-12-13 Thread apurtell
HBASE-21275 - Disable TRACE HTTP method for thrift http server (branch 1 only)

Conflicts:

hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/82f187ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/82f187ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/82f187ef

Branch: refs/heads/branch-1.3
Commit: 82f187efba6e476cd1b88bc6ae8b238e4c670288
Parents: 10c4f59
Author: Wellington Chevreuil 

Authored: Wed Oct 17 12:11:41 2018 +0100
Committer: Andrew Purtell 
Committed: Thu Dec 13 10:22:39 2018 -0800

--
 .../hadoop/hbase/thrift/ThriftServerRunner.java | 10 ++-
 .../hbase/thrift/TestThriftHttpServer.java  | 66 ++--
 2 files changed, 69 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/82f187ef/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index e13b148..76c033d 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.thrift.generated.TScan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ConnectionCache;
 import org.apache.hadoop.hbase.util.DNS;
+import org.apache.hadoop.hbase.util.HttpServerUtil;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -128,6 +129,7 @@ import org.mortbay.jetty.Server;
 import org.mortbay.jetty.nio.SelectChannelConnector;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.ServletHolder;
+import org.mortbay.jetty.webapp.WebAppContext;
 import org.mortbay.thread.QueuedThreadPool;
 
 import com.google.common.base.Joiner;
@@ -203,6 +205,9 @@ public class ThriftServerRunner implements Runnable {
   private final boolean securityEnabled;
   private final boolean doAsEnabled;
 
+  static String THRIFT_HTTP_ALLOW_OPTIONS_METHOD = 
"hbase.thrift.http.allow.options.method";
+  private static boolean THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = false;
+
   /** An enum of server implementation selections */
   enum ImplType {
 HS_HA("hsha", true, THsHaServer.class, true),
@@ -410,11 +415,14 @@ public class ThriftServerRunner implements Runnable {
 
 httpServer = new Server();
 // Context handler
-Context context = new Context(httpServer, "/", Context.SESSIONS);
+Context context = new WebAppContext();
 context.setContextPath("/");
+context.setResourceBase("hbase-webapps/");
 String httpPath = "/*";
 httpServer.setHandler(context);
 context.addServlet(new ServletHolder(thriftHttpServlet), httpPath);
+HttpServerUtil.constrainHttpMethods(context,
+conf.getBoolean(THRIFT_HTTP_ALLOW_OPTIONS_METHOD, 
THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
 
 // set up Jetty and run the embedded server
 Connector connector = new SelectChannelConnector();

http://git-wip-us.apache.org/repos/asf/hbase/blob/82f187ef/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index cf14e87..b21de38 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -18,10 +18,13 @@
  */
 package org.apache.hadoop.hbase.thrift;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
+import java.net.HttpURLConnection;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -149,6 +152,46 @@ public class TestThriftHttpServer {
 runThriftServer(0);
   }
 
+  @Test
+  public void testThriftServerHttpTraceForbiddenWhenOptionsDisabled() throws 
Exception {
+// HTTP TRACE method should be disabled for security
+// See https://www.owasp.org/index.php/Cross_Site_Tracing
+checkHttpMethods("TRACE", HttpURLConnection.HTTP_FORBIDDEN);
+  }
+
+  @Test
+  public void 

[36/50] [abbrv] hbase git commit: HBASE-20558 Port HBASE-17854 (Use StealJobQueue in HFileCleaner after HBASE-17215) to branch-1

2018-12-13 Thread apurtell
HBASE-20558 Port HBASE-17854 (Use StealJobQueue in HFileCleaner after 
HBASE-17215) to branch-1

The third port commit of HBASE-20555

Signed-off-by: Zach York 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/976f07e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/976f07e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/976f07e8

Branch: refs/heads/branch-1.3
Commit: 976f07e87cbeb35b80a7ca8eea46f973e932bf83
Parents: 30b1dc0
Author: TAK LON WU 
Authored: Mon Jul 9 16:34:06 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:20 2018 -0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 98 +---
 .../apache/hadoop/hbase/util/StealJobQueue.java | 29 +-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 28 +++---
 .../hadoop/hbase/util/TestStealJobQueue.java| 14 +--
 4 files changed, 113 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/976f07e8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index defe851..70548b4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -35,6 +34,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.StealJobQueue;
 
 /**
  * This Chore, every time it runs, will clear the HFiles in the hfile archive
@@ -56,23 +56,23 @@ public class HFileCleaner extends 
CleanerChore impleme
   "hbase.regionserver.thread.hfilecleaner.throttle";
   public final static int DEFAULT_HFILE_DELETE_THROTTLE_THRESHOLD = 64 * 1024 
* 1024;// 64M
 
-  // Configuration key for large queue size
-  public final static String LARGE_HFILE_DELETE_QUEUE_SIZE =
+  // Configuration key for large queue initial size
+  public final static String LARGE_HFILE_QUEUE_INIT_SIZE =
   "hbase.regionserver.hfilecleaner.large.queue.size";
-  public final static int DEFAULT_LARGE_HFILE_DELETE_QUEUE_SIZE = 1048576;
+  public final static int DEFAULT_LARGE_HFILE_QUEUE_INIT_SIZE = 10240;
 
-  // Configuration key for small queue size
-  public final static String SMALL_HFILE_DELETE_QUEUE_SIZE =
+  // Configuration key for small queue initial size
+  public final static String SMALL_HFILE_QUEUE_INIT_SIZE =
   "hbase.regionserver.hfilecleaner.small.queue.size";
-  public final static int DEFAULT_SMALL_HFILE_DELETE_QUEUE_SIZE = 1048576;
+  public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
-  BlockingQueue largeFileQueue;
+  StealJobQueue largeFileQueue;
   BlockingQueue smallFileQueue;
   private int throttlePoint;
-  private int largeQueueSize;
-  private int smallQueueSize;
+  private int largeQueueInitSize;
+  private int smallQueueInitSize;
   private List threads = new ArrayList();
   private boolean running;
 
@@ -93,12 +93,12 @@ public class HFileCleaner extends 
CleanerChore impleme
   directory, MASTER_HFILE_CLEANER_PLUGINS, params);
 throttlePoint =
 conf.getInt(HFILE_DELETE_THROTTLE_THRESHOLD, 
DEFAULT_HFILE_DELETE_THROTTLE_THRESHOLD);
-largeQueueSize =
-conf.getInt(LARGE_HFILE_DELETE_QUEUE_SIZE, 
DEFAULT_LARGE_HFILE_DELETE_QUEUE_SIZE);
-smallQueueSize =
-conf.getInt(SMALL_HFILE_DELETE_QUEUE_SIZE, 
DEFAULT_SMALL_HFILE_DELETE_QUEUE_SIZE);
-largeFileQueue = new 
LinkedBlockingQueue(largeQueueSize);
-smallFileQueue = new 
LinkedBlockingQueue(smallQueueSize);
+largeQueueInitSize =
+conf.getInt(LARGE_HFILE_QUEUE_INIT_SIZE, 
DEFAULT_LARGE_HFILE_QUEUE_INIT_SIZE);
+smallQueueInitSize =
+conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, 
DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
+largeFileQueue = new StealJobQueue<>(largeQueueInitSize, 
smallQueueInitSize);
+smallFileQueue = largeFileQueue.getStealFromQueue();
 startHFileDeleteThreads();
   }
 
@@ -151,6 +151,7 @@ public class HFileCleaner extends 

[31/50] [abbrv] hbase git commit: HBASE-20770 WAL cleaner logs way too much; gets clogged when lots of work to do

2018-12-13 Thread apurtell
HBASE-20770 WAL cleaner logs way too much; gets clogged when lots of work to do

General log cleanup; setting stuff that can flood the log to TRACE.

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bfec2a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bfec2a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bfec2a8

Branch: refs/heads/branch-1.3
Commit: 7bfec2a8989083c70c7fcc1b8c7a1b7af4d65eb8
Parents: 03885b4
Author: Michael Stack 
Authored: Sat Jun 23 23:29:11 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../hadoop/hbase/security/HBaseSaslRpcClient.java   | 16 
 .../hadoop/hbase/security/SaslClientHandler.java| 12 ++--
 .../java/org/apache/hadoop/hbase/ipc/RpcServer.java |  4 ++--
 .../hbase/master/balancer/RegionLocationFinder.java |  3 +--
 .../hadoop/hbase/master/cleaner/CleanerChore.java   |  6 ++
 5 files changed, 19 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bfec2a8/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index bb6763f..f18b489 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -200,8 +200,8 @@ public class HBaseSaslRpcClient {
   return false;
 }
 saslToken = new byte[len];
-if (LOG.isDebugEnabled())
-  LOG.debug("Will read input token of size " + saslToken.length
+if (LOG.isTraceEnabled())
+  LOG.trace("Will read input token of size " + saslToken.length
   + " for processing by initSASLContext");
 inStream.readFully(saslToken);
   }
@@ -209,8 +209,8 @@ public class HBaseSaslRpcClient {
   while (!saslClient.isComplete()) {
 saslToken = saslClient.evaluateChallenge(saslToken);
 if (saslToken != null) {
-  if (LOG.isDebugEnabled())
-LOG.debug("Will send token of size " + saslToken.length
+  if (LOG.isTraceEnabled())
+LOG.trace("Will send token of size " + saslToken.length
 + " from initSASLContext.");
   outStream.writeInt(saslToken.length);
   outStream.write(saslToken, 0, saslToken.length);
@@ -219,14 +219,14 @@ public class HBaseSaslRpcClient {
 if (!saslClient.isComplete()) {
   readStatus(inStream);
   saslToken = new byte[inStream.readInt()];
-  if (LOG.isDebugEnabled())
-LOG.debug("Will read input token of size " + saslToken.length
+  if (LOG.isTraceEnabled())
+LOG.trace("Will read input token of size " + saslToken.length
 + " for processing by initSASLContext");
   inStream.readFully(saslToken);
 }
   }
-  if (LOG.isDebugEnabled()) {
-LOG.debug("SASL client context established. Negotiated QoP: "
+  if (LOG.isTraceEnabled()) {
+LOG.trace("SASL client context established. Negotiated QoP: "
 + saslClient.getNegotiatedProperty(Sasl.QOP));
   }
   return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bfec2a8/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java
index c79cde7..f43cb24 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java
@@ -212,16 +212,16 @@ public class SaslClientHandler extends 
ChannelDuplexHandler {
   }
 }
 saslToken = new byte[len];
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Will read input token of size " + saslToken.length
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Will read input token of size " + saslToken.length
   + " for processing by initSASLContext");
 }
 in.readBytes(saslToken);
 
 saslToken = evaluateChallenge(saslToken);
 if (saslToken != null) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Will send token of size " + saslToken.length + " from 
initSASLContext.");
+  if (LOG.isTraceEnabled()) {
+

[41/50] [abbrv] hbase git commit: HBASE-20469 Directory used for sidelining old recovered edits files should be made configurable

2018-12-13 Thread apurtell
HBASE-20469 Directory used for sidelining old recovered edits files should be 
made configurable

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1d362f6d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1d362f6d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1d362f6d

Branch: refs/heads/branch-1.3
Commit: 1d362f6de3e5c21c9f124656a928a555e4853a2b
Parents: 2702f23
Author: Nihal Jain 
Authored: Sat Apr 21 16:40:21 2018 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:18:54 2018 -0800

--
 .../java/org/apache/hadoop/hbase/wal/WALSplitter.java | 10 ++
 .../java/org/apache/hadoop/hbase/wal/TestWALSplit.java|  7 +--
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1d362f6d/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index c88e6d3..45dbb11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -539,6 +539,7 @@ public class WALSplitter {
* creating it if necessary.
* @param logEntry
* @param fileNameBeingSplit the file being split currently. Used to 
generate tmp file name.
+   * @param tmpDirName of the directory used to sideline old recovered edits 
file
* @param conf
* @return Path to file into which to dump split log edits.
* @throws IOException
@@ -546,8 +547,7 @@ public class WALSplitter {
   @SuppressWarnings("deprecation")
   @VisibleForTesting
   static Path getRegionSplitEditsPath(final Entry logEntry, String 
fileNameBeingSplit,
-  Configuration conf)
-  throws IOException {
+  String tmpDirName, Configuration conf) throws IOException {
 FileSystem fs = FileSystem.get(conf);
 Path rootDir = FSUtils.getRootDir(conf);
 Path tableDir = FSUtils.getTableDir(rootDir, 
logEntry.getKey().getTablename());
@@ -562,7 +562,7 @@ public class WALSplitter {
   return null;
 }
 if (fs.exists(dir) && fs.isFile(dir)) {
-  Path tmp = new Path("/tmp");
+  Path tmp = new Path(tmpDirName);
   if (!fs.exists(tmp)) {
 fs.mkdirs(tmp);
   }
@@ -1587,8 +1587,10 @@ public class WALSplitter {
  * @return a path with a write for that path. caller should close.
  */
 WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
+  String tmpDirName = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
   Path regionedits = getRegionSplitEditsPath(entry,
-  fileBeingSplit.getPath().getName(), conf);
+  fileBeingSplit.getPath().getName(), tmpDirName, conf);
   if (regionedits == null) {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1d362f6d/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 0d0bbec..9b9c23d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -123,6 +123,7 @@ public class TestWALSplit {
   private Path OLDLOGDIR;
   private Path CORRUPTDIR;
   private Path TABLEDIR;
+  private String TMPDIRNAME;
 
   private static final int NUM_WRITERS = 10;
   private static final int ENTRIES = 10; // entries per writer per region
@@ -185,6 +186,8 @@ public class TestWALSplit {
 OLDLOGDIR = new Path(HBASELOGDIR, HConstants.HREGION_OLDLOGDIR_NAME);
 CORRUPTDIR = new Path(HBASELOGDIR, HConstants.CORRUPT_DIR_NAME);
 TABLEDIR = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
+TMPDIRNAME = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+  HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
 REGIONS.clear();
 Collections.addAll(REGIONS, "bbb", "ccc");
 InstrumentedLogWriter.activateFailure = false;
@@ -386,7 +389,7 @@ public class TestWALSplit {
 TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
 new WALEdit());
 Path p = WALSplitter.getRegionSplitEditsPath(entry,
-FILENAME_BEING_SPLIT, conf);
+FILENAME_BEING_SPLIT, TMPDIRNAME, conf);
 String parentOfParent = p.getParent().getParent().getName();
 assertEquals(parentOfParent, 

[45/50] [abbrv] hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-12-13 Thread apurtell
HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a4baeebd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a4baeebd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a4baeebd

Branch: refs/heads/branch-1.3
Commit: a4baeebdfc16878011633aa548b90dd53afddc37
Parents: 09069df
Author: Allan Yang 
Authored: Wed Sep 26 19:40:43 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:25:46 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a4baeebd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d41e7cb..71785fd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2639,7 +2639,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 status.setStatus(msg);
 
 if (rsServices != null && rsServices.getMetrics() != null) {
-  rsServices.getMetrics().updateFlush(time - startTime,
+  rsServices.getMetrics().updateFlush(time,
 totalFlushableSizeOfFlushableStores, flushedOutputFileSize);
 }
 



[35/50] [abbrv] hbase git commit: HBASE-20401 Make `MAX_WAIT` and `waitIfNotFinished` in CleanerContext configurable

2018-12-13 Thread apurtell
HBASE-20401 Make `MAX_WAIT` and `waitIfNotFinished` in CleanerContext 
configurable

Signed-off-by: Reid Chan 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/614b5f6e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/614b5f6e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/614b5f6e

Branch: refs/heads/branch-1.3
Commit: 614b5f6e724db594b37d900d5b0fa4ada636eee5
Parents: 2434162
Author: TAK LON WU 
Authored: Sun Jul 22 21:16:45 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:20 2018 -0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 65 
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 65 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 15 -
 .../hbase/master/cleaner/TestLogsCleaner.java   | 24 ++--
 4 files changed, 138 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/614b5f6e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 8f0b4be..6691f66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -77,6 +78,16 @@ public class HFileCleaner extends 
CleanerChore impleme
   "hbase.regionserver.hfilecleaner.small.thread.count";
   public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
 
+  public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.timeout.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L;
+
+  public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.check.interval.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue largeFileQueue;
@@ -86,6 +97,8 @@ public class HFileCleaner extends 
CleanerChore impleme
   private int smallQueueInitSize;
   private int largeFileDeleteThreadNumber;
   private int smallFileDeleteThreadNumber;
+  private long cleanerThreadTimeoutMsec;
+  private long cleanerThreadCheckIntervalMsec;
   private List threads = new ArrayList();
   private boolean running;
 
@@ -116,6 +129,11 @@ public class HFileCleaner extends 
CleanerChore impleme
 conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, 
DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
 smallFileDeleteThreadNumber =
 conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, 
DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
+cleanerThreadTimeoutMsec =
+conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, 
DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
+cleanerThreadCheckIntervalMsec =
+conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
+DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
 startHFileDeleteThreads();
   }
 
@@ -147,7 +165,7 @@ public class HFileCleaner extends 
CleanerChore impleme
 }
 // wait for each submitted task to finish
 for (HFileDeleteTask task : tasks) {
-  if (task.getResult()) {
+  if (task.getResult(cleanerThreadCheckIntervalMsec)) {
 deletedFiles++;
   }
 }
@@ -160,7 +178,7 @@ public class HFileCleaner extends 
CleanerChore impleme
* @return HFileDeleteTask to track progress
*/
   private HFileDeleteTask deleteFile(FileStatus file) {
-HFileDeleteTask task = new HFileDeleteTask(file);
+HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
 boolean enqueued = dispatch(task);
 return enqueued ? task : null;
   }
@@ -299,17 +317,17 @@ public class HFileCleaner extends 
CleanerChore impleme
   }
 
   static class HFileDeleteTask implements Comparable {
-private static final long MAX_WAIT = 60 * 1000L;
-private static final long WAIT_UNIT = 1000L;
 
 boolean done = false;
 boolean result;
 final Path filePath;
 final long fileLength;
+final long timeoutMsec;
 
-public HFileDeleteTask(FileStatus file) {
+public HFileDeleteTask(FileStatus file, long timeoutMsec) {
   

[49/50] [abbrv] hbase git commit: HBASE-21582 If call HBaseAdmin#snapshotAsync but forget call isSnapshotFinished, then SnapshotHFileCleaner will skip to run every time

2018-12-13 Thread apurtell
HBASE-21582 If call HBaseAdmin#snapshotAsync but forget call 
isSnapshotFinished, then SnapshotHFileCleaner will skip to run every time

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d3729417
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d3729417
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d3729417

Branch: refs/heads/branch-1.3
Commit: d3729417462dc70a1fe3bdf5ffb3bea136987d4c
Parents: e063aa8
Author: huzheng 
Authored: Tue Dec 11 20:27:56 2018 +0800
Committer: Andrew Purtell 
Committed: Thu Dec 13 10:25:10 2018 -0800

--
 .../hbase/master/snapshot/SnapshotManager.java  | 55 +++-
 .../master/cleaner/TestSnapshotFromMaster.java  | 32 +++-
 .../master/snapshot/TestSnapshotManager.java| 36 +++--
 3 files changed, 104 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d3729417/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index f480092..2859cc5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -27,7 +27,11 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -85,6 +89,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 /**
  * This class manages the procedure of taking and restoring snapshots. There 
is only one
  * SnapshotManager for the master.
@@ -113,7 +120,9 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* At this point, if the user asks for the snapshot/restore status, the 
result will be
* snapshot done if exists or failed if it doesn't exists.
*/
-  private static final int SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT = 60 * 1000;
+  public static final String HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS =
+  "hbase.snapshot.sentinels.cleanup.timeoutMillis";
+  public static final long SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT = 
60 * 1000L;
 
   /** Enable or disable snapshot support */
   public static final String HBASE_SNAPSHOT_ENABLED = "hbase.snapshot.enabled";
@@ -144,8 +153,12 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
   // The map is always accessed and modified under the object lock using 
synchronized.
   // snapshotTable() will insert an Handler in the table.
   // isSnapshotDone() will remove the handler requested if the operation is 
finished.
-  private Map snapshotHandlers =
+  private final Map snapshotHandlers =
   new HashMap();
+  private final ScheduledExecutorService scheduleThreadPool =
+Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
+  
.setNameFormat("SnapshotHandlerChoreCleaner").setDaemon(true).build());
+  private ScheduledFuture snapshotHandlerChoreCleanerTask;
 
   // Restore Sentinels map, with table name as key.
   // The map is always accessed and modified under the object lock using 
synchronized.
@@ -173,17 +186,29 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
* @param coordinator procedure coordinator instance.  exposed for testing.
* @param pool HBase ExecutorServcie instance, exposed for testing.
*/
-  public SnapshotManager(final MasterServices master, final MetricsMaster 
metricsMaster,
-  ProcedureCoordinator coordinator, ExecutorService pool)
+  @VisibleForTesting
+  SnapshotManager(final MasterServices master, ProcedureCoordinator 
coordinator,
+  ExecutorService pool, int sentinelCleanInterval)
   throws IOException, UnsupportedOperationException {
 this.master = master;
 
 this.rootDir = master.getMasterFileSystem().getRootDir();
-checkSnapshotSupport(master.getConfiguration(), 

[42/50] [abbrv] hbase git commit: HBASE-20734 Colocate recovered edits directory with hbase.wal.dir

2018-12-13 Thread apurtell
HBASE-20734 Colocate recovered edits directory with hbase.wal.dir

Signed-off-by: Andrew Purtell 
Signed-off-by: Reid Chan 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9675ad38
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9675ad38
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9675ad38

Branch: refs/heads/branch-1.3
Commit: 9675ad387627588bdf0c37f56f701a0d0074d196
Parents: 1d362f6
Author: Zach York 
Authored: Wed Jun 27 16:18:53 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:22:42 2018 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 190 ---
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  27 +++
 .../apache/hadoop/hbase/wal/WALSplitter.java| 166 
 .../hadoop/hbase/regionserver/TestHRegion.java  |   8 +-
 .../hbase/regionserver/TestRecoveredEdits.java  |   2 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   6 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java |   2 +-
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  50 ++---
 8 files changed, 263 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9675ad38/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9d8c0c6..d41e7cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -41,6 +41,7 @@ import java.util.NavigableSet;
 import java.util.RandomAccess;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ConcurrentHashMap;
@@ -309,6 +310,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   private final int rowLockWaitDuration;
   static final int DEFAULT_ROWLOCK_WAIT_DURATION = 3;
 
+  private Path regionDir;
+  private FileSystem walFS;
+
   // The internal wait duration to acquire a lock before read/update
   // from the region. It is not per row. The purpose of this wait time
   // is to avoid waiting a long time while the region is busy, so that
@@ -840,7 +844,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
   // Recover any edits if available.
   maxSeqId = Math.max(maxSeqId,
-replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, 
reporter, status));
+replayRecoveredEditsIfAny(maxSeqIdInStores, reporter, status));
   // Make sure mvcc is up to max.
   this.mvcc.advanceTo(maxSeqId);
 }
@@ -884,8 +888,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // is opened before recovery completes. So we add a safety bumper to avoid 
new sequence number
 // overlaps used sequence numbers
 if (this.writestate.writesEnabled) {
-  nextSeqid = 
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs
-  .getRegionDir(), nextSeqid, (this.recovering ? (this.flushPerChanges 
+ 1000) : 1));
+  nextSeqid = WALSplitter.writeRegionSequenceIdFile(getWalFileSystem(), 
getWALRegionDir(),
+  nextSeqid, (this.recovering ? (this.flushPerChanges + 1000) : 
1));
 } else {
   nextSeqid++;
 }
@@ -1023,11 +1027,11 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   getRegionServerServices().getServerName(), storeFiles);
 WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), 
regionEventDesc, mvcc);
 
-// Store SeqId in HDFS when a region closes
+// Store SeqId in WAL FileSystem when a region closes
 // checking region folder exists is due to many tests which delete the 
table folder while a
 // table is still online
-if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) {
-  WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), 
this.fs.getRegionDir(),
+if (getWalFileSystem().exists(getWALRegionDir())) {
+  WALSplitter.writeRegionSequenceIdFile(getWalFileSystem(), 
getWALRegionDir(),
 mvcc.getReadPoint(), 0);
 }
   }
@@ -1702,6 +1706,32 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return this.fs;
   }
 
+  /** @return the WAL {@link HRegionFileSystem} used by 

[29/50] [abbrv] hbase git commit: HBASE-20647 Backport HBASE-20616 "TruncateTableProcedure is stuck in retry loop in TRUNCATE_TABLE_CREATE_FS_LAYOUT state" to branch-1

2018-12-13 Thread apurtell
HBASE-20647 Backport HBASE-20616 "TruncateTableProcedure is stuck in retry loop 
in TRUNCATE_TABLE_CREATE_FS_LAYOUT state" to branch-1

Signed-off-by: tedyu 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03885b43
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03885b43
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03885b43

Branch: refs/heads/branch-1.3
Commit: 03885b434f0cfbaaa0ed367d5d30c062eb2a48b7
Parents: 511a4c4
Author: Toshihiro Suzuki 
Authored: Fri May 25 09:37:19 2018 +0900
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../procedure/TruncateTableProcedure.java   |  32 --
 .../procedure/TestTruncateTableProcedure.java   | 101 +--
 2 files changed, 116 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/03885b43/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index da539a1..8471777 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -18,29 +18,29 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import java.io.InputStream;
+import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -102,15 +102,15 @@ public class TruncateTableProcedure
   break;
 case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
   DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, 
true);
+  setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
   if (!preserveSplits) {
-// if we are not preserving splits, generate a new single region
 regions = 
Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
   } else {
 regions = recreateRegionInfo(regions);
   }
-  setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
   break;
 case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
+  DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, 
true);
   regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, 
regions);
   CreateTableProcedure.updateTableDescCache(env, getTableName());
   setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
@@ -183,7 +183,9 @@ public class TruncateTableProcedure
 
   @Override
   protected boolean acquireLock(final MasterProcedureEnv env) {
-if (env.waitInitialized(this)) return false;
+if (env.waitInitialized(this)) {
+  return false;
+}
 return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, 
getTableName());
   }
 
@@ -295,4 +297,12 @@ public class TruncateTableProcedure
   });

[26/50] [abbrv] hbase git commit: HBASE-20404 Fixes to CleanChore correctness and operability.

2018-12-13 Thread apurtell
HBASE-20404 Fixes to CleanChore correctness and operability.

* Make CleanerChore less chatty: move WARN message to DEBUG when we expect 
non-empty dirs
* Make CleanerChore less chatty: move IOE we'll retry to INFO
* CleanerChore should treat IOE for FileStatus as a failure
* Add tests asserting assumptions in above

Signed-off-by: Reid Chan 
Signed-off-by: Mike Drob 

 Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java
hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/be4915eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/be4915eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/be4915eb

Branch: refs/heads/branch-1.3
Commit: be4915eb4f6604edd570b2bb937800e663f76043
Parents: 193d1dc
Author: Sean Busbey 
Authored: Fri Apr 13 00:57:35 2018 -0500
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../hbase/master/cleaner/CleanerChore.java  | 24 +++--
 .../hbase/master/cleaner/TestCleanerChore.java  | 54 ++--
 .../apache/hadoop/hbase/util/TestFSUtils.java   | 12 +
 3 files changed, 84 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/be4915eb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index dc614fb..5a4c407 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -406,6 +407,10 @@ public abstract class CleanerChore extends Schedu
 T act() throws IOException;
   }
 
+  /**
+   * Attemps to clean up a directory, its subdirectories, and files.
+   * Return value is true if everything was deleted. false on partial / total 
failures.
+   */
   private class CleanerTask extends RecursiveTask {
 private final Path dir;
 private final boolean root;
@@ -425,6 +430,8 @@ public abstract class CleanerChore extends Schedu
   List subDirs;
   final List files;
   try {
+// if dir doesn't exist, we'll get null back for both of these
+// which will fall through to succeeding.
 subDirs = FSUtils.listStatusWithStatusFilter(fs, dir, new 
FileStatusFilter() {
   @Override
   public boolean accept(FileStatus f) {
@@ -438,8 +445,8 @@ public abstract class CleanerChore extends Schedu
   }
 });
   } catch (IOException ioe) {
-LOG.warn(dir + " doesn't exist, just skip it. ", ioe);
-return true;
+LOG.warn("failed to get FileStatus for contents of '" + dir + "'", 
ioe);
+return false;
   }
 
   boolean nullSubDirs = subDirs == null;
@@ -497,8 +504,19 @@ public abstract class CleanerChore extends Schedu
   try {
 LOG.trace("Start deleting " + type + " under " + dir);
 deleted = deletion.act();
+  } catch (PathIsNotEmptyDirectoryException exception) {
+// N.B. HDFS throws this exception when we try to delete a non-empty 
directory, but
+// LocalFileSystem throws a bare IOException. So some test code will 
get the verbose
+// message below.
+LOG.debug("Couldn't delete '" + dir + "' yet because it isn't empty. 
Probably transient. " +
+"exception details at TRACE.");
+LOG.trace("Couldn't delete '" + dir + "' yet because it isn't empty 
w/exception.",
+exception);
+deleted = false;
   } catch (IOException ioe) {
-LOG.warn("Could not delete " + type + " under " + dir, ioe);
+LOG.info("Could not delete " + type + " under " + dir + ". might be 
transient; we'll " +
+"retry. if it keeps happening, use following exception when asking 
on mailing list.",
+ioe);
 deleted = false;
   }
   LOG.trace("Finish deleting " + type + " under " + dir + " deleted=" + 
deleted);


[05/50] [abbrv] hbase git commit: HBASE-17924 Consider sorting the row order when processing multi() ops before taking rowlocks (Allan Yang)

2018-12-13 Thread apurtell
HBASE-17924 Consider sorting the row order when processing multi() ops before 
taking rowlocks
 (Allan Yang)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e4756e6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e4756e6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e4756e6

Branch: refs/heads/branch-1.3
Commit: 4e4756e68887e3a1d77e4483f52f431b441611a5
Parents: 25135dd
Author: Andrew Purtell 
Authored: Mon May 8 16:23:13 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../hbase/regionserver/RSRpcServices.java   | 21 ++--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 21 +++-
 2 files changed, 39 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e4756e6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 151a864..cd7a4c7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -34,6 +34,7 @@ import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -847,6 +848,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 long before = EnvironmentEdgeManager.currentTime();
 boolean batchContainsPuts = false, batchContainsDelete = false;
 try {
+  /** HBASE-17924
+   * mutationActionMap is a map to map the relation between mutations and 
actions
+   * since mutation array may have been reoredered.In order to return the 
right
+   * result or exception to the corresponding actions, We need to know 
which action
+   * is the mutation belong to. We can't sort ClientProtos.Action array, 
since they
+   * are bonded to cellscanners.
+   */
+  Map mutationActionMap = new 
HashMap();
   int i = 0;
   for (ClientProtos.Action action: mutations) {
 MutationProto m = action.getMutation();
@@ -858,6 +867,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   mutation = ProtobufUtil.toDelete(m, cells);
   batchContainsDelete = true;
 }
+mutationActionMap.put(mutation, action);
 mArray[i++] = mutation;
 quota.addMutation(mutation);
   }
@@ -865,11 +875,15 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   if (!region.getRegionInfo().isMetaTable()) {
 regionServer.cacheFlusher.reclaimMemStoreMemory();
   }
-
+  // HBASE-17924
+  // sort to improve lock efficiency
+  Arrays.sort(mArray);
   OperationStatus[] codes = region.batchMutate(mArray, HConstants.NO_NONCE,
 HConstants.NO_NONCE);
   for (i = 0; i < codes.length; i++) {
-int index = mutations.get(i).getIndex();
+Mutation currentMutation = mArray[i];
+ClientProtos.Action currentAction = 
mutationActionMap.get(currentMutation);
+int index = currentAction.getIndex();
 Exception e = null;
 switch (codes[i].getOperationStatusCode()) {
   case BAD_FAMILY:
@@ -1927,6 +1941,9 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   walEntries.add(walEntry);
 }
 if(edits!=null && !edits.isEmpty()) {
+  // HBASE-17924
+  // sort to improve lock efficiency
+  Collections.sort(edits);
   long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ?
 entry.getKey().getOrigSequenceNumber() : 
entry.getKey().getLogSequenceNumber();
   OperationStatus[] result = doReplayBatchOp(region, edits, 
replaySeqId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e4756e6/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index eed9a8b..cc065e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -2272,7 +2272,7 @@ public class WALSplitter {
   }
 
   /** A 

[08/50] [abbrv] hbase git commit: Amend HBASE-18830 TestCanaryTool does not check Canary monitor's error code

2018-12-13 Thread apurtell
Amend HBASE-18830 TestCanaryTool does not check Canary monitor's error code

Adjust exception control flow to fix findbugs warning
NP_NULL_ON_SOME_PATH_EXCEPTION, Possible null pointer dereference of
regionSink in org.apache.hadoop.hbase.tool.Canary$RegionMonitor.run()
on exception path


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d0d3aa2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d0d3aa2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d0d3aa2

Branch: refs/heads/branch-1.3
Commit: 5d0d3aa2b962a27a7eb33b20ef91476893380b89
Parents: e72ed4f
Author: Andrew Purtell 
Authored: Tue Sep 26 08:33:19 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d0d3aa2/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index c7323fe..dcaa057 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -1022,19 +1022,14 @@ public final class Canary implements Tool {
   if (this.initAdmin()) {
 try {
   List> taskFutures = new LinkedList<>();
-  RegionStdOutSink regionSink = null;
-  try {
-regionSink = this.getSink();
-  } catch (RuntimeException e) {
-LOG.error("Run RegionMonitor failed!", e);
-this.errorCode = ERROR_EXIT_CODE;
-  }
+  RegionStdOutSink regionSink = this.getSink();
   if (this.targets != null && this.targets.length > 0) {
 String[] tables = generateMonitorTables(this.targets);
 // Check to see that each table name passed in the 
-readTableTimeouts argument is also passed as a monitor target.
 if (! new 
HashSet<>(Arrays.asList(tables)).containsAll(this.configuredReadTableTimeouts.keySet()))
 {
   LOG.error("-readTableTimeouts can only specify read timeouts for 
monitor targets passed via command line.");
   this.errorCode = USAGE_EXIT_CODE;
+  return;
 }
 this.initialized = true;
 for (String table : tables) {
@@ -1096,7 +1091,9 @@ public final class Canary implements Tool {
 } catch (Exception e) {
   LOG.error("Run regionMonitor failed", e);
   this.errorCode = ERROR_EXIT_CODE;
-}
+} finally {
+  this.done = true;
+   }
   }
   this.done = true;
 }



[13/50] [abbrv] hbase git commit: HBASE-19364 Truncate_preserve fails with table when replica region > 1

2018-12-13 Thread apurtell
HBASE-19364 Truncate_preserve fails with table when replica region > 1

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd1726f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd1726f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd1726f5

Branch: refs/heads/branch-1.3
Commit: cd1726f53181e655246bda16b63a7f1fb990955a
Parents: 4b6e589
Author: Pankaj Kumar 
Authored: Thu Dec 7 22:51:01 2017 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 45 --
 .../master/procedure/ProcedureSyncWait.java | 25 ++
 .../procedure/TruncateTableProcedure.java   |  2 +-
 .../MasterProcedureTestingUtility.java  |  5 ++
 .../procedure/TestTruncateTableProcedure.java   | 50 
 5 files changed, 112 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd1726f5/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3f11558..440f8c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -392,12 +392,27 @@ public class MetaTableAccessor {
* @return Ordered list of {@link HRegionInfo}.
* @throws IOException
*/
-  public static List getTableRegions(ZooKeeperWatcher zkw,
-  Connection connection, TableName tableName, final boolean 
excludeOfflinedSplitParents)
-throws IOException {
+  public static List getTableRegions(ZooKeeperWatcher zkw, 
Connection connection,
+  TableName tableName, final boolean excludeOfflinedSplitParents) throws 
IOException {
+return getTableRegions(zkw, connection, tableName, 
excludeOfflinedSplitParents, false);
+  }
+
+  /**
+   * Gets all of the regions of the specified table.
+   * @param zkw zookeeper connection to access meta table
+   * @param connection connection we're using
+   * @param tableName table we're looking for
+   * @param excludeOfflinedSplitParents If true, do not include offlined split 
parents in the
+   *  return.
+   * @param excludeReplicaRegions If true, do not include replica regions in 
the result.
+   * @return Ordered list of {@link HRegionInfo}.
+   */
+  public static List getTableRegions(ZooKeeperWatcher zkw, 
Connection connection,
+  TableName tableName, final boolean excludeOfflinedSplitParents,
+  final boolean excludeReplicaRegions) throws IOException {
 List> result = null;
-  result = getTableRegionsAndLocations(zkw, connection, tableName,
-excludeOfflinedSplitParents);
+result = getTableRegionsAndLocations(zkw, connection, tableName, 
excludeOfflinedSplitParents,
+  excludeReplicaRegions);
 return getListOfHRegionInfos(result);
   }
 
@@ -478,7 +493,22 @@ public class MetaTableAccessor {
   public static List> 
getTableRegionsAndLocations(
   ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
   final boolean excludeOfflinedSplitParents) throws IOException {
+return getTableRegionsAndLocations(zkw, connection, tableName, 
excludeOfflinedSplitParents,
+  false);
+  }
 
+  /**
+   * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta 
location
+   * @param connection connection we're using
+   * @param tableName table to work with
+   * @param excludeOfflinedSplitParents Exclude offline regions
+   * @param excludeReplicaRegions If true, do not include replica regions in 
the result.
+   * @return List of regioninfos and server addresses.
+   */
+  public static List> 
getTableRegionsAndLocations(
+  ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
+  final boolean excludeOfflinedSplitParents, final boolean 
excludeReplicaRegions)
+  throws IOException {
 if (tableName.equals(TableName.META_TABLE_NAME)) {
   // If meta, do a bit of special handling.
   ServerName serverName = new 
MetaTableLocator().getMetaRegionLocation(zkw);
@@ -514,6 +544,11 @@ public class MetaTableAccessor {
   }
   for (HRegionLocation loc : current.getRegionLocations()) {
 if (loc != null) {
+  // 

[17/50] [abbrv] hbase git commit: HBASE-18786 FileNotFoundException should not be silently handled for primary region replicas

2018-12-13 Thread apurtell
HBASE-18786 FileNotFoundException should not be silently handled for primary 
region replicas

Amend HBASE-18786 Remove now invalid unit test TestCorruptedRegionStoreFile

Amend HBASE-18786 Remove now invalid unit test TestRegionServerAbort

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b442a7fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b442a7fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b442a7fc

Branch: refs/heads/branch-1.3
Commit: b442a7fc92763fb63ed1c01ef4072c9823836d79
Parents: ac241dc
Author: Andrew Purtell 
Authored: Thu Sep 21 13:49:07 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |   2 -
 .../TestCorruptedRegionStoreFile.java   | 249 ---
 .../regionserver/TestRegionServerAbort.java | 210 
 3 files changed, 461 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b442a7fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 74148ea..499cfe4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6410,8 +6410,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 if (this.joinedHeap != null) {
   result = this.joinedHeap.requestSeek(kv, true, true) || result;
 }
-  } catch (FileNotFoundException e) {
-throw handleFileNotFound(e);
   } finally {
 closeRegionOperation();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b442a7fc/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
deleted file mode 100644
index 969ef34..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSVisitor;
-import org.apache.hadoop.hbase.util.TestTableName;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-import static 

[04/50] [abbrv] hbase git commit: HBASE-18058 Zookeeper retry sleep time should have an upper limit (Allan Yang)

2018-12-13 Thread apurtell
HBASE-18058 Zookeeper retry sleep time should have an upper limit (Allan Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ed4f7d1b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ed4f7d1b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ed4f7d1b

Branch: refs/heads/branch-1.3
Commit: ed4f7d1b1b4497caf859da6119c2940fdfaba9a9
Parents: 4e4756e
Author: tedyu 
Authored: Fri May 19 10:58:38 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java  | 8 
 .../main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java  | 3 ++-
 hbase-common/src/main/resources/hbase-default.xml| 7 +++
 3 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ed4f7d1b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index e74aeb4..f799148 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -98,20 +98,20 @@ public class RecoverableZooKeeper {
   private static final int ID_LENGTH_SIZE =  Bytes.SIZEOF_INT;
 
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
-  Watcher watcher, int maxRetries, int retryIntervalMillis)
+  Watcher watcher, int maxRetries, int retryIntervalMillis, int 
maxSleepTime)
   throws IOException {
-this(quorumServers, sessionTimeout, watcher, maxRetries, 
retryIntervalMillis,
+this(quorumServers, sessionTimeout, watcher, maxRetries, 
retryIntervalMillis, maxSleepTime,
 null);
   }
 
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
   justification="None. Its always been this way.")
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
-  Watcher watcher, int maxRetries, int retryIntervalMillis, String 
identifier)
+  Watcher watcher, int maxRetries, int retryIntervalMillis, int 
maxSleepTime, String identifier)
   throws IOException {
 // TODO: Add support for zk 'chroot'; we don't add it to the quorumServers 
String as we should.
 this.retryCounterFactory =
-  new RetryCounterFactory(maxRetries+1, retryIntervalMillis);
+  new RetryCounterFactory(maxRetries+1, retryIntervalMillis, maxSleepTime);
 
 if (identifier == null || identifier.length() == 0) {
   // the identifier = processID@hostName

http://git-wip-us.apache.org/repos/asf/hbase/blob/ed4f7d1b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 3cbc317..4f4b2eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -133,10 +133,11 @@ public class ZKUtil {
 int retry = conf.getInt("zookeeper.recovery.retry", 3);
 int retryIntervalMillis =
   conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
+int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 
6);
 zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout",
 1000);
 return new RecoverableZooKeeper(ensemble, timeout, watcher,
-retry, retryIntervalMillis, identifier);
+retry, retryIntervalMillis, maxSleepTime, identifier);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/ed4f7d1b/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index e28f355..88e57d7 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -103,6 +103,13 @@ possible configurations would overwhelm and obscure the 
important.
 and running -->
 
   
+zookeeper.recovery.retry.maxsleeptime
+6
+Max sleep time before retry zookeeper operations in 
milliseconds,
+a max time is needed here so that sleep time won't grow unboundedly
+
+  
+  
 hbase.local.dir
 ${hbase.tmp.dir}/local/
 Directory on the local filesystem to be used



[18/50] [abbrv] hbase git commit: HBASE-20141 Fix TooManyFiles exception when RefreshingChannels

2018-12-13 Thread apurtell
HBASE-20141 Fix TooManyFiles exception when RefreshingChannels

HBASE-19435 implements a fix for reopening file channels when they are 
unexpectedly closed
to avoid disabling the BucketCache. However, it was missed that the 
channels might not
actually be completely closed (the write or read channel might still be open
(see 
https://docs.oracle.com/javase/7/docs/api/java/nio/channels/ClosedChannelException.html)
This commit closes any open channels before creating a new channel.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ecfa9a8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ecfa9a8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ecfa9a8a

Branch: refs/heads/branch-1.3
Commit: ecfa9a8a7997a188c1f3ec2d7776245664dc7dbe
Parents: 8bc84fd
Author: Zach York 
Authored: Wed Feb 28 10:40:38 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../hadoop/hbase/io/hfile/bucket/FileIOEngine.java | 13 +++--
 .../hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java | 11 +++
 2 files changed, 22 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ecfa9a8a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index cb454d4..7b773bd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
@@ -234,7 +233,17 @@ public class FileIOEngine implements IOEngine {
 return fileNum;
   }
 
-  private void refreshFileConnection(int accessFileNum) throws 
FileNotFoundException {
+  @VisibleForTesting
+  FileChannel[] getFileChannels() {
+return fileChannels;
+  }
+
+  @VisibleForTesting
+  void refreshFileConnection(int accessFileNum) throws IOException {
+FileChannel fileChannel = fileChannels[accessFileNum];
+if (fileChannel != null) {
+  fileChannel.close();
+}
 rafs[accessFileNum] = new RandomAccessFile(filePaths[accessFileNum], "rw");
 fileChannels[accessFileNum] = rafs[accessFileNum].getChannel();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ecfa9a8a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index adf7fd0..8c2bc6e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -19,10 +19,13 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 
 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -129,4 +132,12 @@ public class TestFileIOEngine {
 fileIOEngine.read(ByteBuffer.wrap(data2), offset);
 assertArrayEquals(data1, data2);
   }
+
+  @Test
+  public void testRefreshFileConnectionClosesConnections() throws IOException {
+FileChannel fileChannel = fileIOEngine.getFileChannels()[0];
+assertNotNull(fileChannel);
+fileIOEngine.refreshFileConnection(0);
+assertFalse(fileChannel.isOpen());
+  }
 }



[03/50] [abbrv] hbase git commit: HBASE-17991 Add more details about compaction queue on /dump (Guangxu Cheng)

2018-12-13 Thread apurtell
HBASE-17991 Add more details about compaction queue on /dump (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3e0d6939
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3e0d6939
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3e0d6939

Branch: refs/heads/branch-1.3
Commit: 3e0d693935e9a03cf4b9a078b32f2b7cc4e0c95f
Parents: ed4f7d1
Author: tedyu 
Authored: Thu May 4 08:28:02 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/CompactSplitThread.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3e0d6939/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 6f95a09..71a8803 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -460,6 +460,7 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
 private int queuedPriority;
 private ThreadPoolExecutor parent;
 private User user;
+private long time;
 
 public CompactionRunner(Store store, Region region,
 CompactionContext compaction, ThreadPoolExecutor parent, User user) {
@@ -471,12 +472,14 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   ? store.getCompactPriority() : compaction.getRequest().getPriority();
   this.parent = parent;
   this.user = user;
+  this.time =  System.currentTimeMillis();
 }
 
 @Override
 public String toString() {
   return (this.compaction != null) ? ("Request = " + 
compaction.getRequest())
-  : ("Store = " + store.toString() + ", pri = " + queuedPriority);
+  : ("regionName = " + region.toString() + ", storeName = " + 
store.toString() +
+ ", priority = " + queuedPriority + ", time = " + time);
 }
 
 private void doCompaction(User user) {



[28/50] [abbrv] hbase git commit: HBASE-20554 "WALs outstanding" message from CleanerChore is noisy

2018-12-13 Thread apurtell
HBASE-20554 "WALs outstanding" message from CleanerChore is noisy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/511a4c49
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/511a4c49
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/511a4c49

Branch: refs/heads/branch-1.3
Commit: 511a4c49ceb223b289462121c788994fbfa2713b
Parents: 017cb75
Author: Andrew Purtell 
Authored: Wed May 9 10:16:44 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../hadoop/hbase/master/cleaner/CleanerChore.java   | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/511a4c49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 5a4c407..28023c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -273,9 +273,13 @@ public abstract class CleanerChore extends Schedu
   try {
 POOL.latchCountUp();
 if (runCleaner()) {
-  LOG.debug("Cleaned all WALs under " + oldFileDir);
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Cleaned all WALs under " + oldFileDir);
+  }
 } else {
-  LOG.warn("WALs outstanding under " + oldFileDir);
+  if (LOG.isTraceEnabled()) {
+LOG.trace("WALs outstanding under " + oldFileDir);
+  }
 }
   } finally {
 POOL.latchCountDown();
@@ -288,7 +292,9 @@ public abstract class CleanerChore extends Schedu
 POOL.updatePool((long) (0.8 * getTimeUnit().toMillis(getPeriod(;
   }
 } else {
-  LOG.debug("Cleaner chore disabled! Not cleaning.");
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Cleaner chore disabled! Not cleaning.");
+  }
 }
   }
 



[30/50] [abbrv] hbase git commit: HBASE-20672 New metrics ReadRequestRate and WriteRequestRate

2018-12-13 Thread apurtell
HBASE-20672 New metrics ReadRequestRate and WriteRequestRate

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/08246957
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/08246957
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/08246957

Branch: refs/heads/branch-1.3
Commit: 08246957e7b37f71ed3874e153ec79e69a87fc5a
Parents: 7bfec2a
Author: Ankit 
Authored: Thu Jun 7 15:03:03 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../regionserver/MetricsRegionServerSource.java |  6 
 .../MetricsRegionServerWrapper.java | 10 +++
 .../MetricsRegionServerSourceImpl.java  |  4 +++
 .../MetricsRegionServerWrapperImpl.java | 29 
 .../MetricsRegionServerWrapperStub.java | 10 +++
 5 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/08246957/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 1991105..e3eab04 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -225,6 +225,12 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String WRITE_REQUEST_COUNT = "writeRequestCount";
   String WRITE_REQUEST_COUNT_DESC =
   "Number of mutation requests this region server has answered.";
+  String READ_REQUEST_RATE = "readRequestRate";
+  String READ_REQUEST_RATE_DESC =
+  "Rate of answering the read requests by this region server per second.";
+  String WRITE_REQUEST_RATE = "writeRequestRate";
+  String WRITE_REQUEST_RATE_DESC =
+  "Rate of answering the mutation requests by this region server per 
second.";
   String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
   String CHECK_MUTATE_FAILED_COUNT_DESC =
   "Number of Check and Mutate calls that failed the checks.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/08246957/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 7232063..6b34ca6 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -133,6 +133,16 @@ public interface MetricsRegionServerWrapper {
   long getWriteRequestsCount();
 
   /**
+   * Get the rate of read requests per second to regions hosted on this region 
server.
+   */
+  double getReadRequestsRate();
+
+  /**
+   * Get the rate of write requests per second to regions hosted on this 
region server.
+   */
+  double getWriteRequestsRate();
+
+  /**
* Get the number of CAS operations that failed.
*/
   long getCheckAndMutateChecksFailed();

http://git-wip-us.apache.org/repos/asf/hbase/blob/08246957/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 1c79612..3156039 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -329,6 +329,10 @@ public class MetricsRegionServerSourceImpl
   .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
   rsWrap.getStartCode())
   .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
+  .addGauge(Interns.info(READ_REQUEST_RATE, READ_REQUEST_RATE_DESC),
+  rsWrap.getReadRequestsRate())
+  

[40/50] [abbrv] hbase git commit: HBASE-20723 Custom hbase.wal.dir results in data loss because we write recovered edits into a different place than where the recovering region server looks for them

2018-12-13 Thread apurtell
HBASE-20723 Custom hbase.wal.dir results in data loss because we write 
recovered edits into a different place than where the recovering region server 
looks for them

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2702f23a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2702f23a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2702f23a

Branch: refs/heads/branch-1.3
Commit: 2702f23a4addaa459d6f221c0008be0c556f672d
Parents: 979438d
Author: tedyu 
Authored: Sat Jun 16 10:34:47 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:12:42 2018 -0800

--
 .../hadoop/hbase/wal/DefaultWALProvider.java|  8 +--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 60 
 .../hbase/regionserver/wal/TestWALReplay.java   | 72 ++--
 .../apache/hadoop/hbase/wal/TestWALFactory.java |  3 +-
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  8 +--
 5 files changed, 77 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2702f23a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index 01234a1..9cb5da0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -18,9 +18,6 @@
  */
 package org.apache.hadoop.hbase.wal;
 
-import java.io.Closeable;
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -370,8 +367,9 @@ public class DefaultWALProvider implements WALProvider {
 ProtobufLogWriter.class, Writer.class);
 Writer writer = null;
 try {
-  writer = logWriterClass.newInstance();
-  writer.init(fs, path, conf, overwritable);
+  writer = logWriterClass.getDeclaredConstructor().newInstance();
+  FileSystem rootFs = FileSystem.get(path.toUri(), conf);
+  writer.init(rootFs, path, conf, overwritable);
   return writer;
 } catch (Exception e) {
   LOG.debug("Error instantiating log writer.", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2702f23a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 50bb79f..c88e6d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -140,7 +140,7 @@ public class WALSplitter {
   public static final boolean SPLIT_SKIP_ERRORS_DEFAULT = false;
 
   // Parameters for split process
-  protected final Path rootDir;
+  protected final Path walDir;
   protected final FileSystem fs;
   protected final Configuration conf;
 
@@ -187,14 +187,14 @@ public class WALSplitter {
   public final static String SPLIT_WRITER_CREATION_BOUNDED = 
"hbase.split.writer.creation.bounded";
 
   @VisibleForTesting
-  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
+  WALSplitter(final WALFactory factory, Configuration conf, Path walDir,
   FileSystem fs, LastSequenceId idChecker,
   CoordinatedStateManager csm, RecoveryMode mode) {
 this.conf = HBaseConfiguration.create(conf);
 String codecClassName = conf
 .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, 
WALCellCodec.class.getName());
 this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
-this.rootDir = rootDir;
+this.walDir = walDir;
 this.fs = fs;
 this.sequenceIdChecker = idChecker;
 this.csm = (BaseCoordinatedStateManager)csm;
@@ -244,10 +244,10 @@ public class WALSplitter {
* @return false if it is interrupted by the progress-able.
* @throws IOException
*/
-  public static boolean splitLogFile(Path rootDir, FileStatus logfile, 
FileSystem fs,
+  public static boolean splitLogFile(Path walDir, FileStatus logfile, 
FileSystem fs,
   Configuration conf, CancelableProgressable reporter, LastSequenceId 
idChecker,
   CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) 
throws IOException {
-WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, 

[03/11] hbase git commit: HBASE-21570 Add write buffer periodic flush support for AsyncBufferedMutator

2018-12-13 Thread elserj
HBASE-21570 Add write buffer periodic flush support for AsyncBufferedMutator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b09b87d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b09b87d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b09b87d1

Branch: refs/heads/HBASE-20952
Commit: b09b87d143730db00ec56114a752d3a74f8982c4
Parents: da9508d
Author: zhangduo 
Authored: Tue Dec 11 08:39:43 2018 +0800
Committer: Duo Zhang 
Committed: Tue Dec 11 14:51:26 2018 +0800

--
 .../hbase/client/AsyncBufferedMutator.java  |  16 +-
 .../client/AsyncBufferedMutatorBuilder.java |  19 +++
 .../client/AsyncBufferedMutatorBuilderImpl.java |  19 ++-
 .../hbase/client/AsyncBufferedMutatorImpl.java  |  67 +---
 .../client/AsyncConnectionConfiguration.java|  37 +++--
 .../hbase/client/AsyncConnectionImpl.java   |  11 +-
 .../hbase/client/TestAsyncBufferMutator.java| 161 ++-
 7 files changed, 277 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b09b87d1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
index 6fe4b9a..7b21eb5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java
@@ -18,13 +18,16 @@
 package org.apache.hadoop.hbase.client;
 
 import java.io.Closeable;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+
 /**
  * Used to communicate with a single HBase table in batches. Obtain an 
instance from a
  * {@link AsyncConnection} and call {@link #close()} afterwards.
@@ -52,7 +55,9 @@ public interface AsyncBufferedMutator extends Closeable {
* part of a batch. Currently only supports {@link Put} and {@link Delete} 
mutations.
* @param mutation The data to send.
*/
-  CompletableFuture mutate(Mutation mutation);
+  default CompletableFuture mutate(Mutation mutation) {
+return 
Iterables.getOnlyElement(mutate(Collections.singletonList(mutation)));
+  }
 
   /**
* Send some {@link Mutation}s to the table. The mutations will be buffered 
and sent over the wire
@@ -81,4 +86,11 @@ public interface AsyncBufferedMutator extends Closeable {
* @return The size of the write buffer in bytes.
*/
   long getWriteBufferSize();
+
+  /**
+   * Returns the periodical flush interval, 0 means disabled.
+   */
+  default long getPeriodicalFlushTimeout(TimeUnit unit) {
+throw new UnsupportedOperationException("Not implemented");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b09b87d1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
index 45959bb..c617c8e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java
@@ -46,6 +46,25 @@ public interface AsyncBufferedMutatorBuilder {
   AsyncBufferedMutatorBuilder setRetryPause(long pause, TimeUnit unit);
 
   /**
+   * Set the periodical flush interval. If the data in the buffer has not been 
flushed for a long
+   * time, i.e., has reached this timeout limit, we will flush it automatically.
+   * 
+   * Notice that setting the timeout to 0 or a negative value disables 
periodical flush; it does not mean
+   * 'flush immediately'. If you want to flush immediately then you should not 
use this class, as it
+   * is designed to be 'buffered'.
+   */
+  default AsyncBufferedMutatorBuilder setWriteBufferPeriodicFlush(long 
timeout, TimeUnit unit) {
+throw new UnsupportedOperationException("Not implemented");
+  }
+
+  /**
+   * Disable the periodical flush, i.e, set the timeout to 0.
+   */
+  default AsyncBufferedMutatorBuilder disableWriteBufferPeriodicFlush() {
+return setWriteBufferPeriodicFlush(0, TimeUnit.NANOSECONDS);
+  }
+
+  /**
* Set the max retry times for an operation. Usually it is 

[09/11] hbase git commit: Revert "HIVE-21575 : memstore above high watermark message is logged too much"

2018-12-13 Thread elserj
Revert "HIVE-21575 : memstore above high watermark message is logged too much"

This reverts commit 4640ff5959af4865966126a503a7cd15e26a7408.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a25d0c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a25d0c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a25d0c2

Branch: refs/heads/HBASE-20952
Commit: 9a25d0c249e595a1f8aef41cd677b44ff1c72d73
Parents: cb1966d
Author: Sergey Shelukhin 
Authored: Thu Dec 13 12:46:39 2018 -0800
Committer: Sergey Shelukhin 
Committed: Thu Dec 13 12:46:39 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a25d0c2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 804a2f8..699c9b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -703,7 +703,6 @@ class MemStoreFlusher implements FlushRequester {
 if (flushType != FlushType.NORMAL) {
   TraceUtil.addTimelineAnnotation("Force Flush. We're above high water 
mark.");
   long start = EnvironmentEdgeManager.currentTime();
-  long nextLogTimeMs = start;
   synchronized (this.blockSignal) {
 boolean blocked = false;
 long startTime = 0;
@@ -745,11 +744,8 @@ class MemStoreFlusher implements FlushRequester {
   LOG.warn("Interrupted while waiting");
   interrupted = true;
 }
-long nowMs = EnvironmentEdgeManager.currentTime();
-if (nowMs >= nextLogTimeMs) {
-  LOG.warn("Memstore is above high water mark and block {} ms", 
nowMs - start);
-  nextLogTimeMs = nowMs + 1000;
-}
+long took = EnvironmentEdgeManager.currentTime() - start;
+LOG.warn("Memstore is above high water mark and block " + took + 
"ms");
 flushType = isAboveHighWaterMark();
   }
 } finally {



[08/11] hbase git commit: HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.

2018-12-13 Thread elserj
HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb1966dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb1966dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb1966dc

Branch: refs/heads/HBASE-20952
Commit: cb1966dc2d94fba10d9b6af3c5719e03f621df92
Parents: f32d261
Author: Lars Hofhansl 
Authored: Thu Dec 13 11:57:16 2018 -0800
Committer: Lars Hofhansl 
Committed: Thu Dec 13 11:57:16 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/StoreScanner.java  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb1966dc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 736c08a..e7a4528 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -802,12 +802,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextRow(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing an identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }
@@ -823,12 +827,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextColumn(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing an identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }



[07/11] hbase git commit: HBASE-21582 If call HBaseAdmin#snapshotAsync but forget call isSnapshotFinished, then SnapshotHFileCleaner will skip to run every time

2018-12-13 Thread elserj
HBASE-21582 If call HBaseAdmin#snapshotAsync but forget call 
isSnapshotFinished, then SnapshotHFileCleaner will skip to run every time


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f32d2618
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f32d2618
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f32d2618

Branch: refs/heads/HBASE-20952
Commit: f32d2618430f70e1b0db92785294b2c7892cc02b
Parents: 4640ff5
Author: huzheng 
Authored: Tue Dec 11 20:27:56 2018 +0800
Committer: huzheng 
Committed: Thu Dec 13 10:35:20 2018 +0800

--
 .../hbase/master/snapshot/SnapshotManager.java  | 48 ++--
 .../master/cleaner/TestSnapshotFromMaster.java  | 27 ++-
 .../master/snapshot/TestSnapshotManager.java| 36 +--
 3 files changed, 92 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f32d2618/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 2b963b2..05db4ab 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -28,7 +28,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -91,6 +95,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type;
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * This class manages the procedure of taking and restoring snapshots. There 
is only one
@@ -120,7 +126,9 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* At this point, if the user asks for the snapshot/restore status, the 
result will be
* snapshot done if exists or failed if it doesn't exists.
*/
-  private static final int SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT = 60 * 1000;
+  public static final String HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS =
+  "hbase.snapshot.sentinels.cleanup.timeoutMillis";
+  public static final long SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLS_DEFAULT = 
60 * 1000L;
 
   /** Enable or disable snapshot support */
   public static final String HBASE_SNAPSHOT_ENABLED = "hbase.snapshot.enabled";
@@ -151,7 +159,11 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
   // The map is always accessed and modified under the object lock using 
synchronized.
   // snapshotTable() will insert an Handler in the table.
   // isSnapshotDone() will remove the handler requested if the operation is 
finished.
-  private Map snapshotHandlers = new 
ConcurrentHashMap<>();
+  private final Map snapshotHandlers = new 
ConcurrentHashMap<>();
+  private final ScheduledExecutorService scheduleThreadPool =
+  Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
+  
.setNameFormat("SnapshotHandlerChoreCleaner").setDaemon(true).build());
+  private ScheduledFuture snapshotHandlerChoreCleanerTask;
 
   // Restore map, with table name as key, procedure ID as value.
   // The map is always accessed and modified under the object lock using 
synchronized.
@@ -181,17 +193,21 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
* @param coordinator procedure coordinator instance.  exposed for testing.
* @param pool HBase ExecutorServcie instance, exposed for testing.
*/
-  public SnapshotManager(final MasterServices master, final MetricsMaster 
metricsMaster,
-  ProcedureCoordinator coordinator, ExecutorService pool)
+  @VisibleForTesting
+  SnapshotManager(final MasterServices master, ProcedureCoordinator 
coordinator,
+  ExecutorService pool, int sentinelCleanInterval)
   throws IOException, 

[01/11] hbase git commit: HBASE-21560 Return a new TableDescriptor for MasterObserver#preModifyTable to allow coprocessor modify the TableDescriptor [Forced Update!]

2018-12-13 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20952 fb59426b7 -> ebfc04d85 (forced update)


HBASE-21560 Return a new TableDescriptor for MasterObserver#preModifyTable to 
allow coprocessor modify the TableDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79d90c87
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79d90c87
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79d90c87

Branch: refs/heads/HBASE-20952
Commit: 79d90c87b5bc6d4aa50e6edc52a3f20da708ee29
Parents: 8d7061a
Author: Guanghao Zhang 
Authored: Fri Dec 7 16:51:19 2018 +0800
Committer: Guanghao Zhang 
Committed: Sat Dec 8 09:28:14 2018 +0800

--
 .../hbase/coprocessor/MasterObserver.java   |   6 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  11 +-
 .../hbase/master/MasterCoprocessorHost.java |  22 ++--
 .../hbase/security/access/AccessController.java |   9 +-
 .../CoprocessorWhitelistMasterObserver.java |   5 +-
 .../visibility/VisibilityController.java|  15 ++-
 .../hbase/coprocessor/TestMasterObserver.java   |   3 +-
 .../TestMasterObserverToModifyTableSchema.java  | 128 +++
 8 files changed, 169 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79d90c87/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index a0863e4..1a8db79 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -240,11 +240,13 @@ public interface MasterObserver {
* @param currentDescriptor current TableDescriptor of the table
* @param newDescriptor after modify operation, table will have this 
descriptor
*/
-  default void preModifyTable(final 
ObserverContext ctx,
+  default TableDescriptor preModifyTable(final 
ObserverContext ctx,
   final TableName tableName, TableDescriptor currentDescriptor, 
TableDescriptor newDescriptor)
-throws IOException {
+  throws IOException {
 preModifyTable(ctx, tableName, newDescriptor);
+return newDescriptor;
   }
+
   /**
* Called after the modifyTable operation has been requested.  Called as part
* of modify table RPC call.

http://git-wip-us.apache.org/repos/asf/hbase/blob/79d90c87/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e96dc36..a16e09d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2631,13 +2631,12 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, 
nonceGroup, nonce) {
   @Override
   protected void run() throws IOException {
-TableDescriptor newDescriptor = newDescriptorGetter.get();
-sanityCheckTableDescriptor(newDescriptor);
 TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
-getMaster().getMasterCoprocessorHost().preModifyTable(tableName, 
oldDescriptor,
-  newDescriptor);
-
-LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+TableDescriptor newDescriptor = 
getMaster().getMasterCoprocessorHost()
+.preModifyTable(tableName, oldDescriptor, 
newDescriptorGetter.get());
+sanityCheckTableDescriptor(newDescriptor);
+LOG.info("{} modify table {} from {} to {}", 
getClientIdAuditPrefix(), tableName,
+oldDescriptor, newDescriptor);
 
 // Execute the operation synchronously - wait for the operation 
completes before
 // continuing.

http://git-wip-us.apache.org/repos/asf/hbase/blob/79d90c87/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 51e30c4..e7b166c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ 

[10/11] hbase git commit: HBASE-21575 : memstore above high watermark message is logged too much

2018-12-13 Thread elserj
HBASE-21575 : memstore above high watermark message is logged too much


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ff274e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ff274e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ff274e2

Branch: refs/heads/HBASE-20952
Commit: 3ff274e22eb5710f4301fb0fce364e22a11288d7
Parents: 9a25d0c
Author: Sergey Shelukhin 
Authored: Wed Dec 12 11:02:25 2018 -0800
Committer: Sergey Shelukhin 
Committed: Thu Dec 13 12:47:11 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ff274e2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 699c9b6..804a2f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -703,6 +703,7 @@ class MemStoreFlusher implements FlushRequester {
 if (flushType != FlushType.NORMAL) {
   TraceUtil.addTimelineAnnotation("Force Flush. We're above high water 
mark.");
   long start = EnvironmentEdgeManager.currentTime();
+  long nextLogTimeMs = start;
   synchronized (this.blockSignal) {
 boolean blocked = false;
 long startTime = 0;
@@ -744,8 +745,11 @@ class MemStoreFlusher implements FlushRequester {
   LOG.warn("Interrupted while waiting");
   interrupted = true;
 }
-long took = EnvironmentEdgeManager.currentTime() - start;
-LOG.warn("Memstore is above high water mark and block " + took + 
"ms");
+long nowMs = EnvironmentEdgeManager.currentTime();
+if (nowMs >= nextLogTimeMs) {
+  LOG.warn("Memstore is above high water mark and block {} ms", 
nowMs - start);
+  nextLogTimeMs = nowMs + 1000;
+}
 flushType = isAboveHighWaterMark();
   }
 } finally {



[06/11] hbase git commit: HIVE-21575 : memstore above high watermark message is logged too much

2018-12-13 Thread elserj
HIVE-21575 : memstore above high watermark message is logged too much


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4640ff59
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4640ff59
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4640ff59

Branch: refs/heads/HBASE-20952
Commit: 4640ff5959af4865966126a503a7cd15e26a7408
Parents: 67d6d50
Author: Sergey Shelukhin 
Authored: Wed Dec 12 11:02:25 2018 -0800
Committer: Sergey Shelukhin 
Committed: Wed Dec 12 11:02:25 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4640ff59/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 699c9b6..804a2f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -703,6 +703,7 @@ class MemStoreFlusher implements FlushRequester {
 if (flushType != FlushType.NORMAL) {
   TraceUtil.addTimelineAnnotation("Force Flush. We're above high water 
mark.");
   long start = EnvironmentEdgeManager.currentTime();
+  long nextLogTimeMs = start;
   synchronized (this.blockSignal) {
 boolean blocked = false;
 long startTime = 0;
@@ -744,8 +745,11 @@ class MemStoreFlusher implements FlushRequester {
   LOG.warn("Interrupted while waiting");
   interrupted = true;
 }
-long took = EnvironmentEdgeManager.currentTime() - start;
-LOG.warn("Memstore is above high water mark and block " + took + 
"ms");
+long nowMs = EnvironmentEdgeManager.currentTime();
+if (nowMs >= nextLogTimeMs) {
+  LOG.warn("Memstore is above high water mark and block {} ms", 
nowMs - start);
+  nextLogTimeMs = nowMs + 1000;
+}
 flushType = isAboveHighWaterMark();
   }
 } finally {



[05/11] hbase git commit: HBASE-21568 Use CacheConfig.DISABLED where we don't expect to have blockcache running

2018-12-13 Thread elserj
HBASE-21568 Use CacheConfig.DISABLED where we don't expect to have blockcache 
running

This includes removing the "old way" of disabling blockcache in favor of the
new API.

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/67d6d508
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/67d6d508
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/67d6d508

Branch: refs/heads/HBASE-20952
Commit: 67d6d5084cf8fc094cda4bd3f091d8a0a9cb1d3e
Parents: f88224e
Author: Josh Elser 
Authored: Fri Dec 7 17:18:49 2018 -0500
Committer: Josh Elser 
Committed: Tue Dec 11 10:02:18 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java  | 6 ++
 .../src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java  | 4 +---
 .../org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java   | 2 +-
 .../org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java| 6 +++---
 .../java/org/apache/hadoop/hbase/util/CompressionTest.java | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java  | 5 ++---
 .../apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java  | 2 +-
 7 files changed, 11 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/67d6d508/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index c911e8c..274a506 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -414,8 +414,6 @@ public class HFileOutputFormat2
 DataBlockEncoding encoding = overriddenEncoding;
 encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) 
: encoding;
 encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
-Configuration tempConf = new Configuration(conf);
-tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
 HFileContextBuilder contextBuilder = new HFileContextBuilder()
 .withCompression(compression)
 
.withChecksumType(HStore.getChecksumType(conf))
@@ -430,12 +428,12 @@ public class HFileOutputFormat2
 HFileContext hFileContext = contextBuilder.build();
 if (null == favoredNodes) {
   wl.writer =
-  new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)
+  new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
   .withOutputDir(familydir).withBloomType(bloomType)
   
.withComparator(CellComparator.getInstance()).withFileContext(hFileContext).build();
 } else {
   wl.writer =
-  new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new 
HFileSystem(fs))
+  new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new 
HFileSystem(fs))
   .withOutputDir(familydir).withBloomType(bloomType)
   
.withComparator(CellComparator.getInstance()).withFileContext(hFileContext)
   .withFavoredNodes(favoredNodes).build();

http://git-wip-us.apache.org/repos/asf/hbase/blob/67d6d508/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 5bcaa17..78ebedc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -356,9 +356,7 @@ public class HFile {
*/
   public static final WriterFactory getWriterFactoryNoCache(Configuration
conf) {
-Configuration tempConf = new Configuration(conf);
-tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
-return HFile.getWriterFactory(conf, new CacheConfig(tempConf));
+return HFile.getWriterFactory(conf, CacheConfig.DISABLED);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/67d6d508/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 

[2/2] hbase git commit: HBASE-21246 Introduce WALIdentity to identify WALs instead of a Path

2018-12-13 Thread elserj
HBASE-21246 Introduce WALIdentity to identify WALs instead of a Path

Builds on top of tyu's original idea.

Signed-off-by: Josh Elser 
Signed-off-by: Reid Chan 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c738e157
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c738e157
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c738e157

Branch: refs/heads/HBASE-20952
Commit: c738e1575f37000c6feff00362399312740b3c74
Parents: ebfc04d
Author: Ankit Singhal 
Authored: Thu Dec 13 16:59:23 2018 -0500
Committer: Josh Elser 
Committed: Thu Dec 13 17:10:28 2018 -0500

--
 .../apache/hadoop/hbase/wal/FSWALIdentity.java  | 78 
 .../apache/hadoop/hbase/wal/WALIdentity.java| 39 
 .../regionserver/ReplicationStatusTmpl.jamon|  4 +-
 .../hadoop/hbase/coprocessor/WALObserver.java   | 14 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 24 ++---
 .../regionserver/wal/WALActionsListener.java| 26 +++---
 .../regionserver/wal/WALCoprocessorHost.java| 18 ++--
 .../RecoveredReplicationSource.java | 40 +
 .../RecoveredReplicationSourceShipper.java  |  7 +-
 .../regionserver/ReplicationSource.java | 44 -
 .../ReplicationSourceInterface.java |  5 +-
 .../regionserver/ReplicationSourceManager.java  | 64 +++---
 .../regionserver/ReplicationSourceShipper.java  | 18 ++--
 .../ReplicationSourceWALActionListener.java | 10 +--
 .../ReplicationSourceWALReader.java | 39 
 .../regionserver/ReplicationStatus.java | 16 ++--
 .../SerialReplicationSourceWALReader.java   | 20 ++---
 .../replication/regionserver/WALEntryBatch.java | 22 ++---
 .../regionserver/WALEntryStream.java| 93 +++-
 .../regionserver/WALFileLengthProvider.java |  4 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java   | 36 ++--
 .../apache/hadoop/hbase/wal/FSHLogProvider.java |  2 +-
 .../coprocessor/SampleRegionWALCoprocessor.java |  6 +-
 .../hbase/fs/TestBlockReorderMultiBlocks.java   |  4 +-
 .../wal/AbstractTestLogRollPeriod.java  | 18 ++--
 .../hbase/regionserver/wal/TestLogRolling.java  | 15 ++--
 .../wal/TestWALActionsListener.java |  5 +-
 .../replication/ReplicationSourceDummy.java | 12 +--
 .../replication/TestMasterReplication.java  |  4 +-
 .../replication/TestMultiSlaveReplication.java  |  5 +-
 .../TestReplicationEmptyWALRecovery.java| 13 ++-
 .../TestReplicationMetricsforUI.java|  8 +-
 .../master/TestRecoverStandbyProcedure.java |  1 +
 .../regionserver/TestReplicationSource.java |  6 +-
 .../TestReplicationSourceManager.java   | 26 +++---
 .../regionserver/TestWALEntryStream.java| 51 +--
 36 files changed, 490 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c738e157/hbase-common/src/main/java/org/apache/hadoop/hbase/wal/FSWALIdentity.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/wal/FSWALIdentity.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/wal/FSWALIdentity.java
new file mode 100644
index 000..d12a1cf
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/wal/FSWALIdentity.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A distributed-filesystem-oriented implementation of {@link WALIdentity}.
+ */
+@InterfaceAudience.Private
+public class FSWALIdentity implements WALIdentity {
+  private String name;
+  private Path path;
+
+  public FSWALIdentity(String name) {
+this.path = new Path(name);
+this.name = path.getName();
+  }
+
+  public FSWALIdentity(Path path) {
+this.path = path;
+if (path != null) {
+  this.name = path.getName();
+}
+  }
+
+  @Override
+  public String getName() {
+return 

[1/2] hbase git commit: HBASE-21246 Introduce WALIdentity to identify WALs instead of a Path

2018-12-13 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20952 ebfc04d85 -> c738e1575


http://git-wip-us.apache.org/repos/asf/hbase/blob/c738e157/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java
index 1da31da..e82ccc2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java
@@ -24,13 +24,13 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALIdentity;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -148,13 +148,13 @@ public class SampleRegionWALCoprocessor implements 
WALCoprocessor, RegionCoproce
 
   @Override
   public void preWALRoll(ObserverContext 
ctx,
-  Path oldPath, Path newPath) throws IOException {
+  WALIdentity oldWalId, WALIdentity newWalId) throws IOException {
 preWALRollCalled = true;
   }
 
   @Override
   public void postWALRoll(ObserverContext 
ctx,
-  Path oldPath, Path newPath) throws IOException {
+  WALIdentity oldWalId, WALIdentity newWalId) throws IOException {
 postWALRollCalled = true;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c738e157/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
index ad2b2d4..cd96586 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WALIdentity;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -155,7 +156,8 @@ public class TestBlockReorderMultiBlocks {
   // listen for successful log rolls
   final WALActionsListener listener = new WALActionsListener() {
 @Override
-public void postLogRoll(final Path oldPath, final Path newPath) 
throws IOException {
+public void postLogRoll(final WALIdentity oldWalId, final 
WALIdentity newWalId)
+throws IOException {
   latch.countDown();
 }
   };

http://git-wip-us.apache.org/repos/asf/hbase/blob/c738e157/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
index 9322c5e..8dcca93 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
@@ -30,6 +29,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALIdentity;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -130,12 +130,12 @@ public abstract class AbstractTestLogRollPeriod {
 
   private void checkMinLogRolls(final WAL log, final int minRolls)
   throws Exception {
-final List paths = new ArrayList<>();
+final List walIds = new ArrayList();
 

hbase git commit: HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.

2018-12-13 Thread larsh
Repository: hbase
Updated Branches:
  refs/heads/branch-1 81dc223ae -> 4e7fc18fb


HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e7fc18f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e7fc18f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e7fc18f

Branch: refs/heads/branch-1
Commit: 4e7fc18fb6946a0e5728c4f6265c840e2d2a1434
Parents: 81dc223
Author: Lars Hofhansl 
Authored: Thu Dec 13 11:55:28 2018 -0800
Committer: Lars Hofhansl 
Committed: Thu Dec 13 11:55:28 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/StoreScanner.java  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e7fc18f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 31856e3..96d5946 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -796,12 +796,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextRow(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }
@@ -817,12 +821,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextColumn(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }



[2/2] hbase git commit: HBASE-21575 : memstore above high watermark message is logged too much

2018-12-13 Thread sershe
HBASE-21575 : memstore above high watermark message is logged too much


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ff274e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ff274e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ff274e2

Branch: refs/heads/master
Commit: 3ff274e22eb5710f4301fb0fce364e22a11288d7
Parents: 9a25d0c
Author: Sergey Shelukhin 
Authored: Wed Dec 12 11:02:25 2018 -0800
Committer: Sergey Shelukhin 
Committed: Thu Dec 13 12:47:11 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ff274e2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 699c9b6..804a2f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -703,6 +703,7 @@ class MemStoreFlusher implements FlushRequester {
 if (flushType != FlushType.NORMAL) {
   TraceUtil.addTimelineAnnotation("Force Flush. We're above high water 
mark.");
   long start = EnvironmentEdgeManager.currentTime();
+  long nextLogTimeMs = start;
   synchronized (this.blockSignal) {
 boolean blocked = false;
 long startTime = 0;
@@ -744,8 +745,11 @@ class MemStoreFlusher implements FlushRequester {
   LOG.warn("Interrupted while waiting");
   interrupted = true;
 }
-long took = EnvironmentEdgeManager.currentTime() - start;
-LOG.warn("Memstore is above high water mark and block " + took + 
"ms");
+long nowMs = EnvironmentEdgeManager.currentTime();
+if (nowMs >= nextLogTimeMs) {
+  LOG.warn("Memstore is above high water mark and block {} ms", 
nowMs - start);
+  nextLogTimeMs = nowMs + 1000;
+}
 flushType = isAboveHighWaterMark();
   }
 } finally {



[1/2] hbase git commit: Revert "HBASE-21575 : memstore above high watermark message is logged too much"

2018-12-13 Thread sershe
Repository: hbase
Updated Branches:
  refs/heads/master cb1966dc2 -> 3ff274e22


Revert "HBASE-21575 : memstore above high watermark message is logged too much"

This reverts commit 4640ff5959af4865966126a503a7cd15e26a7408.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a25d0c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a25d0c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a25d0c2

Branch: refs/heads/master
Commit: 9a25d0c249e595a1f8aef41cd677b44ff1c72d73
Parents: cb1966d
Author: Sergey Shelukhin 
Authored: Thu Dec 13 12:46:39 2018 -0800
Committer: Sergey Shelukhin 
Committed: Thu Dec 13 12:46:39 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a25d0c2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 804a2f8..699c9b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -703,7 +703,6 @@ class MemStoreFlusher implements FlushRequester {
 if (flushType != FlushType.NORMAL) {
   TraceUtil.addTimelineAnnotation("Force Flush. We're above high water 
mark.");
   long start = EnvironmentEdgeManager.currentTime();
-  long nextLogTimeMs = start;
   synchronized (this.blockSignal) {
 boolean blocked = false;
 long startTime = 0;
@@ -745,11 +744,8 @@ class MemStoreFlusher implements FlushRequester {
   LOG.warn("Interrupted while waiting");
   interrupted = true;
 }
-long nowMs = EnvironmentEdgeManager.currentTime();
-if (nowMs >= nextLogTimeMs) {
-  LOG.warn("Memstore is above high water mark and block {} ms", 
nowMs - start);
-  nextLogTimeMs = nowMs + 1000;
-}
+long took = EnvironmentEdgeManager.currentTime() - start;
+LOG.warn("Memstore is above high water mark and block " + took + 
"ms");
 flushType = isAboveHighWaterMark();
   }
 } finally {



[1/2] hbase git commit: HBASE-21359 Fix build problem against Hadoop 2.8.5

2018-12-13 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 d37294174 -> c4ed66a05


HBASE-21359 Fix build problem against Hadoop 2.8.5


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e7563cce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e7563cce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e7563cce

Branch: refs/heads/branch-1.3
Commit: e7563cce32d54e41d5536b4b50e703fed42b349b
Parents: d372941
Author: Andrew Purtell 
Authored: Fri Nov 30 12:03:36 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Dec 13 16:35:22 2018 -0800

--
 .../src/main/resources/supplemental-models.xml| 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7563cce/hbase-resource-bundle/src/main/resources/supplemental-models.xml
--
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml 
b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index eac4cad..9c4ceea 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -1351,6 +1351,24 @@ Copyright (c) 2000-2005 INRIA, France Telecom
   
   
 
+  org.ow2.asm
+  asm
+  ASM: a very small and fast Java bytecode manipulation 
framework
+
+  
+
+  BSD 3-Clause License
+  
http://cvs.forge.objectweb.org/cgi-bin/viewcvs.cgi/*checkout*/asm/asm/LICENSE.txt?rev=1.3only_with_tag=ASM_3_1_MVN
+  repo
+  
+Copyright (c) 2000-2005 INRIA, France Telecom
+
+
+  
+
+  
+  
+
   org.fusesource.leveldbjni
   leveldbjni-all
 



[04/11] hbase git commit: HBASE-21453 Convert ReadOnlyZKClient to DEBUG instead of INFO

2018-12-13 Thread elserj
HBASE-21453 Convert ReadOnlyZKClient to DEBUG instead of INFO


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f88224ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f88224ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f88224ee

Branch: refs/heads/HBASE-20952
Commit: f88224ee34ba2c23f794ec1219ffd93783b20e51
Parents: b09b87d
Author: Sakthi 
Authored: Thu Nov 29 18:52:50 2018 -0800
Committer: Peter Somogyi 
Committed: Tue Dec 11 08:18:02 2018 +0100

--
 .../java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f88224ee/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
index fc2d5f0..09f8984 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
@@ -136,7 +136,7 @@ public final class ReadOnlyZKClient implements Closeable {
 this.retryIntervalMs =
 conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, 
DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS);
 this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, 
DEFAULT_KEEPALIVE_MILLIS);
-LOG.info(
+LOG.debug(
   "Connect {} to {} with session timeout={}ms, retries {}, " +
 "retry interval {}ms, keepAlive={}ms",
   getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, 
keepAliveTimeMs);
@@ -347,7 +347,7 @@ public final class ReadOnlyZKClient implements Closeable {
   @Override
   public void close() {
 if (closed.compareAndSet(false, true)) {
-  LOG.info("Close zookeeper connection {} to {}", getId(), connectString);
+  LOG.debug("Close zookeeper connection {} to {}", getId(), connectString);
   tasks.add(CLOSE);
 }
   }



[11/11] hbase git commit: HBASE-20952 run the extended tests weekly until branch activity picks up.

2018-12-13 Thread elserj
HBASE-20952 run the extended tests weekly until branch activity picks up.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ebfc04d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ebfc04d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ebfc04d8

Branch: refs/heads/HBASE-20952
Commit: ebfc04d85e44d5af75afa68789ef1ae5e3b0ed35
Parents: 3ff274e
Author: Sean Busbey 
Authored: Fri Nov 16 07:51:08 2018 -0600
Committer: Josh Elser 
Committed: Thu Dec 13 16:57:55 2018 -0500

--
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ebfc04d8/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index b333afb..bea425a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -21,7 +21,7 @@ pipeline {
 }
   }
   triggers {
-cron('@daily')
+cron('@weekly')
   }
   options {
 buildDiscarder(logRotator(numToKeepStr: '30'))



[02/11] hbase git commit: HBASE-21567 Allow overriding configs starting up the shell

2018-12-13 Thread elserj
HBASE-21567 Allow overriding configs starting up the shell

Adds support for -D as option to 'hbase shell'


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da9508d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da9508d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da9508d4

Branch: refs/heads/HBASE-20952
Commit: da9508d4271ea12410e289692f10791b0e05266b
Parents: 79d90c8
Author: stack 
Authored: Thu Dec 6 23:05:21 2018 -0800
Committer: stack 
Committed: Sat Dec 8 15:08:19 2018 -0800

--
 bin/hirb.rb| 40 -
 src/main/asciidoc/_chapters/shell.adoc | 16 
 2 files changed, 49 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/da9508d4/bin/hirb.rb
--
diff --git a/bin/hirb.rb b/bin/hirb.rb
index 790ecdc..e857db7 100644
--- a/bin/hirb.rb
+++ b/bin/hirb.rb
@@ -54,21 +54,47 @@ $LOAD_PATH.unshift Pathname.new(sources)
 cmdline_help =  @shell.hbase.configuration.get("hbase.zookeeper.quorum")
+=> 
"ZK0.remote.cluster.example.org,ZK1.remote.cluster.example.org,ZK2.remote.cluster.example.org"
+hbase(main):002:0> @shell.hbase.configuration.get("raining")
+=> "false"
+
+
 == Shell Tricks
 
 === Table variables



hbase git commit: HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.

2018-12-13 Thread larsh
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 efc054c6a -> 64369d04c


HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64369d04
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64369d04
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64369d04

Branch: refs/heads/branch-1.4
Commit: 64369d04cb9c5ff39ac02c8c8497be7ef0bd4d64
Parents: efc054c
Author: Lars Hofhansl 
Authored: Thu Dec 13 14:09:45 2018 -0800
Committer: Lars Hofhansl 
Committed: Thu Dec 13 14:09:45 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/StoreScanner.java  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/64369d04/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 31856e3..96d5946 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -796,12 +796,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextRow(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }
@@ -817,12 +821,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextColumn(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }



hbase git commit: HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.

2018-12-13 Thread larsh
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9b139ca01 -> 11193d7cc


HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/11193d7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/11193d7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/11193d7c

Branch: refs/heads/branch-2
Commit: 11193d7cc1f5a0a7ffb73777da7ce5c1b6af6c8c
Parents: 9b139ca
Author: Lars Hofhansl 
Authored: Thu Dec 13 11:56:39 2018 -0800
Committer: Lars Hofhansl 
Committed: Thu Dec 13 11:56:39 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/StoreScanner.java  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/11193d7c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 736c08a..e7a4528 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -802,12 +802,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextRow(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }
@@ -823,12 +827,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextColumn(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }



hbase git commit: HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.

2018-12-13 Thread larsh
Repository: hbase
Updated Branches:
  refs/heads/master f32d26184 -> cb1966dc2


HBASE-21590 Optimize trySkipToNextColumn in StoreScanner a bit.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb1966dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb1966dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb1966dc

Branch: refs/heads/master
Commit: cb1966dc2d94fba10d9b6af3c5719e03f621df92
Parents: f32d261
Author: Lars Hofhansl 
Authored: Thu Dec 13 11:57:16 2018 -0800
Committer: Lars Hofhansl 
Committed: Thu Dec 13 11:57:16 2018 -0800

--
 .../apache/hadoop/hbase/regionserver/StoreScanner.java  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb1966dc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 736c08a..e7a4528 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -802,12 +802,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextRow(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }
@@ -823,12 +827,16 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
   @VisibleForTesting
   protected boolean trySkipToNextColumn(Cell cell) throws IOException {
 Cell nextCell = null;
+// used to guard against a changed next indexed key by doing a identity 
comparison
+// when the identity changes we need to compare the bytes again
+Cell previousIndexedKey = null;
 do {
   Cell nextIndexedKey = getNextIndexedKey();
   if (nextIndexedKey != null && nextIndexedKey != 
KeyValueScanner.NO_NEXT_INDEXED_KEY
-  && matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) {
+  && (nextIndexedKey == previousIndexedKey || 
matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0)) {
 this.heap.next();
 ++kvsScanned;
+previousIndexedKey = nextIndexedKey;
   } else {
 return false;
   }



[2/2] hbase git commit: HBASE-19458 Allow building HBase 1.3.x against Hadoop 2.8 (Lars Hofhansl)

2018-12-13 Thread apurtell
HBASE-19458 Allow building HBase 1.3.x against Hadoop 2.8 (Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4ed66a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4ed66a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4ed66a0

Branch: refs/heads/branch-1.3
Commit: c4ed66a05a11e196ac5f5df3436132af28db707f
Parents: e7563cc
Author: Andrew Purtell 
Authored: Thu Dec 13 16:28:47 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Dec 13 16:35:23 2018 -0800

--
 .../src/main/resources/supplemental-models.xml  | 46 
 1 file changed, 46 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4ed66a0/hbase-resource-bundle/src/main/resources/supplemental-models.xml
--
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml 
b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index 9c4ceea..9db6f94 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -1082,6 +1082,34 @@ Copyright 2006 Envoi Solutions LLC
   
 
   
+  
+
+  com.squareup.okhttp
+  okhttp
+  2.4.0
+  
+
+  Apache License, Version 2.0
+  http://www.apache.org/licenses/LICENSE-2.0.txt
+  repo
+
+  
+
+  
+  
+
+  com.squareup.okio
+  okio
+  1.4.0
+  
+
+  Apache License, Version 2.0
+  http://www.apache.org/licenses/LICENSE-2.0.txt
+  repo
+
+  
+
+  
 
   
 
@@ -2066,4 +2094,22 @@ Copyright (c) 2007-2011 The JRuby project
   
 
   
+  
+
+  net.jcip
+  jcip-annotations
+  1.0
+  
+Brian Goetz and Tim Peierls
+http://www.jcip.net
+  
+  
+
+  Creative Commons Attribution License, Version 2.5
+  http://creativecommons.org/licenses/by/2.5
+  repo
+
+  
+
+  
 



hbase git commit: Update POMs and CHANGES.txt for 1.3.3RC0

2018-12-13 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c4ed66a05 -> 05fed6558
Updated Tags:  refs/tags/1.3.3RC0 [created] 86009810a


Update POMs and CHANGES.txt for 1.3.3RC0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05fed655
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05fed655
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05fed655

Branch: refs/heads/branch-1.3
Commit: 05fed65583a8309df83178c331214c9b115bf2e1
Parents: c4ed66a
Author: Andrew Purtell 
Authored: Thu Dec 13 17:20:51 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Dec 13 17:20:51 2018 -0800

--
 CHANGES.txt | 189 +++
 hbase-annotations/pom.xml   |   2 +-
 .../hbase-archetype-builder/pom.xml |   2 +-
 hbase-archetypes/hbase-client-project/pom.xml   |   2 +-
 .../hbase-shaded-client-project/pom.xml |   2 +-
 hbase-archetypes/pom.xml|   2 +-
 hbase-assembly/pom.xml  |   2 +-
 hbase-checkstyle/pom.xml|   4 +-
 hbase-client/pom.xml|   2 +-
 hbase-common/pom.xml|   2 +-
 hbase-examples/pom.xml  |   2 +-
 hbase-external-blockcache/pom.xml   |   2 +-
 hbase-hadoop-compat/pom.xml |   2 +-
 hbase-hadoop2-compat/pom.xml|   2 +-
 hbase-it/pom.xml|   2 +-
 hbase-prefix-tree/pom.xml   |   2 +-
 hbase-procedure/pom.xml |   2 +-
 hbase-protocol/pom.xml  |   2 +-
 hbase-resource-bundle/pom.xml   |   2 +-
 hbase-rest/pom.xml  |   2 +-
 hbase-server/pom.xml|   2 +-
 .../hbase-shaded-check-invariants/pom.xml   |   2 +-
 hbase-shaded/hbase-shaded-client/pom.xml|   2 +-
 hbase-shaded/hbase-shaded-server/pom.xml|   2 +-
 hbase-shaded/pom.xml|   2 +-
 hbase-shell/pom.xml |   2 +-
 hbase-testing-util/pom.xml  |   2 +-
 hbase-thrift/pom.xml|   2 +-
 pom.xml |   2 +-
 29 files changed, 218 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/05fed655/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index a0b5440..9377707 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,5 +1,194 @@
 HBase Change Log
 
+Release Notes - HBase - Version 1.3.3 12/21/2018
+
+** Sub-task
+* [HBASE-16848] - Usage for show_peer_tableCFs command doesn't include peer
+* [HBASE-18786] - FileNotFoundException should not be silently handled for 
primary region replicas
+* [HBASE-20316] - Backport HBASE-20229 
"ConnectionImplementation.locateRegions() returns duplicated entries when 
region replication is on" to branch-1
+* [HBASE-20317] - Backport HBASE-20261 "Table page (table.jsp) in Master 
UI does not show replicaIds for hbase meta table" to branch-1
+* [HBASE-20557] - Backport HBASE-17215 to branch-1
+* [HBASE-20558] - Backport HBASE-17854 to branch-1
+* [HBASE-20559] - Backport HBASE-18083 to branch-1
+* [HBASE-20647] - Backport HBASE-20616 "TruncateTableProcedure is stuck in 
retry loop in TRUNCATE_TABLE_CREATE_FS_LAYOUT state" to branch-1
+* [HBASE-20974] - Backport HBASE-20583 (SplitLogWorker should handle 
FileNotFoundException when split a wal) to branch-1
+* [HBASE-21060] - fix dead store in SecureBulkLoadEndpoint
+* [HBASE-21061] - fix synchronization of 
org.apache.hadoop.hbase.ipc.RpcServer$Connection.useWrap
+* [HBASE-21189] - flaky job should gather machine stats
+* [HBASE-21190] - Log files and count of entries in each as we load from 
the MasterProcWAL store
+* [HBASE-21347] - Backport HBASE-21200 "Memstore flush doesn't finish 
because of seekToPreviousRow() in memstore scanner." to branch-1
+
+** Bug
+* [HBASE-15291] - FileSystem not closed in secure bulkLoad
+* [HBASE-16091] - Canary takes lot more time when there are delete markers 
in the table
+* [HBASE-16576] - Shell add_peer doesn't allow setting cluster_key for 
custom endpoints
+* [HBASE-16910] - Avoid NPE when starting StochasticLoadBalancer
+* [HBASE-17072] - CPU usage starts to climb up to 90-100% when using G1GC; 
purge ThreadLocal usage
+* [HBASE-17118] - StoreScanner leaked in KeyValueHeap
+* [HBASE-17565] - StochasticLoadBalancer may incorrectly skip balancing 
due to skewed multiplier sum
+* [HBASE-17631] - Canary interval too low
+* [HBASE-17798] - 

svn commit: r31531 - in /dev/hbase/hbase-1.3.3RC0: compat-check-report.html hbase-1.3.3-bin.tar.gz hbase-1.3.3-bin.tar.gz.asc hbase-1.3.3-bin.tar.gz.sha512 hbase-1.3.3-src.tar.gz hbase-1.3.3-src.tar.g

2018-12-13 Thread apurtell
Author: apurtell
Date: Fri Dec 14 02:35:15 2018
New Revision: 31531

Log:
Stage remaining HBase 1.3.3RC0 artifacts

Added:
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-bin.tar.gz   (with props)
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-bin.tar.gz.asc
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-bin.tar.gz.sha512
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-src.tar.gz   (with props)
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-src.tar.gz.asc
dev/hbase/hbase-1.3.3RC0/hbase-1.3.3-src.tar.gz.sha512
Modified:
dev/hbase/hbase-1.3.3RC0/compat-check-report.html

Modified: dev/hbase/hbase-1.3.3RC0/compat-check-report.html
==
--- dev/hbase/hbase-1.3.3RC0/compat-check-report.html (original)
+++ dev/hbase/hbase-1.3.3RC0/compat-check-report.html Fri Dec 14 02:35:15 2018
@@ -6,8 +6,8 @@
 
 
 
-
-hbase: rel/1.3.2 to branch-1.3 compatibility report
+
+hbase: rel/1.3.2 to 1.3.3RC0 compatibility report
 

svn commit: r31530 [1/2] - in /dev/hbase/hbase-1.3.3RC0: ./ compat-check-report.html

2018-12-13 Thread apurtell
Author: apurtell
Date: Fri Dec 14 01:14:34 2018
New Revision: 31530

Log:
Start staging HBase 1.3.3RC0; just compat check report for now

Added:
dev/hbase/hbase-1.3.3RC0/
dev/hbase/hbase-1.3.3RC0/compat-check-report.html



[02/50] [abbrv] hbase git commit: HBASE-17965 Canary tool should print the regionserver name on failure

2018-12-13 Thread apurtell
HBASE-17965 Canary tool should print the regionserver name on failure

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/212e86db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/212e86db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/212e86db

Branch: refs/heads/branch-1.3
Commit: 212e86dbe3b23947a3560e0f24cf1ea6fdcced09
Parents: 59d36e5
Author: Karan Mehta 
Authored: Wed Apr 26 17:14:02 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:15 2018 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 115 +--
 .../hadoop/hbase/tool/TestCanaryTool.java   |   7 +-
 2 files changed, 32 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/212e86db/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index c58..068e0ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -114,15 +114,15 @@ public final class Canary implements Tool {
   public interface Sink {
 public long getReadFailureCount();
 public long incReadFailureCount();
-public void publishReadFailure(HRegionInfo region, Exception e);
-public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
+public void publishReadFailure(ServerName serverName, HRegionInfo region, 
Exception e);
+public void publishReadFailure(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, Exception e);
 public void updateReadFailedHostList(HRegionInfo region, String 
serverName);
 public Map getReadFailures();
-public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public void publishReadTiming(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, long msTime);
 public long getWriteFailureCount();
-public void publishWriteFailure(HRegionInfo region, Exception e);
-public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
-public void publishWriteTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
Exception e);
+public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, Exception e);
+public void publishWriteTiming(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, long msTime);
 public void updateWriteFailedHostList(HRegionInfo region, String 
serverName);
 public Map getWriteFailures();
   }
@@ -153,16 +153,16 @@ public final class Canary implements Tool {
 }
 
 @Override
-public void publishReadFailure(HRegionInfo region, Exception e) {
+public void publishReadFailure(ServerName serverName, HRegionInfo region, 
Exception e) {
   readFailureCount.incrementAndGet();
-  LOG.error(String.format("read from region %s failed", 
region.getRegionNameAsString()), e);
+  LOG.error(String.format("read from region %s on regionserver %s failed", 
region.getRegionNameAsString(), serverName), e);
 }
 
 @Override
-public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+public void publishReadFailure(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, Exception e) {
   readFailureCount.incrementAndGet();
-  LOG.error(String.format("read from region %s column family %s failed",
-region.getRegionNameAsString(), column.getNameAsString()), e);
+  LOG.error(String.format("read from region %s on regionserver %s column 
family %s failed",
+region.getRegionNameAsString(), serverName, 
column.getNameAsString()), e);
 }
 
 @Override
@@ -171,9 +171,9 @@ public final class Canary implements Tool {
 }
 
 @Override
-public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime) {
-  LOG.info(String.format("read from region %s column family %s in %dms",
-region.getRegionNameAsString(), column.getNameAsString(), msTime));
+public void publishReadTiming(ServerName serverName, HRegionInfo region, 
HColumnDescriptor column, long msTime) {
+  LOG.info(String.format("read from region %s on regionserver %s column 
family %s in %dms",
+region.getRegionNameAsString(), serverName, 

[43/50] [abbrv] hbase git commit: HBASE-18549 Add metrics for failed replication queue recovery

2018-12-13 Thread apurtell
HBASE-18549 Add metrics for failed replication queue recovery

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ecfca3a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ecfca3a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ecfca3a

Branch: refs/heads/branch-1.3
Commit: 1ecfca3a5c7587a3a69f718ffbeec0d36cbcf4d1
Parents: 9675ad3
Author: Xu Cang 
Authored: Wed Aug 29 16:39:57 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:25:33 2018 -0800

--
 .../hadoop/hbase/replication/ReplicationQueuesZKImpl.java| 5 -
 .../regionserver/MetricsReplicationSourceSource.java | 3 +++
 .../regionserver/MetricsReplicationGlobalSourceSource.java   | 8 
 .../regionserver/MetricsReplicationSourceSourceImpl.java | 5 +
 .../hadoop/hbase/replication/regionserver/MetricsSource.java | 4 
 .../replication/regionserver/ReplicationSourceManager.java   | 3 +++
 .../hadoop/hbase/replication/TestReplicationEndpoint.java| 8 +++-
 7 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ecfca3a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 3554a08..5ae2f5b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -490,7 +490,10 @@ public class ReplicationQueuesZKImpl extends 
ReplicationStateZKBase implements R
   }
   return new Pair<>(newCluster, logQueue);
 } catch (KeeperException e) {
-  LOG.warn("Got exception in copyQueueFromLockedRS: ", e);
+  LOG.warn("Got exception in copyQueueFromLockedRS: "+
+" Possible problem: check if znode size exceeds jute.maxBuffer value. "
+  + "If so, increase it for both client and server side." ,e);
+
 } catch (InterruptedException e) {
   LOG.warn(e);
   Thread.currentThread().interrupt();

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ecfca3a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
index 1ed5a6b..4a2c807 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java
@@ -47,6 +47,7 @@ public interface MetricsReplicationSourceSource {
   public static final String SOURCE_REPEATED_LOG_FILE_BYTES = 
"source.repeatedLogFileBytes";
   public static final String SOURCE_COMPLETED_LOGS = "source.completedLogs";
   public static final String SOURCE_COMPLETED_RECOVERY_QUEUES = 
"source.completedRecoverQueues";
+  public static final String SOURCE_FAILED_RECOVERY_QUEUES = 
"source.failedRecoverQueues";
 
   void setLastShippedAge(long age);
   void incrSizeOfLogQueue(int size);
@@ -70,4 +71,6 @@ public interface MetricsReplicationSourceSource {
   void incrRepeatedFileBytes(final long bytes);
   void incrCompletedWAL();
   void incrCompletedRecoveryQueue();
+  void incrFailedRecoveryQueue();
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ecfca3a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 

[21/50] [abbrv] hbase git commit: HBASE-20052 TestRegionOpen#testNonExistentRegionReplica fails due to NPE

2018-12-13 Thread apurtell
HBASE-20052 TestRegionOpen#testNonExistentRegionReplica fails due to NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8bc84fdc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8bc84fdc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8bc84fdc

Branch: refs/heads/branch-1.3
Commit: 8bc84fdc1b67ce2b28a2a8774defe669dfca1501
Parents: 453935f
Author: tedyu 
Authored: Thu Feb 22 12:41:13 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java   | 3 +--
 .../java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java | 2 --
 2 files changed, 1 insertion(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8bc84fdc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 7672204..ba5249b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -906,12 +906,11 @@ public class HRegionFileSystem {
 
   // Write HRI to a file in case we need to recover hbase:meta
   regionFs.writeRegionInfoOnFilesystem(false);
-  return regionFs;
 } else {
   if (LOG.isDebugEnabled())
 LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
 }
-return null;
+return regionFs;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bc84fdc/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
index 62aabce..f45df18 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -98,7 +97,6 @@ public class TestRegionOpen {
 assertEquals(2, exec.getCompletedTaskCount());
   }
 
-  @Ignore // Needs rewrite since HBASE-19391 which returns null out of 
createRegionOnFileSystem
   @Test
   public void testNonExistentRegionReplica() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());



[09/50] [abbrv] hbase git commit: HBASE-19816 Refresh repliation sinks on UnknownHostException

2018-12-13 Thread apurtell
HBASE-19816 Refresh repliation sinks on UnknownHostException

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/109219d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/109219d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/109219d3

Branch: refs/heads/branch-1.3
Commit: 109219d3c9e9eaac0342f6b70109215ce3fbeda6
Parents: 217ee60
Author: Scott Wilson 
Authored: Wed Jan 17 13:18:30 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../regionserver/HBaseInterClusterReplicationEndpoint.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/109219d3/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 85bd11a..e5f2a14 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.replication.regionserver;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -327,7 +328,7 @@ public class HBaseInterClusterReplicationEndpoint extends 
HBaseReplicationEndpoi
   "call to the remote cluster timed out, which is usually " +
   "caused by a machine failure or a massive slowdown",
   this.socketTimeoutMultiplier);
-  } else if (ioe instanceof ConnectException) {
+  } else if (ioe instanceof ConnectException || ioe instanceof 
UnknownHostException) {
 LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
 replicationSinkMgr.chooseSinks();
   } else {



[33/50] [abbrv] hbase git commit: HBASE-20732 Shutdown scan pool when master is stopped

2018-12-13 Thread apurtell
HBASE-20732 Shutdown scan pool when master is stopped

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/955264ed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/955264ed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/955264ed

Branch: refs/heads/branch-1.3
Commit: 955264ed437165fbb26f04172f617fb44835ff6b
Parents: 0824695
Author: Reid Chan 
Authored: Wed Jun 27 18:55:21 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  1 +
 .../hbase/master/cleaner/CleanerChore.java  | 74 
 .../hbase/master/cleaner/TestCleanerChore.java  | 16 +++--
 3 files changed, 55 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/955264ed/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 67c7787..b47fecb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1228,6 +1228,7 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 }
 super.stopServiceThreads();
 stopChores();
+CleanerChore.shutDownChorePool();
 
 // Wait for all the remaining region servers to report in IFF we were
 // running a cluster shutdown AND we were NOT aborting.

http://git-wip-us.apache.org/repos/asf/hbase/blob/955264ed/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index b3f1f0a..7d38ddb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.cleaner;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -118,7 +119,7 @@ public abstract class CleanerChore extends Schedu
   break;
 }
   }
-  pool.shutdownNow();
+  shutDownNow();
   LOG.info("Update chore's pool size from " + pool.getParallelism() + " to 
" + size);
   pool = new ForkJoinPool(size);
 }
@@ -136,6 +137,13 @@ public abstract class CleanerChore extends Schedu
 synchronized void submit(ForkJoinTask task) {
   pool.submit(task);
 }
+
+synchronized void shutDownNow() {
+  if (pool == null || pool.isShutdown()) {
+return;
+  }
+  pool.shutdownNow();
+}
   }
   // It may be waste resources for each cleaner chore own its pool,
   // so let's make pool for all cleaner chores.
@@ -148,17 +156,24 @@ public abstract class CleanerChore extends Schedu
   protected Map params;
   private AtomicBoolean enabled = new AtomicBoolean(true);
 
-  public CleanerChore(String name, final int sleepPeriod, final Stoppable s, 
Configuration conf,
-  FileSystem fs, Path oldFileDir, String confKey) {
-this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, null);
-  }
-
   public static void initChorePool(Configuration conf) {
 if (POOL == null) {
   POOL = new DirScanPool(conf);
 }
   }
 
+  public static void shutDownChorePool() {
+if (POOL != null) {
+  POOL.shutDownNow();
+  POOL = null;
+}
+  }
+
+  public CleanerChore(String name, final int sleepPeriod, final Stoppable s, 
Configuration conf,
+  FileSystem fs, Path oldFileDir, String confKey) {
+this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, null);
+  }
+
   /**
* @param name name of the chore being run
* @param sleepPeriod the period of time to sleep between each run
@@ -432,6 +447,7 @@ public abstract class CleanerChore extends Schedu
 protected Boolean compute() {
   LOG.trace("Cleaning under " + dir);
   List subDirs;
+  List tmpFiles;
   final List files;
   try {
 // if dir doesn't exist, we'll get null back for both of these
@@ -442,48 +458,48 @@ public abstract class CleanerChore extends Schedu
 return f.isDirectory();
   }
 });
-files = FSUtils.listStatusWithStatusFilter(fs, dir, 

[12/50] [abbrv] hbase git commit: HBASE-19358 Improve the stability of splitting log when do fail over

2018-12-13 Thread apurtell
HBASE-19358 Improve the stability of splitting log when do fail over

Signed-off-by: Yu Li 

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4f46f53
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4f46f53
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4f46f53

Branch: refs/heads/branch-1.3
Commit: e4f46f53b737d0a5ea0c4c079b6182a5d918e91d
Parents: cd1726f
Author: Jingyun Tian 
Authored: Tue Jan 2 17:21:32 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 358 +--
 .../TestWALReplayBoundedLogWriterCreation.java  |  33 ++
 .../TestWALSplitBoundedLogWriterCreation.java   |  44 +++
 3 files changed, 330 insertions(+), 105 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4f46f53/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index cc065e5..1927eb3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -25,6 +25,7 @@ import java.io.InterruptedIOException;
 import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -161,6 +162,9 @@ public class WALSplitter {
 
   protected boolean distributedLogReplay;
 
+  private final boolean splitWriterCreationBounded;
+
+
   // Map encodedRegionName -> lastFlushedSequenceId
   protected Map lastFlushedSequenceIds = new 
ConcurrentHashMap();
 
@@ -180,6 +184,8 @@ public class WALSplitter {
   // the file being split currently
   private FileStatus fileBeingSplit;
 
+  public final static String SPLIT_WRITER_CREATION_BOUNDED = 
"hbase.split.writer.creation.bounded";
+
   @VisibleForTesting
   WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
   FileSystem fs, LastSequenceId idChecker,
@@ -194,10 +200,10 @@ public class WALSplitter {
 this.csm = (BaseCoordinatedStateManager)csm;
 this.walFactory = factory;
 this.controller = new PipelineController();
-
+this.splitWriterCreationBounded = 
conf.getBoolean(SPLIT_WRITER_CREATION_BOUNDED, false);
 entryBuffers = new EntryBuffers(controller,
 this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-128*1024*1024));
+128*1024*1024), splitWriterCreationBounded);
 
 // a larger minBatchSize may slow down recovery because replay writer has 
to wait for
 // enough edits before replaying them
@@ -212,7 +218,12 @@ public class WALSplitter {
 LOG.info("ZooKeeperWatcher is passed in as NULL so disable 
distrubitedLogRepaly.");
   }
   this.distributedLogReplay = false;
-  outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, 
numWriterThreads);
+  if(splitWriterCreationBounded){
+outputSink = new BoundedLogWriterCreationOutputSink(controller,
+entryBuffers, numWriterThreads);
+  }else {
+outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, 
numWriterThreads);
+  }
 }
 
   }
@@ -923,11 +934,19 @@ public class WALSplitter {
 Set currentlyWriting = new TreeSet(Bytes.BYTES_COMPARATOR);
 
 long totalBuffered = 0;
-long maxHeapUsage;
+final long maxHeapUsage;
+boolean splitWriterCreationBounded;
+
 
 public EntryBuffers(PipelineController controller, long maxHeapUsage) {
+  this(controller, maxHeapUsage, false);
+}
+
+public EntryBuffers(PipelineController controller, long maxHeapUsage,
+boolean splitWriterCreationBounded) {
   this.controller = controller;
   this.maxHeapUsage = maxHeapUsage;
+  this.splitWriterCreationBounded = splitWriterCreationBounded;
 }
 
 /**
@@ -967,6 +986,14 @@ public class WALSplitter {
  * @return RegionEntryBuffer a buffer of edits to be written or replayed.
  */
 synchronized RegionEntryBuffer getChunkToWrite() {
+  // The core part of limiting opening writers is it doesn't return chunk 
only if the heap size
+  // is over maxHeapUsage. Thus it doesn't need to create a writer for 
each region
+  // during splitting. It will flush all the logs in the buffer after 
splitting through a
+  // threadpool, which means the number of writers it created is under 
control
+  if(splitWriterCreationBounded && 

[11/50] [abbrv] hbase git commit: HBASE-19163 "Maximum lock count exceeded" from region server's batch processing

2018-12-13 Thread apurtell
HBASE-19163 "Maximum lock count exceeded" from region server's batch processing

Signed-off-by: Umesh Agashe 
Signed-off-by: Michael Stack 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3f911c1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3f911c1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3f911c1

Branch: refs/heads/branch-1.3
Commit: b3f911c1c6381c801d88b1d0fe8f4620860aada0
Parents: e4f46f5
Author: huaxiangsun 
Authored: Fri Jan 19 11:22:00 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 43 
 .../hbase/client/TestFromClientSide3.java   | 27 
 .../hbase/regionserver/TestAtomicOperation.java |  6 +--
 3 files changed, 66 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f911c1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 499cfe4..c2ccf83 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -112,6 +112,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
+import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -3098,6 +3099,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // We try to set up a batch in the range [firstIndex,lastIndexExclusive)
 int firstIndex = batchOp.nextIndexToProcess;
 int lastIndexExclusive = firstIndex;
+RowLock prevRowLock = null;
 boolean success = false;
 int noOfPuts = 0, noOfDeletes = 0;
 WALKey walKey = null;
@@ -3160,7 +3162,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 boolean shouldBlock = numReadyToWrite == 0;
 RowLock rowLock = null;
 try {
-  rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock);
+  rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock, 
prevRowLock);
+} catch (TimeoutIOException e) {
+  // We will retry when other exceptions, but we should stop if we 
timeout .
+  throw e;
 } catch (IOException ioe) {
   LOG.warn("Failed getting lock in batch put, row="
 + Bytes.toStringBinary(mutation.getRow()), ioe);
@@ -3171,7 +3176,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   break;
 
 } else {
-  acquiredRowLocks.add(rowLock);
+  if (rowLock != prevRowLock) {
+// It is a different row now, add this to the acquiredRowLocks and
+// set prevRowLock to the new returned rowLock
+acquiredRowLocks.add(rowLock);
+prevRowLock = rowLock;
+  }
 }
 
 lastIndexExclusive++;
@@ -3265,7 +3275,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   checkAndPrepareMutation(cpMutation, isInReplay, cpFamilyMap, 
now);
 
   // Acquire row locks. If not, the whole batch will fail.
-  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true, true));
+  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true, true, null));
 
   if (cpMutation.getDurability() == Durability.SKIP_WAL) {
 recordMutationWithoutWal(cpFamilyMap);
@@ -5360,17 +5370,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   public RowLock getRowLock(byte[] row, boolean readLock, boolean waitForLock) 
throws IOException {
 // Make sure the row is inside of this region before getting the lock for 
it.
 checkRow(row, "row lock");
-return getRowLockInternal(row, readLock, waitForLock);
+return getRowLockInternal(row, readLock, waitForLock, null);
   }
 
   // getRowLock calls checkRow. Call this to skip checkRow.
   protected RowLock getRowLockInternal(byte[] row)
   throws 

[47/50] [abbrv] hbase git commit: HBASE-20857 balancer status tag in jmx metrics

2018-12-13 Thread apurtell
HBASE-20857 balancer status tag in jmx metrics

Signed-off-by: Andrew Purtell 
Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/10c4f59e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/10c4f59e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/10c4f59e

Branch: refs/heads/branch-1.3
Commit: 10c4f59eca5c8b6e037e5551388e19928cd20fce
Parents: 785e21f
Author: Kiran Kumar Maturi 
Authored: Tue Sep 18 09:45:26 2018 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:25:56 2018 -0800

--
 .../master/balancer/MetricsBalancerSource.java  |   3 +
 .../balancer/MetricsBalancerSourceImpl.java |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   5 +
 .../hadoop/hbase/master/LoadBalancer.java   |   3 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   1 +
 .../hadoop/hbase/master/MasterServices.java |   5 +
 .../hbase/master/balancer/BaseLoadBalancer.java |   7 ++
 .../hbase/master/balancer/MetricsBalancer.java  |   4 +
 .../balancer/MetricsStochasticBalancer.java |   8 ++
 .../hbase/master/MockNoopMasterServices.java|   5 +
 .../hadoop/hbase/master/TestCatalogJanitor.java |   5 +
 .../TestBalancerStatusTagInJMXMetrics.java  | 108 +++
 12 files changed, 160 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/10c4f59e/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
index 1c9a61e..f9cc62b 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java
@@ -39,6 +39,7 @@ public interface MetricsBalancerSource extends BaseSource  {
 
   String BALANCE_CLUSTER = "balancerCluster";
   String MISC_INVOATION_COUNT = "miscInvocationCount";
+  String BALANCER_STATUS = "isBalancerActive";
 
   /**
* Description
@@ -48,4 +49,6 @@ public interface MetricsBalancerSource extends BaseSource  {
   void updateBalanceCluster(long time);
 
   void incrMiscInvocations();
+
+  void updateBalancerStatus(boolean status);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/10c4f59e/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
index 0a74630..de904d4 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
@@ -37,6 +37,7 @@ public class MetricsBalancerSourceImpl extends BaseSourceImpl 
implements Metrics
String metricsDescription,
String metricsContext, String 
metricsJmxContext) {
 super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+updateBalancerStatus(true);
   }
 
 
@@ -44,7 +45,6 @@ public class MetricsBalancerSourceImpl extends BaseSourceImpl 
implements Metrics
   public void init() {
 blanceClusterHisto = metricsRegistry.newTimeHistogram(BALANCE_CLUSTER);
 miscCount = metricsRegistry.newCounter(MISC_INVOATION_COUNT, "", 0L);
-
   }
 
   @Override
@@ -56,4 +56,9 @@ public class MetricsBalancerSourceImpl extends BaseSourceImpl 
implements Metrics
   public void incrMiscInvocations() {
  miscCount.incr();
   }
+
+  @Override
+  public void updateBalancerStatus(boolean status) {
+metricsRegistry.tag(BALANCER_STATUS,"", String.valueOf(status), true);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/10c4f59e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 74e1500..e1f7ad1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2427,6 +2427,11 @@ public class HMaster extends HRegionServer 

[16/50] [abbrv] hbase git commit: HBASE-15134 Add visibility into Flush and Compaction queues

2018-12-13 Thread apurtell
HBASE-15134 Add visibility into Flush and Compaction queues

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c35fa2a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c35fa2a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c35fa2a3

Branch: refs/heads/branch-1.3
Commit: c35fa2a3e115a41b1fcb7afc533dddeefa74163d
Parents: 3e0d693
Author: Abhishek Singh Chouhan 
Authored: Fri Jul 28 13:17:32 2017 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../hbase/regionserver/MetricsRegionSource.java |  8 +
 .../regionserver/MetricsRegionWrapper.java  | 24 +++
 .../regionserver/MetricsRegionSourceImpl.java   | 19 +++-
 .../TestMetricsRegionSourceImpl.java| 20 
 .../hbase/regionserver/CompactSplitThread.java  |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 20 +++-
 .../hbase/regionserver/MemStoreFlusher.java |  2 ++
 .../regionserver/MetricsRegionWrapperImpl.java  | 32 
 .../regionserver/MetricsRegionWrapperStub.java  | 20 
 .../hbase/regionserver/TestMetricsRegion.java   | 12 
 10 files changed, 163 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c35fa2a3/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index decf841..d5738cf 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -30,11 +30,19 @@ public interface MetricsRegionSource extends 
Comparable {
   String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount";
   String COMPACTIONS_FAILED_COUNT = "compactionsFailedCount";
   String LAST_MAJOR_COMPACTION_AGE = "lastMajorCompactionAge";
+  String COMPACTIONS_QUEUED_COUNT = "compactionsQueuedCount";
+  String MAX_COMPACTION_QUEUE_SIZE = "maxCompactionQueueSize";
   String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount";
   String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount";
+  String FLUSHES_QUEUED_COUNT = "flushesQueuedCount";
+  String MAX_FLUSH_QUEUE_SIZE = "maxFlushQueueSize";
   String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have 
completed.";
   String COMPACTIONS_FAILED_DESC = "Number of compactions that have failed.";
   String LAST_MAJOR_COMPACTION_DESC = "Age of the last major compaction in 
milliseconds.";
+  String COMPACTIONS_QUEUED_DESC = "Number of compactions that are 
queued/running for this region";
+  String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for 
this region";
+  String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this 
region";
+  String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region";
   String  NUM_BYTES_COMPACTED_DESC =
   "Sum of filesize on all files entering a finished, successful or 
aborted, compaction";
   String NUM_FILES_COMPACTED_DESC =

http://git-wip-us.apache.org/repos/asf/hbase/blob/c35fa2a3/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 9b7acd3..9a725cd 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -112,6 +112,30 @@ public interface MetricsRegionWrapper {
*/
   long getNumCompactionsFailed();
 
+  /**
+   * @return the total number of compactions that are currently queued(or 
being executed) at point in
+   *  time
+   */
+  long getNumCompactionsQueued();
+
+  /**
+   * @return the total number of flushes currently queued(being executed) for 
this region at point in
+   *  time
+   */
+  long getNumFlushesQueued();
+
+  /**
+   * @return the max number of compactions queued for this region
+   * Note that this metric is updated periodically and hence might miss some 
data points
+   */
+  long getMaxCompactionQueueSize();
+
+  /**
+   * @return the max number of flushes queued for this 

[44/50] [abbrv] hbase git commit: HBASE-21126 Configurable number of allowed failures for ZooKeeper Canary

2018-12-13 Thread apurtell
HBASE-21126 Configurable number of allowed failures for ZooKeeper Canary

Signed-off-by: Josh Elser 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/09069df2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/09069df2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/09069df2

Branch: refs/heads/branch-1.3
Commit: 09069df2f8a9cb19ce368a54770d333f0e36fe5d
Parents: 1ecfca3
Author: David Manning 
Authored: Fri Aug 31 18:32:15 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:25:40 2018 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 51 +++-
 .../hadoop/hbase/tool/TestCanaryTool.java   | 35 +-
 2 files changed, 62 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/09069df2/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index dcaa057..081ef90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -580,6 +580,7 @@ public final class Canary implements Tool {
   private boolean failOnError = true;
   private boolean regionServerMode = false;
   private boolean zookeeperMode = false;
+  private long permittedFailures = 0;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
   private long configuredWriteTableTimeout = DEFAULT_TIMEOUT;
@@ -723,6 +724,19 @@ public final class Canary implements Tool {
 }
 this.configuredReadTableTimeouts.put(nameTimeout[0], timeoutVal);
   }
+} else if (cmd.equals("-permittedZookeeperFailures")) {
+  i++;
+
+  if (i == args.length) {
+System.err.println("-permittedZookeeperFailures needs a numeric 
value argument.");
+printUsageAndExit();
+  }
+  try {
+this.permittedFailures = Long.parseLong(args[i]);
+  } catch (NumberFormatException e) {
+System.err.println("-permittedZookeeperFailures needs a numeric 
value argument.");
+printUsageAndExit();
+  }
 } else {
   // no options match
   System.err.println(cmd + " options is invalid.");
@@ -744,6 +758,10 @@ public final class Canary implements Tool {
 printUsageAndExit();
   }
 }
+if (this.permittedFailures != 0 && !this.zookeeperMode) {
+  System.err.println("-permittedZookeeperFailures requires -zookeeper 
mode.");
+  printUsageAndExit();
+}
 if (!this.configuredReadTableTimeouts.isEmpty() && (this.regionServerMode 
|| this.zookeeperMode)) {
   System.err.println("-readTableTimeouts can only be configured in region 
mode.");
   printUsageAndExit();
@@ -842,6 +860,8 @@ public final class Canary implements Tool {
 System.err.println("  only works in regionserver mode.");
 System.err.println("   -zookeeperTries to grab zookeeper.znode.parent 
");
 System.err.println("  on each zookeeper instance");
+System.err.println("   -permittedZookeeperFailures Ignore first N 
failures when attempting to ");
+System.err.println("  connect to individual zookeeper nodes in the 
ensemble");
 System.err.println("   -daemonContinuous check at defined 
intervals.");
 System.err.println("   -interval   Interval between checks (sec)");
 System.err.println("   -e Use table/regionserver as regular 
expression");
@@ -884,17 +904,18 @@ public final class Canary implements Tool {
   monitor =
   new RegionServerMonitor(connection, monitorTargets, this.useRegExp,
   (StdOutSink) this.sink, this.executor, 
this.regionServerAllRegions,
-  this.treatFailureAsError);
+  this.treatFailureAsError, this.permittedFailures);
 } else if (this.sink instanceof ZookeeperStdOutSink || this.zookeeperMode) 
{
   monitor =
   new ZookeeperMonitor(connection, monitorTargets, this.useRegExp,
-  (StdOutSink) this.sink, this.executor, this.treatFailureAsError);
+  (StdOutSink) this.sink, this.executor, this.treatFailureAsError,
+  this.permittedFailures);
 } else {
   monitor =
   new RegionMonitor(connection, monitorTargets, this.useRegExp,
   (StdOutSink) this.sink, this.executor, this.writeSniffing,
   this.writeTableName, this.treatFailureAsError, 

[38/50] [abbrv] hbase git commit: HBASE-17437 Support specifying a WAL directory outside of the root directory (Yishan Yang and Zach York) HBASE-17588 Remove unused imports brought in by HBASE-17437 (

2018-12-13 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/979438d2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
index 760cdc1..1fcb241 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -88,6 +89,8 @@ public class TestFSHLog {
   protected static Configuration conf;
   protected static FileSystem fs;
   protected static Path dir;
+  protected static Path rootDir;
+  protected static Path walRootDir;
   protected final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
 
   @Rule
@@ -99,8 +102,10 @@ public class TestFSHLog {
 for (FileStatus dir : entries) {
   fs.delete(dir.getPath(), true);
 }
-final Path hbaseDir = TEST_UTIL.createRootDir();
-dir = new Path(hbaseDir, currentTest.getMethodName());
+rootDir = TEST_UTIL.createRootDir();
+walRootDir = TEST_UTIL.createWALRootDir();
+dir = new Path(walRootDir, currentTest.getMethodName());
+assertNotEquals(rootDir, walRootDir);
   }
 
   @After
@@ -133,6 +138,8 @@ public class TestFSHLog {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+fs.delete(rootDir, true);
+fs.delete(walRootDir, true);
 TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -144,7 +151,7 @@ public class TestFSHLog {
 // test to see whether the coprocessor is loaded or not.
 FSHLog log = null;
 try {
-  log = new FSHLog(fs, FSUtils.getRootDir(conf), dir.toString(),
+  log = new FSHLog(fs, walRootDir, dir.toString(),
   HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
   WALCoprocessorHost host = log.getCoprocessorHost();
   Coprocessor c = 
host.findCoprocessor(SampleRegionWALObserver.class.getName());
@@ -195,7 +202,7 @@ public class TestFSHLog {
 FSHLog wal1 = null;
 FSHLog walMeta = null;
 try {
-  wal1 = new FSHLog(fs, FSUtils.getRootDir(conf), dir.toString(),
+  wal1 = new FSHLog(fs, walRootDir, dir.toString(),
   HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
   LOG.debug("Log obtained is: " + wal1);
   Comparator comp = wal1.LOG_NAME_COMPARATOR;
@@ -205,7 +212,7 @@ public class TestFSHLog {
   assertTrue(comp.compare(p1, p1) == 0);
   // comparing with different filenum.
   assertTrue(comp.compare(p1, p2) < 0);
-  walMeta = new FSHLog(fs, FSUtils.getRootDir(conf), dir.toString(),
+  walMeta = new FSHLog(fs, walRootDir, dir.toString(),
   HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null,
   DefaultWALProvider.META_WAL_PROVIDER_ID);
   Comparator compMeta = walMeta.LOG_NAME_COMPARATOR;
@@ -253,7 +260,7 @@ public class TestFSHLog {
 LOG.debug("testFindMemStoresEligibleForFlush");
 Configuration conf1 = HBaseConfiguration.create(conf);
 conf1.setInt("hbase.regionserver.maxlogs", 1);
-FSHLog wal = new FSHLog(fs, FSUtils.getRootDir(conf1), dir.toString(),
+FSHLog wal = new FSHLog(fs, walRootDir, dir.toString(),
 HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
 HTableDescriptor t1 =
 new HTableDescriptor(TableName.valueOf("t1")).addFamily(new 
HColumnDescriptor("row"));
@@ -330,7 +337,7 @@ public class TestFSHLog {
   @Test(expected=IOException.class)
   public void testFailedToCreateWALIfParentRenamed() throws IOException {
 final String name = "testFailedToCreateWALIfParentRenamed";
-FSHLog log = new FSHLog(fs, FSUtils.getRootDir(conf), name, 
HConstants.HREGION_OLDLOGDIR_NAME,
+FSHLog log = new FSHLog(fs, walRootDir, name, 
HConstants.HREGION_OLDLOGDIR_NAME,
 conf, null, true, null, null);
 long filenum = System.currentTimeMillis();
 Path path = log.computeFilename(filenum);
@@ -359,13 +366,13 @@ public class TestFSHLog {
 final byte[] rowName = tableName.getName();
 final HTableDescriptor htd = new HTableDescriptor(tableName);
 htd.addFamily(new HColumnDescriptor("f"));
-HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
+HRegion r = HRegion.createHRegion(hri, rootDir,
   TEST_UTIL.getConfiguration(), htd);
 HRegion.closeHRegion(r);
 final int countPerFamily = 10;
 final MutableBoolean goslow = new MutableBoolean(false);
 // subclass and doctor a method.
-FSHLog wal = new 

[46/50] [abbrv] hbase git commit: HBASE-18451 PeriodicMemstoreFlusher should inspect the queue before adding a delayed flush request

2018-12-13 Thread apurtell
HBASE-18451 PeriodicMemstoreFlusher should inspect the queue before adding a 
delayed flush request

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/785e21fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/785e21fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/785e21fe

Branch: refs/heads/branch-1.3
Commit: 785e21fe545da33811a50e0718d7cfeb7dc74df7
Parents: a4baeeb
Author: xcang 
Authored: Sun Sep 23 23:42:57 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:25:50 2018 -0800

--
 .../hadoop/hbase/regionserver/FlushRequester.java   |  6 --
 .../apache/hadoop/hbase/regionserver/HRegionServer.java | 12 +++-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java  | 12 
 .../hbase/regionserver/TestHeapMemoryManager.java   |  7 ---
 .../hadoop/hbase/regionserver/wal/TestWALReplay.java| 11 +--
 5 files changed, 28 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/785e21fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
index c7e155a..243546c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
@@ -32,8 +32,9 @@ public interface FlushRequester {
* @param region the Region requesting the cache flush
* @param forceFlushAllStores whether we want to flush all stores. e.g., 
when request from log
*  rolling.
+   * @return true if our region is added into the queue, false otherwise
*/
-  void requestFlush(Region region, boolean forceFlushAllStores);
+  boolean requestFlush(Region region, boolean forceFlushAllStores);
 
   /**
* Tell the listener the cache needs to be flushed after a delay
@@ -42,8 +43,9 @@ public interface FlushRequester {
* @param delay after how much time should the flush happen
* @param forceFlushAllStores whether we want to flush all stores. e.g., 
when request from log
*  rolling.
+   * @return true if our region is added into the queue, false otherwise
*/
-  void requestDelayedFlush(Region region, long delay, boolean 
forceFlushAllStores);
+  boolean requestDelayedFlush(Region region, long delay, boolean 
forceFlushAllStores);
 
   /**
* Register a FlushRequestListener

http://git-wip-us.apache.org/repos/asf/hbase/blob/785e21fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 21f269e..bbf488a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -28,6 +28,7 @@ import java.net.BindException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
+import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -1635,17 +1636,18 @@ public class HRegionServer extends HasThread implements
   final StringBuffer whyFlush = new StringBuffer();
   for (Region r : this.server.onlineRegions.values()) {
 if (r == null) continue;
-if (((HRegion)r).shouldFlush(whyFlush)) {
+if (((HRegion) r).shouldFlush(whyFlush)) {
   FlushRequester requester = server.getFlushRequester();
   if (requester != null) {
 long randomDelay = (long) RandomUtils.nextInt(rangeOfDelay) + 
MIN_DELAY_TIME;
-LOG.info(getName() + " requesting flush of " +
-  r.getRegionInfo().getRegionNameAsString() + " because " +
-  whyFlush.toString() + " after random delay " + randomDelay + 
"ms");
 //Throttle the flushes by putting a delay. If we don't throttle, 
and there
 //is a balanced write-load on the regions in a table, we might end 
up
 //overwhelming the filesystem with too many flushes at once.
-requester.requestDelayedFlush(r, 

[06/50] [abbrv] hbase git commit: HBASE-17731 Fractional latency reporting in MultiThreadedAction

2018-12-13 Thread apurtell
HBASE-17731 Fractional latency reporting in MultiThreadedAction


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/286ade81
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/286ade81
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/286ade81

Branch: refs/heads/branch-1.3
Commit: 286ade8155e5c198feef83bbd5fcc4f00a3d7796
Parents: 109219d
Author: Andrew Purtell 
Authored: Thu Mar 9 16:54:23 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../org/apache/hadoop/hbase/util/MultiThreadedAction.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/286ade81/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 91b6d3b..0d25a68 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -218,11 +218,13 @@ public abstract class MultiThreadedAction {
   + ", time="
   + formatTime(time)
   + ((numKeys > 0 && time > 0) ? (" Overall: [" + "keys/s= "
-  + numKeys * 1000 / time + ", latency=" + totalOpTime
-  / numKeys + " ms]") : "")
+  + numKeys * 1000 / time + ", latency="
+  + String.format("%.2f", (double)totalOpTime / 
(double)numKeys)
+  + " ms]") : "")
   + ((numKeysDelta > 0) ? (" Current: [" + "keys/s="
   + numKeysDelta * 1000 / REPORTING_INTERVAL_MS + ", latency="
-  + totalOpTimeDelta / numKeysDelta + " ms]") : "")
+  + String.format("%.2f", (double)totalOpTimeDelta / 
(double)numKeysDelta)
+  + " ms]") : "")
   + progressInfo());
 
   if (streamingCounters) {



[50/50] [abbrv] hbase git commit: HBASE-21546 ConnectException in TestThriftHttpServer

2018-12-13 Thread apurtell
HBASE-21546 ConnectException in TestThriftHttpServer

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e063aa8d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e063aa8d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e063aa8d

Branch: refs/heads/branch-1.3
Commit: e063aa8db09d8e8224a923d01aa027742f253aff
Parents: 82f187e
Author: Peter Somogyi 
Authored: Tue Dec 4 14:57:50 2018 +0100
Committer: Andrew Purtell 
Committed: Thu Dec 13 10:25:10 2018 -0800

--
 .../org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e063aa8d/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index b21de38..8ed2259 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -185,11 +185,7 @@ public class TestThriftHttpServer {
 
   private void waitThriftServerStartup() throws Exception{
 // wait up to 10s for the server to start
-for (int i = 0; i < 100
-&& ( thriftServer.serverRunner == null ||  
thriftServer.serverRunner.httpServer ==
-null); i++) {
-  Thread.sleep(100);
-}
+HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, port);
   }
 
   private void runThriftServer(int customHeaderSize) throws Exception {
@@ -252,7 +248,7 @@ public class TestThriftHttpServer {
   }
 
   private void stopHttpServerThread() throws Exception {
-LOG.debug("Stopping " + " Thrift HTTP server");
+LOG.debug("Stopping Thrift HTTP server");
 thriftServer.stop();
 httpServerThread.join();
 if (httpServerException != null) {



[14/50] [abbrv] hbase git commit: HBASE-19435 Reopen Files for ClosedChannelException in BucketCache

2018-12-13 Thread apurtell
HBASE-19435 Reopen Files for ClosedChannelException in BucketCache

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4b6e589b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4b6e589b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4b6e589b

Branch: refs/heads/branch-1.3
Commit: 4b6e589bbbe600ad68c205c758ea7f90ff3c67c7
Parents: b442a7f
Author: Zach York 
Authored: Mon Dec 4 12:11:21 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:17 2018 -0800

--
 .../hbase/io/hfile/bucket/FileIOEngine.java | 28 +++-
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 15 +++
 2 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4b6e589b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index 3419587..cb454d4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -19,12 +19,15 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
 import java.nio.channels.FileChannel;
 import java.util.Arrays;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -108,6 +111,17 @@ public class FileIOEngine implements IOEngine {
 return 0;
   }
 
+  @VisibleForTesting
+  void closeFileChannels() {
+for (FileChannel fileChannel: fileChannels) {
+  try {
+fileChannel.close();
+  } catch (IOException e) {
+LOG.warn("Failed to close FileChannel", e);
+  }
+}
+  }
+
   /**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
@@ -169,11 +183,18 @@ public class FileIOEngine implements IOEngine {
 int bufLimit = buffer.limit();
 while (true) {
   FileChannel fileChannel = fileChannels[accessFileNum];
+  int accessLen = 0;
   if (endFileNum > accessFileNum) {
 // short the limit;
 buffer.limit((int) (buffer.limit() - remainingAccessDataLen + 
sizePerFile - accessOffset));
   }
-  int accessLen = accessor.access(fileChannel, buffer, accessOffset);
+  try {
+accessLen = accessor.access(fileChannel, buffer, accessOffset);
+  } catch (ClosedChannelException e) {
+LOG.warn("Caught ClosedChannelException accessing BucketCache, 
reopening file. ", e);
+refreshFileConnection(accessFileNum);
+continue;
+  }
   // recover the limit
   buffer.limit(bufLimit);
   if (accessLen < remainingAccessDataLen) {
@@ -213,6 +234,11 @@ public class FileIOEngine implements IOEngine {
 return fileNum;
   }
 
+  private void refreshFileConnection(int accessFileNum) throws 
FileNotFoundException {
+rafs[accessFileNum] = new RandomAccessFile(filePaths[accessFileNum], "rw");
+fileChannels[accessFileNum] = rafs[accessFileNum].getChannel();
+  }
+
   private static interface FileAccessor {
 int access(FileChannel fileChannel, ByteBuffer byteBuffer, long 
accessOffset)
 throws IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4b6e589b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index a03818b..adf7fd0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -114,4 +114,19 @@ public class TestFileIOEngine {
 fileIOEngine.read(ByteBuffer.wrap(data2), 0);
 assertArrayEquals(data1, data2);
   }
+
+  @Test
+  public void testClosedChannelException() throws IOException {
+fileIOEngine.closeFileChannels();
+int len = 5;
+long offset = 0L;
+byte[] data1 = new byte[len];
+for (int j = 0; j < data1.length; ++j) {
+  data1[j] = (byte) 

[01/50] [abbrv] hbase git commit: HBASE 17959 Canary timeout should be configurable on a per-table basis

2018-12-13 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b9adb955c -> d37294174


HBASE 17959 Canary timeout should be configurable on a per-table basis

For branch-1: Added support for configuring read/write timeouts on a per-table 
basis
when in region mode.
Added unit test for per-table timeout checks.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/178b675b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/178b675b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/178b675b

Branch: refs/heads/branch-1.3
Commit: 178b675b792b4e9d3ddabd625a79042231a6fc91
Parents: 212e86d
Author: Chinmay Kulkarni 
Authored: Wed May 31 14:38:41 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:15 2018 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 167 ---
 .../hadoop/hbase/tool/TestCanaryTool.java   |  75 -
 pom.xml |   2 +-
 3 files changed, 211 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/178b675b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 068e0ad..259690b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -244,6 +244,30 @@ public final class Canary implements Tool {
 }
   }
 
+  public static class RegionStdOutSink extends StdOutSink {
+
+private Map perTableReadLatency = new HashMap<>();
+private AtomicLong writeLatency = new AtomicLong();
+
+public Map getReadLatencyMap() {
+  return this.perTableReadLatency;
+}
+
+public AtomicLong initializeAndGetReadLatencyForTable(String tableName) {
+  AtomicLong initLatency = new AtomicLong(0L);
+  this.perTableReadLatency.put(tableName, initLatency);
+  return initLatency;
+}
+
+public void initializeWriteLatency() {
+  this.writeLatency.set(0L);
+}
+
+public AtomicLong getWriteLatency() {
+  return this.writeLatency;
+}
+  }
+
   static class ZookeeperTask implements Callable {
 private final Connection connection;
 private final String host;
@@ -291,19 +315,21 @@ public final class Canary implements Tool {
 }
 private Connection connection;
 private HRegionInfo region;
-private Sink sink;
+private RegionStdOutSink sink;
 private TaskType taskType;
 private boolean rawScanEnabled;
 private ServerName serverName;
+private AtomicLong readWriteLatency;
 
-RegionTask(Connection connection, HRegionInfo region, ServerName 
serverName, Sink sink,
-TaskType taskType, boolean rawScanEnabled) {
+RegionTask(Connection connection, HRegionInfo region, ServerName 
serverName, RegionStdOutSink sink,
+TaskType taskType, boolean rawScanEnabled, AtomicLong rwLatency) {
   this.connection = connection;
   this.region = region;
   this.serverName = serverName;
   this.sink = sink;
   this.taskType = taskType;
   this.rawScanEnabled = rawScanEnabled;
+  this.readWriteLatency = rwLatency;
 }
 
 @Override
@@ -384,6 +410,7 @@ public final class Canary implements Tool {
 rs.next();
   }
   stopWatch.stop();
+  this.readWriteLatency.addAndGet(stopWatch.getTime());
   sink.publishReadTiming(serverName, region, column, 
stopWatch.getTime());
 } catch (Exception e) {
   sink.publishReadFailure(serverName, region, column, e);
@@ -394,7 +421,6 @@ public final class Canary implements Tool {
   }
   scan = null;
   get = null;
-  startKey = null;
 }
   }
   try {
@@ -436,6 +462,7 @@ public final class Canary implements Tool {
 long startTime = System.currentTimeMillis();
 table.put(put);
 long time = System.currentTimeMillis() - startTime;
+this.readWriteLatency.addAndGet(time);
 sink.publishWriteTiming(serverName, region, column, time);
   } catch (Exception e) {
 sink.publishWriteFailure(serverName, region, column, e);
@@ -569,8 +596,10 @@ public final class Canary implements Tool {
   private boolean zookeeperMode = false;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
+  private long configuredWriteTableTimeout = DEFAULT_TIMEOUT;
   private boolean treatFailureAsError = false;
   private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
+  private HashMap 

[25/50] [abbrv] hbase git commit: HBASE-20523 PE tool should support configuring client side buffering sizes (Ram)

2018-12-13 Thread apurtell
HBASE-20523 PE tool should support configuring client side buffering sizes (Ram)

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/017cb75b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/017cb75b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/017cb75b

Branch: refs/heads/branch-1.3
Commit: 017cb75bd306226dbae7904b6181b99c7576be22
Parents: be4915e
Author: Vasudevan 
Authored: Mon May 7 14:42:29 2018 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../hadoop/hbase/PerformanceEvaluation.java | 22 +++-
 .../hadoop/hbase/TestPerformanceEvaluation.java | 13 +++-
 2 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/017cb75b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 66a2dce..c2418e8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -55,6 +55,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -621,6 +622,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 int families = 1;
 int caching = 30;
 boolean addColumns = true;
+long bufferSize = 2l * 1024l * 1024l;
 
 public TestOptions() {}
 
@@ -664,6 +666,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   this.columns = that.columns;
   this.families = that.families;
   this.caching = that.caching;
+  this.bufferSize = that.bufferSize;
 }
 
 public int getCaching() {
@@ -830,6 +833,14 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   this.valueSize = valueSize;
 }
 
+public void setBufferSize(long bufferSize) {
+  this.bufferSize = bufferSize;
+}
+
+public long getBufferSize() {
+  return this.bufferSize;
+}
+
 public void setPeriod(int period) {
   this.period = period;
 }
@@ -1251,7 +1262,9 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 
 @Override
 void onStartup() throws IOException {
-  this.mutator = 
connection.getBufferedMutator(TableName.valueOf(opts.tableName));
+  BufferedMutatorParams p = new 
BufferedMutatorParams(TableName.valueOf(opts.tableName));
+  p.writeBufferSize(opts.bufferSize);
+  this.mutator = connection.getBufferedMutator(p);
 }
 
 @Override
@@ -2004,6 +2017,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 System.err.println(" columns Columns to write per row. Default: 
1");
 System.err.println(" familiesSpecify number of column families for 
the table. Default: 1");
 System.err.println(" caching Scan caching to use. Default: 30");
+System.err.println(" bufferSize  Set the value of client side 
buffering. Default: 2MB");
 System.err.println();
 System.err.println(" Note: -D properties will be applied to the conf used. 
");
 System.err.println("  For example: ");
@@ -2240,6 +2254,12 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 continue;
   }
 
+  final String bufferSize = "--bufferSize=";
+  if (cmd.startsWith(bufferSize)) {
+opts.bufferSize = Long.parseLong(cmd.substring(bufferSize.length()));
+continue;
+  }
+
   if (isCommandClass(cmd)) {
 opts.cmdName = cmd;
 opts.numClientThreads = Integer.parseInt(args.remove());

http://git-wip-us.apache.org/repos/asf/hbase/blob/017cb75b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
index 3414e0a..cade7a4 100644
--- 

[27/50] [abbrv] hbase git commit: HBASE-20557 Backport HBASE-17215 to branch-1

2018-12-13 Thread apurtell
HBASE-20557 Backport HBASE-17215 to branch-1

The second backport of HBASE-20555

Signed-off-by: Zach York 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30b1dc00
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30b1dc00
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30b1dc00

Branch: refs/heads/branch-1.3
Commit: 30b1dc00b47a0289a721cd0f932d9cdd8422534e
Parents: 8e36761
Author: TAK LON WU 
Authored: Sat Jun 23 08:43:21 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |   1 +
 .../hbase/master/cleaner/HFileCleaner.java  | 316 ++-
 .../hbase/regionserver/RSRpcServices.java   |   3 +-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 152 +
 4 files changed, 468 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/30b1dc00/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index aedb987..315b4c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -861,6 +861,7 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 status.markComplete("Initialization successful");
 LOG.info("Master has completed initialization");
 configurationManager.registerObserver(this.balancer);
+configurationManager.registerObserver(this.hfileCleaner);
 configurationManager.registerObserver(this.logCleaner);
 
 // Set master as 'initialized'.

http://git-wip-us.apache.org/repos/asf/hbase/blob/30b1dc00/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 89c316b..defe851 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -17,22 +17,32 @@
  */
 package org.apache.hadoop.hbase.master.cleaner;
 
+import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+
 /**
  * This Chore, every time it runs, will clear the HFiles in the hfile archive
  * folder that are deletable for each HFile cleaner in the chain.
  */
 @InterfaceAudience.Private
-public class HFileCleaner extends CleanerChore {
+public class HFileCleaner extends CleanerChore 
implements
+ConfigurationObserver {
 
   public static final String MASTER_HFILE_CLEANER_PLUGINS = 
"hbase.master.hfilecleaner.plugins";
 
@@ -41,6 +51,34 @@ public class HFileCleaner extends 
CleanerChore {
 this(period, stopper, conf, fs, directory, null);
   }
 
+  // Configuration key for large/small throttle point
+  public final static String HFILE_DELETE_THROTTLE_THRESHOLD =
+  "hbase.regionserver.thread.hfilecleaner.throttle";
+  public final static int DEFAULT_HFILE_DELETE_THROTTLE_THRESHOLD = 64 * 1024 
* 1024;// 64M
+
+  // Configuration key for large queue size
+  public final static String LARGE_HFILE_DELETE_QUEUE_SIZE =
+  "hbase.regionserver.hfilecleaner.large.queue.size";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_QUEUE_SIZE = 1048576;
+
+  // Configuration key for small queue size
+  public final static String SMALL_HFILE_DELETE_QUEUE_SIZE =
+  "hbase.regionserver.hfilecleaner.small.queue.size";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_QUEUE_SIZE = 1048576;
+
+  private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
+
+  BlockingQueue largeFileQueue;
+  

[34/50] [abbrv] hbase git commit: HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number configurable in HFileCleaner) to branch-1

2018-12-13 Thread apurtell
HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number 
configurable in HFileCleaner) to branch-1

The last port commit of HBASE-20555

Signed-off-by: Yu Li 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24341625
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24341625
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24341625

Branch: refs/heads/branch-1.3
Commit: 2434162594f22df0bba94bc40186bd5628501c8e
Parents: 976f07e
Author: TAK LON WU 
Authored: Mon Jul 23 11:23:57 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:20 2018 -0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 154 +--
 .../hbase/master/cleaner/TestHFileCleaner.java  |  13 +-
 2 files changed, 120 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/24341625/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 70548b4..8f0b4be 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -66,6 +67,16 @@ public class HFileCleaner extends 
CleanerChore impleme
   "hbase.regionserver.hfilecleaner.small.queue.size";
   public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
+  // Configuration key for large file delete thread number
+  public final static String LARGE_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.large.thread.count";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER = 1;
+
+  // Configuration key for small file delete thread number
+  public final static String SMALL_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.small.thread.count";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue largeFileQueue;
@@ -73,11 +84,13 @@ public class HFileCleaner extends 
CleanerChore impleme
   private int throttlePoint;
   private int largeQueueInitSize;
   private int smallQueueInitSize;
+  private int largeFileDeleteThreadNumber;
+  private int smallFileDeleteThreadNumber;
   private List threads = new ArrayList();
   private boolean running;
 
-  private long deletedLargeFiles = 0L;
-  private long deletedSmallFiles = 0L;
+  private AtomicLong deletedLargeFiles = new AtomicLong();
+  private AtomicLong deletedSmallFiles = new AtomicLong();
 
   /**
* @param period the period of time to sleep between each run
@@ -99,6 +112,10 @@ public class HFileCleaner extends 
CleanerChore impleme
 conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, 
DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
 largeFileQueue = new StealJobQueue<>(largeQueueInitSize, 
smallQueueInitSize);
 smallFileQueue = largeFileQueue.getStealFromQueue();
+largeFileDeleteThreadNumber =
+conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, 
DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
+smallFileDeleteThreadNumber =
+conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, 
DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
 startHFileDeleteThreads();
   }
 
@@ -182,30 +199,34 @@ public class HFileCleaner extends 
CleanerChore impleme
 final String n = Thread.currentThread().getName();
 running = true;
 // start thread for large file deletion
-Thread large = new Thread() {
-  @Override
-  public void run() {
-consumerLoop(largeFileQueue);
-  }
-};
-large.setDaemon(true);
-large.setName(n + "-HFileCleaner.large-" + System.currentTimeMillis());
-large.start();
-LOG.debug("Starting hfile cleaner for large files: " + large.getName());
-threads.add(large);
+for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
+  Thread large = new Thread() {
+@Override
+public void run() {
+  consumerLoop(largeFileQueue);
+}
+  };
+  large.setDaemon(true);
+  large.setName(n + "-HFileCleaner.large." + i + "-" + 
System.currentTimeMillis());
+  large.start();
+  LOG.debug("Starting hfile cleaner for large files: " + large.getName());
+  

[07/50] [abbrv] hbase git commit: HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)

2018-12-13 Thread apurtell
HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/217ee60f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/217ee60f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/217ee60f

Branch: refs/heads/branch-1.3
Commit: 217ee60fb4538392d345e581725e79dc53ed
Parents: 5d0d3aa
Author: tedyu 
Authored: Sat Dec 30 23:19:13 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:16 2018 -0800

--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 22 +++---
 .../apache/hadoop/hbase/util/TestHBaseFsck.java | 71 ++--
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |  8 +++
 3 files changed, 85 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/217ee60f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index eae8a64..620a66d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -804,15 +804,17 @@ public class HBaseFsck extends Configured implements 
Closeable {
 for (FileStatus storeFile : storeFiles) {
   HFile.Reader reader = HFile.createReader(fs, 
storeFile.getPath(), new CacheConfig(
   getConf()), getConf());
-  if ((reader.getFirstKey() != null)
-  && ((storeFirstKey == null) || 
(comparator.compare(storeFirstKey,
-  reader.getFirstKey()) > 0))) {
-storeFirstKey = reader.getFirstKey();
+  if (reader.getFirstKey() != null) {
+byte[] firstKey = keyOnly(reader.getFirstKey());
+if (storeFirstKey == null || comparator.compare(storeFirstKey, 
firstKey) > 0) {
+  storeFirstKey = firstKey;
+}
   }
-  if ((reader.getLastKey() != null)
-  && ((storeLastKey == null) || 
(comparator.compare(storeLastKey,
-  reader.getLastKey())) < 0)) {
-storeLastKey = reader.getLastKey();
+  if (reader.getLastKey() != null) {
+byte[] lastKey = keyOnly(reader.getLastKey());
+if (storeLastKey == null || comparator.compare(storeLastKey, 
lastKey) < 0) {
+  storeLastKey = lastKey;
+}
   }
   reader.close();
 }
@@ -820,8 +822,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
 }
 currentRegionBoundariesInformation.metaFirstKey = 
regionInfo.getStartKey();
 currentRegionBoundariesInformation.metaLastKey = 
regionInfo.getEndKey();
-currentRegionBoundariesInformation.storesFirstKey = 
keyOnly(storeFirstKey);
-currentRegionBoundariesInformation.storesLastKey = 
keyOnly(storeLastKey);
+currentRegionBoundariesInformation.storesFirstKey = storeFirstKey;
+currentRegionBoundariesInformation.storesLastKey = storeLastKey;
 if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
   currentRegionBoundariesInformation.metaFirstKey = null;
 if (currentRegionBoundariesInformation.metaLastKey.length == 0)

http://git-wip-us.apache.org/repos/asf/hbase/blob/217ee60f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index df06970..c316c98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors;
+import static 
org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.checkRegionBoundaries;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -2998,15 +2999,73 @@ public class TestHBaseFsck {
 
   @Test (timeout = 18)
   public void testRegionBoundariesCheck() throws Exception {
-HBaseFsck hbck = doFsck(conf, false);
+TableName tableName = TableName.valueOf("testRegionBoundariesCheck");
+
+// 

[39/50] [abbrv] hbase git commit: HBASE-17437 Support specifying a WAL directory outside of the root directory (Yishan Yang and Zach York) HBASE-17588 Remove unused imports brought in by HBASE-17437 (

2018-12-13 Thread apurtell
HBASE-17437 Support specifying a WAL directory outside of the root directory 
(Yishan Yang and Zach York)
HBASE-17588 Remove unused imports brought in by HBASE-17437 (Zach York)

Signed-off-by: Enis Soztutar 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/979438d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/979438d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/979438d2

Branch: refs/heads/branch-1.3
Commit: 979438d29e49a6a1510d4b82b4c90652bd354942
Parents: 9e39a20
Author: Zach York 
Authored: Wed Jan 11 12:49:20 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 19:01:12 2018 -0800

--
 .../src/main/resources/hbase-default.xml|   7 +
 .../procedure2/store/wal/WALProcedureStore.java |  14 +-
 .../procedure2/ProcedureTestingUtility.java |   4 +-
 ...ProcedureWALLoaderPerformanceEvaluation.java |   4 +-
 .../wal/ProcedureWALPerformanceEvaluation.java  |   4 +-
 .../org/apache/hadoop/hbase/fs/HFileSystem.java |  12 +-
 .../org/apache/hadoop/hbase/io/WALLink.java |  10 +-
 .../hadoop/hbase/master/AssignmentManager.java  |  10 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   6 +-
 .../hadoop/hbase/master/MasterFileSystem.java   | 138 ++---
 .../hadoop/hbase/master/SplitLogManager.java|   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   2 +-
 .../hbase/regionserver/HRegionServer.java   |  47 --
 .../hbase/regionserver/SplitLogWorker.java  |   8 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   8 +-
 .../regionserver/ReplicationSource.java |   6 +-
 .../regionserver/ReplicationSyncUp.java |  10 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  62 ++--
 .../hadoop/hbase/wal/DefaultWALProvider.java|  18 +--
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   2 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java|  14 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |  58 ++--
 .../hbase/coprocessor/TestWALObserver.java  |  13 +-
 .../hbase/filter/TestFilterFromRegionSide.java  |   2 +-
 .../hadoop/hbase/fs/TestBlockReorder.java   |  16 +-
 .../encoding/TestSeekBeforeWithReverseScan.java |   2 +-
 .../hadoop/hbase/mapreduce/TestWALPlayer.java   |  16 +-
 .../hbase/mapreduce/TestWALRecordReader.java|  19 ++-
 .../master/TestMasterFileSystemWithWALDir.java  |  59 
 .../procedure/TestWALProcedureStoreOnHDFS.java  |   2 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   2 +-
 .../TestCompactedHFilesDischarger.java  |   2 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  31 ++--
 .../regionserver/wal/TestLogRollAbort.java  |   8 +-
 .../wal/TestWALActionsListener.java |  25 +++-
 .../hbase/regionserver/wal/TestWALReplay.java   | 105 +++--
 .../apache/hadoop/hbase/util/TestFSUtils.java   |  51 ++-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 +-
 .../hbase/wal/TestDefaultWALProvider.java   |  18 ++-
 .../apache/hadoop/hbase/wal/TestWALFactory.java |  10 +-
 .../apache/hadoop/hbase/wal/TestWALRootDir.java | 148 +++
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  11 +-
 .../hbase/wal/WALPerformanceEvaluation.java |   2 +-
 43 files changed, 729 insertions(+), 261 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/979438d2/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 88e57d7..5ee5fc5 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -1172,6 +1172,13 @@ possible configurations would overwhelm and obscure the 
important.
 if it does not match.
   
   
+hbase.wal.dir.perms
+700
+FS Permissions for the root WAL directory in a 
secure(kerberos) setup.
+  When master starts, it creates the WAL dir with this permissions or sets 
the permissions
+  if it does not match.
+  
+  
 hbase.data.umask.enable
 false
 Enable, if true, that file permissions should be assigned

http://git-wip-us.apache.org/repos/asf/hbase/blob/979438d2/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 

[32/50] [abbrv] hbase git commit: HBASE-20808 Wrong shutdown order between Chores and ChoreService

2018-12-13 Thread apurtell
HBASE-20808 Wrong shutdown order between Chores and ChoreService

Signed-off-by: Reid Chan 

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8e36761e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8e36761e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8e36761e

Branch: refs/heads/branch-1.3
Commit: 8e36761e89d0ecdd6a476f06293c5d1c8295eb06
Parents: 955264e
Author: Nihal Jain 
Authored: Fri Jul 6 00:42:58 2018 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:19 2018 -0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 34 
 .../hbase/regionserver/HRegionServer.java   | 21 ++--
 2 files changed, 23 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8e36761e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index b47fecb..aedb987 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -50,6 +50,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -1226,8 +1227,8 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 LOG.error("Failed to stop master jetty server", e);
   }
 }
-super.stopServiceThreads();
 stopChores();
+super.stopServiceThreads();
 CleanerChore.shutDownChorePool();
 
 // Wait for all the remaining region servers to report in IFF we were
@@ -1240,9 +1241,6 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   LOG.debug("Stopping service threads");
 }
 // Clean up and close up shop
-if (this.logCleaner != null) this.logCleaner.cancel(true);
-if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
-if (this.replicationZKLockCleanerChore != null) 
this.replicationZKLockCleanerChore.cancel(true);
 if (this.quotaManager != null) this.quotaManager.stop();
 if (this.activeMasterManager != null) this.activeMasterManager.stop();
 if (this.serverManager != null) this.serverManager.stop();
@@ -1283,23 +1281,17 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   }
 
   private void stopChores() {
-if (this.balancerChore != null) {
-  this.balancerChore.cancel(true);
-}
-if (this.normalizerChore != null) {
-  this.normalizerChore.cancel(true);
-}
-if (this.clusterStatusChore != null) {
-  this.clusterStatusChore.cancel(true);
-}
-if (this.catalogJanitorChore != null) {
-  this.catalogJanitorChore.cancel(true);
-}
-if (this.clusterStatusPublisherChore != null){
-  clusterStatusPublisherChore.cancel(true);
-}
-if (this.periodicDoMetricsChore != null) {
-  periodicDoMetricsChore.cancel();
+ChoreService choreService = getChoreService();
+if (choreService != null) {
+  choreService.cancelChore(this.balancerChore);
+  choreService.cancelChore(this.normalizerChore);
+  choreService.cancelChore(this.clusterStatusChore);
+  choreService.cancelChore(this.catalogJanitorChore);
+  choreService.cancelChore(this.clusterStatusPublisherChore);
+  choreService.cancelChore(this.periodicDoMetricsChore);
+  choreService.cancelChore(this.logCleaner);
+  choreService.cancelChore(this.hfileCleaner);
+  choreService.cancelChore(this.replicationZKLockCleanerChore);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8e36761e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ec21599..545c926 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1044,10 +1044,6 @@ public class HRegionServer extends HasThread implements
 if (this.hMemManager 

[37/50] [abbrv] hbase git commit: HBASE-20858 Port HBASE-20695 (Implement table level RegionServer replication metrics) to branch-1

2018-12-13 Thread apurtell
HBASE-20858 Port HBASE-20695 (Implement table level RegionServer replication 
metrics) to branch-1

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e39a200
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e39a200
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e39a200

Branch: refs/heads/branch-1.3
Commit: 9e39a2009648ba21d55676ea90b328941bb9b7db
Parents: 614b5f6
Author: Xu Cang 
Authored: Fri Jul 6 16:36:05 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:20 2018 -0800

--
 .../replication/regionserver/MetricsSource.java | 43 +---
 .../regionserver/ReplicationSource.java |  5 ++
 .../replication/TestReplicationEndpoint.java| 52 
 3 files changed, 94 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e39a200/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
index 9b99f2a..56baa05 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.replication.regionserver;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -35,8 +33,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
 public class MetricsSource {
 
-  private static final Log LOG = LogFactory.getLog(MetricsSource.class);
-
   // tracks last shipped timestamp for each wal group
   private Map lastTimeStamps = new HashMap();
   private long lastHFileRefsQueueSize = 0;
@@ -44,7 +40,7 @@ public class MetricsSource {
 
   private final MetricsReplicationSourceSource singleSourceSource;
   private final MetricsReplicationSourceSource globalSourceSource;
-
+  private Map 
singleSourceSourceByTable;
 
   /**
* Constructor used to register the metrics
@@ -56,7 +52,24 @@ public class MetricsSource {
 singleSourceSource =
 
CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class)
 .getSource(id);
-globalSourceSource = 
CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class).getGlobalSource();
+globalSourceSource = CompatibilitySingletonFactory
+.getInstance(MetricsReplicationSourceFactory.class).getGlobalSource();
+singleSourceSourceByTable = new HashMap<>();
+  }
+
+  /**
+   * Constructor for injecting custom (or test) MetricsReplicationSourceSources
+   * @param id Name of the source this class is monitoring
+   * @param singleSourceSource Class to monitor id-scoped metrics
+   * @param globalSourceSource Class to monitor global-scoped metrics
+   */
+  public MetricsSource(String id, MetricsReplicationSourceSource 
singleSourceSource,
+   MetricsReplicationSourceSource globalSourceSource,
+   Map 
singleSourceSourceByTable) {
+this.id = id;
+this.singleSourceSource = singleSourceSource;
+this.globalSourceSource = globalSourceSource;
+this.singleSourceSourceByTable = singleSourceSourceByTable;
   }
 
   /**
@@ -72,6 +85,20 @@ public class MetricsSource {
   }
 
   /**
   * Set the age of the last edit that was shipped, grouped by table
+   * @param timestamp write time of the edit
+   * @param tableName String as group and tableName
+   */
+  public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) {
+long age = EnvironmentEdgeManager.currentTime() - timestamp;
+if (!this.getSingleSourceSourceByTable().containsKey(tableName)) {
+  this.getSingleSourceSourceByTable().put(tableName,
+  
CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class)
+  .getSource(tableName));
+}
+this.singleSourceSourceByTable.get(tableName).setLastShippedAge(age);
+  }
+  /**
* Convenience method to use the last given 

[23/50] [abbrv] hbase git commit: HBASE-20352 [Chore] Backport HBASE-18309 (Support multi threads in CleanerChore) to branch-1

2018-12-13 Thread apurtell
HBASE-20352 [Chore] Backport HBASE-18309 (Support multi threads in 
CleanerChore) to branch-1

Signed-off-by: Yu Li 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/193d1dcb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/193d1dcb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/193d1dcb

Branch: refs/heads/branch-1.3
Commit: 193d1dcb72c22252fc86ee8433c765c42349d3cc
Parents: 35e94c9
Author: Reid Chan 
Authored: Wed Apr 11 14:16:08 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +
 .../hbase/master/cleaner/CleanerChore.java  | 405 +++
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 181 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  75 
 .../hadoop/hbase/util/FileStatusFilter.java |  36 ++
 .../TestZooKeeperTableArchiveClient.java|   3 +
 .../hbase/master/cleaner/TestCleanerChore.java  | 164 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  |   1 +
 .../master/cleaner/TestHFileLinkCleaner.java|   1 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  57 +++
 10 files changed, 837 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/193d1dcb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 0906fca..67c7787 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -97,6 +97,7 @@ import org.apache.hadoop.hbase.master.balancer.BalancerChore;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
+import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationZKLockCleanerChore;
@@ -859,6 +860,7 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 status.markComplete("Initialization successful");
 LOG.info("Master has completed initialization");
 configurationManager.registerObserver(this.balancer);
+configurationManager.registerObserver(this.logCleaner);
 
 // Set master as 'initialized'.
 setInitialized(true);
@@ -1176,6 +1178,8 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
startProcedureExecutor();
 
+// Initialize cleaner chore
+CleanerChore.initChorePool(conf);
// Start log cleaner thread
int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 
1000);
this.logCleaner =

http://git-wip-us.apache.org/repos/asf/hbase/blob/193d1dcb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index d54b7aa..dc614fb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -21,6 +21,11 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ForkJoinTask;
+import java.util.concurrent.RecursiveTask;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,12 +33,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import 

[20/50] [abbrv] hbase git commit: HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is disabled

2018-12-13 Thread apurtell
HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is 
disabled

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a4b2b54
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a4b2b54
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a4b2b54

Branch: refs/heads/branch-1.3
Commit: 8a4b2b54cdadfbe2ee730733f10bd3ec57b05340
Parents: 5c799c1
Author: Ashish Singhi 
Authored: Sun Feb 4 18:24:32 2018 +0530
Committer: Andrew Purtell 
Committed: Wed Dec 12 18:08:18 2018 -0800

--
 .../replication/regionserver/ReplicationSourceManager.java| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a4b2b54/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index c99d15c..8d19e22 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -58,6 +58,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationListener;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
@@ -735,6 +736,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 replicationQueues.removeQueue(peerId);
 continue;
   }
+  if (server instanceof ReplicationSyncUp.DummyServer
+  && peer.getPeerState().equals(PeerState.DISABLED)) {
+LOG.warn("Peer " + actualPeerId + " is disabled. ReplicationSyncUp 
tool will skip "
++ "replicating data to this peer.");
+continue;
+  }
   // track sources in walsByIdRecoveredQueues
   Map> walsByGroup = new HashMap>();
   walsByIdRecoveredQueues.put(peerId, walsByGroup);