hbase git commit: HBASE-16596 Reduce redundant interfaces in AsyncProcess

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master e1e063720 -> cc2a40a78


HBASE-16596 Reduce redundant interfaces in AsyncProcess


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc2a40a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc2a40a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc2a40a7

Branch: refs/heads/master
Commit: cc2a40a78f4e65ef38dad2cbc921613c4d15cbf7
Parents: e1e0637
Author: chenheng 
Authored: Sat Sep 10 11:13:28 2016 +0800
Committer: chenheng 
Committed: Sat Sep 10 11:13:28 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 29 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  2 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   | 59 +---
 .../hadoop/hbase/client/TestClientPushback.java |  2 +-
 4 files changed, 31 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 5bb0f58..c5745e9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -392,15 +392,7 @@ class AsyncProcess {
 }
 throw new RuntimeException("Neither AsyncProcess nor request have 
ExecutorService");
   }
-  /**
-   * See {@link #submit(ExecutorService, TableName, List, boolean, 
Batch.Callback, boolean)}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submit(TableName tableName, final List rows,
-  boolean atLeastOne, Batch.Callback callback, boolean 
needResults)
-  throws InterruptedIOException {
-return submit(null, tableName, rows, atLeastOne, callback, needResults);
-  }
+
   /**
* See {@link #submit(ExecutorService, TableName, RowAccess, boolean, 
Batch.Callback, boolean)}.
* Uses default ExecutorService for this AP (must have been created with 
one).
@@ -529,7 +521,7 @@ class AsyncProcess {
   List locationErrorRows, Map 
actionsByServer,
   ExecutorService pool) {
 AsyncRequestFutureImpl ars = createAsyncRequestFuture(
-  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults);
+  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults, null, timeout);
 // Add location errors if any
 if (locationErrors != null) {
   for (int i = 0; i < locationErrors.size(); ++i) {
@@ -564,14 +556,6 @@ class AsyncProcess {
 
 multiAction.add(regionName, action);
   }
-  /**
-   * See {@link #submitAll(ExecutorService, TableName, List, Batch.Callback, 
Object[])}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submitAll(TableName tableName,
-  List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(null, tableName, rows, callback, results, null, timeout);
-  }
 
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
@@ -1785,15 +1769,6 @@ class AsyncProcess {
 results, callback, callable, curTimeout);
   }
 
-  @VisibleForTesting
-  /** Create AsyncRequestFuture. Isolated to be easily overridden in the 
tests. */
-  protected  AsyncRequestFutureImpl createAsyncRequestFuture(
-  TableName tableName, List actions, long nonceGroup, 
ExecutorService pool,
-  Batch.Callback callback, Object[] results, boolean needResults) 
{
-return createAsyncRequestFuture(
-tableName, actions, nonceGroup, pool, callback, results, needResults, 
null, timeout);
-  }
-
   /**
* Create a callable. Isolated to be easily overridden in the tests.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index f8bbfc1..492714f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -1218,7 +1218,7 @@ public class HTable implements Table {
 RpcRetryingCallerFactory.instantiate(configuration, 

[2/3] hbase git commit: HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom endpoints

2016-09-09 Thread apurtell
HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom 
endpoints

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-shell/src/main/ruby/shell/commands/add_peer.rb
hbase-shell/src/test/ruby/hbase/replication_admin_test.rb


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fe57fa4d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fe57fa4d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fe57fa4d

Branch: refs/heads/branch-1
Commit: fe57fa4daa303f4d0612c7200a7a1145b336ff7c
Parents: ad67fd0
Author: Andrew Purtell 
Authored: Fri Sep 9 20:36:03 2016 +
Committer: Andrew Purtell 
Committed: Fri Sep 9 14:54:15 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  2 --
 .../src/main/ruby/shell/commands/add_peer.rb|  8 +++--
 .../test/ruby/hbase/replication_admin_test.rb   | 35 ++--
 3 files changed, 23 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fe57fa4d/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 41f8180..d0719d8 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -55,8 +55,6 @@ module Hbase
 # or neither are provided
 if endpoint_classname.nil? and cluster_key.nil?
   raise(ArgumentError, "Either ENDPOINT_CLASSNAME or CLUSTER_KEY must 
be specified.")
-elsif !endpoint_classname.nil? and !cluster_key.nil?
-  raise(ArgumentError, "ENDPOINT_CLASSNAME and CLUSTER_KEY cannot both 
be specified.")
 end
 
 # Cluster Key is required for ReplicationPeerConfig for a custom 
replication endpoint

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe57fa4d/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb 
b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index 0fcdd3d..cf9862a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -51,9 +51,13 @@ the key TABLE_CFS.
 TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", 
"cf2"] }
   hbase> add_peer '11', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
 DATA => { "key1" => 1 }, CONFIG => { "config1" => "value1", "config2" => 
"value2" },
-TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", 
"cf2"] }
+TABLE_CFS => { "table1" => [], "ns2:table2" => ["cf1"], "ns3:table3" => 
["cf1", "cf2"] }
+  hbase> add_peer '12', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
+CLUSTER_KEY => "server2.cie.com:2181:/hbase"
+
+Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified. If 
ENDPOINT_CLASSNAME is specified, CLUSTER_KEY is
+optional and should only be specified if a particular custom endpoint requires 
it.
 
-Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified but not both.
 EOF
   end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/fe57fa4d/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index ac088ed..5b99c37 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -55,15 +55,7 @@ module Hbase
   end
 end
 
-define_test "add_peer: fail when both CLUSTER_KEY and ENDPOINT_CLASSNAME 
are specified" do
-  assert_raise(ArgumentError) do
-args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod',
- ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint' }
-replication_admin.add_peer(@peer_id, args)
-  end
-end
-
-define_test "add_peer: args must be a string or number" do
+define_test "add_peer: args must be a hash" do
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, 1)
   end
@@ -144,15 +136,19 @@ module Hbase
 define_test "add_peer: multiple zk cluster key and table_cfs - peer 
config" do
   cluster_key = "zk4,zk5,zk6:11000:/hbase-test"
   table_cfs = { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", 
"cf2"] }
-  table_cfs_str = "table1;table2:cf1;table3:cf1,cf2"
+  #table_cfs_str = 

[1/3] hbase git commit: HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom endpoints

2016-09-09 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 9317955b0 -> 78b036304
  refs/heads/branch-1 ad67fd081 -> fe57fa4da
  refs/heads/master 6c8d1f0ae -> e1e063720


HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom 
endpoints

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78b03630
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78b03630
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78b03630

Branch: refs/heads/0.98
Commit: 78b036304356b2e4da6c57bad01fc1ce228e4041
Parents: 9317955
Author: Geoffrey 
Authored: Fri Sep 9 11:01:35 2016 -0700
Committer: Andrew Purtell 
Committed: Fri Sep 9 14:54:14 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  2 --
 .../src/main/ruby/shell/commands/add_peer.rb|  5 -
 .../test/ruby/hbase/replication_admin_test.rb   | 20 +---
 3 files changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/78b03630/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index ea82384..5e5fa21 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -56,8 +56,6 @@ module Hbase
 # or neither are provided
 if endpoint_classname.nil? and cluster_key.nil?
   raise(ArgumentError, "Either ENDPOINT_CLASSNAME or CLUSTER_KEY must 
be specified.")
-elsif !endpoint_classname.nil? and !cluster_key.nil?
-  raise(ArgumentError, "ENDPOINT_CLASSNAME and CLUSTER_KEY cannot both 
be specified.")
 end
 
 # Cluster Key is required for ReplicationPeerConfig for a custom 
replication endpoint

http://git-wip-us.apache.org/repos/asf/hbase/blob/78b03630/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb 
b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index 0fcdd3d..c42c790 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -52,8 +52,11 @@ the key TABLE_CFS.
   hbase> add_peer '11', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
 DATA => { "key1" => 1 }, CONFIG => { "config1" => "value1", "config2" => 
"value2" },
 TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", 
"cf2"] }
+  hbase> add_peer '12', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
+CLUSTER_KEY => "server2.cie.com:2181:/hbase"
 
-Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified but not both.
+Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified. If 
ENDPOINT_CLASSNAME is specified, CLUSTER_KEY is
+optional and should only be specified if a particular custom endpoint requires 
it.
 EOF
   end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/78b03630/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 660ac91..a050a0e 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -54,14 +54,6 @@ module Hbase
   end
 end
 
-define_test "add_peer: fail when both CLUSTER_KEY and ENDPOINT_CLASSNAME 
are specified" do
-  assert_raise(ArgumentError) do
-args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod',
- ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint' }
-replication_admin.add_peer(@peer_id, args)
-  end
-end
-
 define_test "add_peer: args must be a string or number" do
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, 1)
@@ -151,7 +143,10 @@ module Hbase
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
   assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id))
-  assert_equal(table_cfs_str, 
replication_admin.show_peer_tableCFs(@peer_id))
+  # Note: below assertion is dependent on the sort order of an unordered
+  # map and hence flaky depending on JVM
+  # Commenting out until HBASE-16274 is worked.
+  #assert_equal(table_cfs_str, 

hbase git commit: HBASE-16595 Remove reference to Admin from backup / restore server code

2016-09-09 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 b65a2f679 -> 260fafe53


HBASE-16595 Remove reference to Admin from backup / restore server code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/260fafe5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/260fafe5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/260fafe5

Branch: refs/heads/HBASE-7912
Commit: 260fafe53a9de589a52d2c812518a8101cebc78b
Parents: b65a2f6
Author: tedyu 
Authored: Fri Sep 9 13:56:59 2016 -0700
Committer: tedyu 
Committed: Fri Sep 9 13:56:59 2016 -0700

--
 .../hadoop/hbase/backup/impl/BackupManager.java |   7 +-
 .../backup/impl/IncrementalBackupManager.java   |  27 +-
 .../backup/impl/RestoreTablesProcedure.java |  20 +-
 .../backup/master/FullTableBackupProcedure.java |   4 +-
 .../master/IncrementalTableBackupProcedure.java |   6 +-
 .../hbase/backup/util/BackupServerUtil.java |  65 +++--
 .../hbase/backup/util/RestoreServerUtil.java| 268 +--
 7 files changed, 204 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index a8d21bb..b0d329b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -50,7 +50,8 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.master.MasterServices;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -221,8 +222,8 @@ public class BackupManager implements Closeable {
   // list with all user tables from meta. It no table available, throw the 
request exception.
 
   HTableDescriptor[] htds = null;
-  try (Admin hbadmin = conn.getAdmin()) {
-htds = hbadmin.listTables();
+  try {
+htds = ((ClusterConnection)conn).listTables();
   } catch (Exception e) {
 throw new BackupException(e);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/260fafe5/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index bd496ce..cde6c04 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,6 +42,9 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
@@ -70,11 +74,12 @@ public class IncrementalBackupManager {
   /**
* Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
* in BackupContext.
+   * @param svc MasterServices
* @param backupContext backup context
* @return The new HashMap of RS log timestamps after the log roll for this 
incremental backup.
* @throws IOException exception
*/
-  public HashMap getIncrBackupLogFileList(BackupInfo 
backupContext)
+  public HashMap getIncrBackupLogFileList(MasterServices 
svc,BackupInfo backupContext)
   throws IOException {
 List logList;
 

hbase git commit: HBASE-16490 Fix race condition between SnapshotManager and SnapshotCleaner (Heng Chen)

2016-09-09 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 aa94a89d2 -> b65a2f679


HBASE-16490 Fix race condition between SnapshotManager and SnapshotCleaner 
(Heng Chen)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b65a2f67
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b65a2f67
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b65a2f67

Branch: refs/heads/HBASE-7912
Commit: b65a2f67949969d63a798be6cbb13e4a4980f54d
Parents: aa94a89
Author: tedyu 
Authored: Fri Sep 9 11:25:39 2016 -0700
Committer: tedyu 
Committed: Fri Sep 9 11:25:39 2016 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 ++-
 .../master/cleaner/BaseFileCleanerDelegate.java |  7 +
 .../hbase/master/cleaner/CleanerChore.java  | 29 +---
 .../master/cleaner/FileCleanerDelegate.java |  8 ++
 .../hbase/master/cleaner/HFileCleaner.java  | 12 ++--
 .../snapshot/DisabledTableSnapshotHandler.java  |  4 +--
 .../snapshot/EnabledTableSnapshotHandler.java   |  2 +-
 .../master/snapshot/SnapshotFileCache.java  | 28 ---
 .../master/snapshot/SnapshotHFileCleaner.java   | 15 +-
 .../hbase/master/snapshot/SnapshotManager.java  | 22 ++-
 .../master/snapshot/TakeSnapshotHandler.java| 10 +--
 .../master/snapshot/TestSnapshotFileCache.java  | 11 
 .../snapshot/TestSnapshotHFileCleaner.java  |  8 +++---
 13 files changed, 121 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b65a2f67/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ddea0b6..c54eee0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1093,8 +1093,10 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 
//start the hfile archive cleaner thread
 Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+Map params = new HashMap();
+params.put(MASTER, this);
 this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, 
getMasterFileSystem()
-.getFileSystem(), archiveDir);
+.getFileSystem(), archiveDir, params);
 getChoreService().scheduleChore(hfileCleaner);
 serviceStarted = true;
 if (LOG.isTraceEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b65a2f67/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
index c6955d0..891db22 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.BaseConfigurable;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 
+import java.util.Map;
+
 /**
  * Base class for file cleaners which allows subclasses to implement a simple
  * isFileDeletable method (which used to be the FileCleanerDelegate contract).
@@ -39,6 +41,11 @@ implements FileCleanerDelegate {
   }});
   }
 
+  @Override
+  public void init(Map params) {
+// subclass could override it if needed.
+  }
+
   /**
* Should the master delete the file or keep it?
* @param fStat file status of the file to check

http://git-wip-us.apache.org/repos/asf/hbase/blob/b65a2f67/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 5a93a6d..b094507 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.master.cleaner;
 
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-
+import com.google.common.annotations.VisibleForTesting;

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir (Heng Chen)

2016-09-09 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 76d7acdfa -> aa94a89d2


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir (Heng Chen)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa94a89d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa94a89d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa94a89d

Branch: refs/heads/HBASE-7912
Commit: aa94a89d2ce558e83d4630142915d3fb27b2ab13
Parents: 76d7acd
Author: tedyu 
Authored: Fri Sep 9 11:10:45 2016 -0700
Committer: tedyu 
Committed: Fri Sep 9 11:10:45 2016 -0700

--
 .../master/snapshot/SnapshotFileCache.java  | 17 ++-
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 +---
 .../snapshot/TestSnapshotHFileCleaner.java  | 47 ++--
 .../hbase/snapshot/SnapshotTestingUtils.java| 13 ++
 6 files changed, 100 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa94a89d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/aa94a89d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 9172e06..8967a70 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -164,7 +164,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/aa94a89d/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 79e7312..7677e0a 100644
--- 

hbase git commit: HBASE-16530 Reduce DBE code duplication

2016-09-09 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 bc4ab4714 -> ad67fd081


HBASE-16530 Reduce DBE code duplication

Signed-off-by: Yu Li 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ad67fd08
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ad67fd08
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ad67fd08

Branch: refs/heads/branch-1
Commit: ad67fd0816bde63ad3bf9ec13b7bb12ecd833c92
Parents: bc4ab47
Author: binlijin 
Authored: Fri Sep 9 16:20:37 2016 +0800
Committer: Yu Li 
Committed: Fri Sep 9 16:25:56 2016 +0800

--
 .../io/encoding/AbstractDataBlockEncoder.java   | 69 
 .../io/encoding/BufferedDataBlockEncoder.java   | 39 ++-
 .../io/encoding/CopyKeyDataBlockEncoder.java| 55 
 .../hadoop/hbase/io/encoding/NoneEncoder.java   | 68 +++
 .../hbase/io/encoding/RowIndexCodecV1.java  | 21 +-
 .../hbase/io/encoding/RowIndexEncoderV1.java| 32 +
 .../hbase/io/encoding/RowIndexSeekerV1.java | 18 +
 .../hbase/io/hfile/NoOpDataBlockEncoder.java| 59 -
 8 files changed, 206 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ad67fd08/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
new file mode 100644
index 000..16c2c4a
--- /dev/null
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.encoding;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+
+@InterfaceAudience.Private
+public abstract class AbstractDataBlockEncoder implements DataBlockEncoder {
+
+  @Override
+  public HFileBlockEncodingContext newDataBlockEncodingContext(
+  DataBlockEncoding encoding, byte[] header, HFileContext meta) {
+return new HFileBlockDefaultEncodingContext(encoding, header, meta);
+  }
+
+  @Override
+  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext 
meta) {
+return new HFileBlockDefaultDecodingContext(meta);
+  }
+
+  protected void postEncoding(HFileBlockEncodingContext encodingCtx)
+  throws IOException {
+if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
+  encodingCtx.postEncoding(BlockType.ENCODED_DATA);
+} else {
+  encodingCtx.postEncoding(BlockType.DATA);
+}
+  }
+
+  protected abstract static class AbstractEncodedSeeker implements
+  EncodedSeeker {
+protected HFileBlockDecodingContext decodingCtx;
+protected final KVComparator comparator;
+
+public AbstractEncodedSeeker(KVComparator comparator,
+HFileBlockDecodingContext decodingCtx) {
+  this.comparator = comparator;
+  this.decodingCtx = decodingCtx;
+}
+
+protected boolean includesMvcc() {
+  return this.decodingCtx.getHFileContext().isIncludesMvcc();
+}
+
+protected boolean includesTags() {
+  return this.decodingCtx.getHFileContext().isIncludesTags();
+}
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad67fd08/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 493c31c29 -> a5f0223bd


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a5f0223b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a5f0223b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a5f0223b

Branch: refs/heads/branch-1.3
Commit: a5f0223bd1db25c18c11ba1250f53066e50f28e8
Parents: 493c31c
Author: chenheng 
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng 
Committed: Fri Sep 9 14:19:48 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f0223b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index c2529a8..0c86557 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -57,6 +58,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
 * The base class for load balancers. It provides the functions used by
@@ -115,6 +117,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -236,6 +239,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -305,6 +315,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -452,15 +489,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   if (regionFinder != null) {
-

hbase git commit: HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky

2016-09-09 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master e11aafae9 -> 6c8d1f0ae


HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c8d1f0a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c8d1f0a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c8d1f0a

Branch: refs/heads/master
Commit: 6c8d1f0ae82b736322a37bafabf795c8b3a0fdd4
Parents: e11aafa
Author: zhangduo 
Authored: Fri Sep 9 11:09:17 2016 +0800
Committer: zhangduo 
Committed: Fri Sep 9 14:20:39 2016 +0800

--
 .../compactions/RatioBasedCompactionPolicy.java  |  3 ++-
 .../regionserver/TestDefaultCompactSelection.java| 15 +--
 2 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c8d1f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 3386bfd..a3e10f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * The default algorithm for selecting files for compaction.
@@ -61,7 +62,7 @@ public class RatioBasedCompactionPolicy extends 
SortedCompactionPolicy {
 }
 // TODO: Use better method for determining stamp of last major (HBASE-2990)
 long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
   // Major compaction time has elapsed.
   long cfTTL = this.storeConfigInfo.getStoreFileTtl();

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c8d1f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index dbd6f11..1513cd0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import 
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -34,6 +36,8 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 
   @Test
   public void testCompactionRatio() throws IOException {
+TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
+EnvironmentEdgeManager.injectEdge(edge);
 /**
  * NOTE: these tests are specific to describe the implementation of the
  * current compaction algorithm.  Developed to ensure that refactoring
@@ -90,10 +94,17 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
 store.storeEngine.getCompactionPolicy().setConf(conf);
 try {
+  // The modTime of the mocked store file is currentTimeMillis, so we need 
to increase the
+  // timestamp a bit to make sure that now - lowestModTime is greater than 
major compaction
+  // period(1ms).
   // trigger an aged major compaction
-  compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
+  List candidates = sfCreate(50, 25, 12, 12);
+  edge.increment(2);
+  compactEquals(candidates, 50, 25, 12, 12);
   // major sure exceeding maxCompactSize also 

hbase git commit: HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky

2016-09-09 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b4086795f -> bc4ab4714


HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc4ab471
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc4ab471
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc4ab471

Branch: refs/heads/branch-1
Commit: bc4ab47147428068d3b6b6bb2e284709bdf8fc05
Parents: b408679
Author: zhangduo 
Authored: Fri Sep 9 11:09:17 2016 +0800
Committer: zhangduo 
Committed: Fri Sep 9 14:21:31 2016 +0800

--
 .../compactions/RatioBasedCompactionPolicy.java  |  3 ++-
 .../regionserver/TestDefaultCompactSelection.java| 15 +--
 2 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc4ab471/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 7b812cd..b378be7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * The default algorithm for selecting files for compaction.
@@ -61,7 +62,7 @@ public class RatioBasedCompactionPolicy extends 
SortedCompactionPolicy {
 }
 // TODO: Use better method for determining stamp of last major (HBASE-2990)
 long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
   // Major compaction time has elapsed.
   long cfTTL = this.storeConfigInfo.getStoreFileTtl();

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc4ab471/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 9ed1ccd..38c1d4e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import 
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -35,6 +37,8 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 
   @Test
   public void testCompactionRatio() throws IOException {
+TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
+EnvironmentEdgeManager.injectEdge(edge);
 /**
  * NOTE: these tests are specific to describe the implementation of the
  * current compaction algorithm.  Developed to ensure that refactoring
@@ -91,10 +95,17 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
 store.storeEngine.getCompactionPolicy().setConf(conf);
 try {
+  // The modTime of the mocked store file is currentTimeMillis, so we need 
to increase the
+  // timestamp a bit to make sure that now - lowestModTime is greater than 
major compaction
+  // period(1ms).
   // trigger an aged major compaction
-  compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
+  List candidates = sfCreate(50, 25, 12, 12);
+  edge.increment(2);
+  compactEquals(candidates, 50, 25, 12, 12);
   // major sure exceeding maxCompactSize also 

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 52963b342 -> b4086795f


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4086795
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4086795
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4086795

Branch: refs/heads/branch-1
Commit: b4086795f2dcb1497a367592850fa80f6514cde2
Parents: 52963b3
Author: chenheng 
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng 
Committed: Fri Sep 9 14:16:37 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4086795/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 0a61839..93b29b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -57,6 +58,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
 * The base class for load balancers. It provides the functions used by
@@ -115,6 +117,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -236,6 +239,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -305,6 +315,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -452,15 +489,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   if (regionFinder != null) {
-

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 46c756a4a -> e11aafae9


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e11aafae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e11aafae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e11aafae

Branch: refs/heads/master
Commit: e11aafae957bc8d71cb622833011f29325049987
Parents: 46c756a
Author: chenheng 
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng 
Committed: Fri Sep 9 10:54:48 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e11aafae/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index dc5bace..2b13b21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -59,6 +60,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
 * The base class for load balancers. It provides the functions used by
@@ -117,6 +119,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -238,6 +241,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -307,6 +317,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -454,15 +491,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   if (regionFinder != null) {
-