[6/6] hbase git commit: HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erroneously

2016-04-05 Thread enis
HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erroneously

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c480b2a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c480b2a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c480b2a4

Branch: refs/heads/0.98
Commit: c480b2a4ef61dc0f2bc820623d4dafc7a4e3616a
Parents: 379ec0d
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Apr 5 18:13:40 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Apr 5 18:51:33 2016 -0700

--
 .../java/org/apache/hadoop/hbase/util/FSTableDescriptors.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c480b2a4/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 622ae46..ca1c180 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -125,11 +125,13 @@ public class FSTableDescriptors implements 
TableDescriptors {
 this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
+  @Override
   public void setCacheOn() throws IOException {
 this.cache.clear();
 this.usecache = true;
   }
 
+  @Override
   public void setCacheOff() throws IOException {
 this.usecache = false;
 this.cache.clear();
@@ -173,7 +175,9 @@ public class FSTableDescriptors implements TableDescriptors 
{
   tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
 } catch (NullPointerException e) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
-  + tablename, e);
+  + tablename, e);
+} catch (TableInfoMissingException e) {
+  // ignore. This is regular operation
 } catch (IOException ioe) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
   + tablename, ioe);



[2/6] hbase git commit: HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erroneously

2016-04-05 Thread enis
HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erroneously


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6a32141
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6a32141
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6a32141

Branch: refs/heads/branch-1
Commit: e6a32141b3e5e893c5b78a2a821c4bbbc6f482ce
Parents: 8957dc1
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Apr 5 18:13:40 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Apr 5 18:13:47 2016 -0700

--
 .../java/org/apache/hadoop/hbase/util/FSTableDescriptors.java| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e6a32141/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7cd2673..06eb9ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -124,11 +124,13 @@ public class FSTableDescriptors implements 
TableDescriptors {
 this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
+  @Override
   public void setCacheOn() throws IOException {
 this.cache.clear();
 this.usecache = true;
   }
 
+  @Override
   public void setCacheOff() throws IOException {
 this.usecache = false;
 this.cache.clear();
@@ -173,6 +175,8 @@ public class FSTableDescriptors implements TableDescriptors 
{
 } catch (NullPointerException e) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
   + tablename, e);
+} catch (TableInfoMissingException e) {
+  // ignore. This is regular operation
 } catch (IOException ioe) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
   + tablename, ioe);



hbase git commit: HBASE-15505 ReplicationPeerConfig should be builder-style (Gabor Liptak)

2016-04-05 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/master a93a8878f -> 7e399883f


HBASE-15505 ReplicationPeerConfig should be builder-style (Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e399883
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e399883
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e399883

Branch: refs/heads/master
Commit: 7e399883f62fd37e5215ce3a456a917e690c921c
Parents: a93a887
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Apr 5 11:44:05 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Apr 5 11:44:05 2016 -0700

--
 .../client/UnmodifyableHTableDescriptor.java| 14 +++---
 .../replication/ReplicationPeerConfig.java  |  4 +-
 .../TestUnmodifyableHTableDescriptor.java   | 47 
 .../hadoop/hbase/quotas/TestQuotaFilter.java| 47 
 .../replication/TestReplicationPeerConfig.java  | 47 
 5 files changed, 151 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e399883/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
index 7331983..59a1bd5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
@@ -68,12 +68,12 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @param family HColumnDescriptor of familyto add.
*/
   @Override
-  public HTableDescriptor addFamily(final HColumnDescriptor family) {
+  public UnmodifyableHTableDescriptor addFamily(final HColumnDescriptor 
family) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
   @Override
-  public HTableDescriptor modifyFamily(HColumnDescriptor family) {
+  public UnmodifyableHTableDescriptor modifyFamily(HColumnDescriptor family) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -91,7 +91,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
*/
   @Override
-  public HTableDescriptor setReadOnly(boolean readOnly) {
+  public UnmodifyableHTableDescriptor setReadOnly(boolean readOnly) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -99,7 +99,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
*/
   @Override
-  public HTableDescriptor setValue(byte[] key, byte[] value) {
+  public UnmodifyableHTableDescriptor setValue(byte[] key, byte[] value) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -107,7 +107,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, 
java.lang.String)
*/
   @Override
-  public HTableDescriptor setValue(String key, String value) {
+  public UnmodifyableHTableDescriptor setValue(String key, String value) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -115,7 +115,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
*/
   @Override
-  public HTableDescriptor setMaxFileSize(long maxFileSize) {
+  public UnmodifyableHTableDescriptor setMaxFileSize(long maxFileSize) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -123,7 +123,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long)
*/
   @Override
-  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
+  public UnmodifyableHTableDescriptor setMemStoreFlushSize(long 
memstoreFlushSize) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7e399883/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/Replicat

hbase git commit: HBASE-15567 TestReplicationShell broken by recent replication changes (Geoffrey Jacoby)

2016-03-31 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/master d6fd85945 -> bcc42


HBASE-15567 TestReplicationShell broken by recent replication changes (Geoffrey 
Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bcc4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bcc4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bcc4

Branch: refs/heads/master
Commit: bcc420ab1ef9a397d5a299a46a3f22b09d84
Parents: d6fd859
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Mar 31 11:37:09 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Mar 31 11:37:09 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  2 +-
 .../test/ruby/hbase/replication_admin_test.rb   | 39 ++--
 2 files changed, 13 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc4/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index a026d09..f441a99 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -95,7 +95,7 @@ module Hbase
 end
 @replication_admin.add_peer(id, replication_peer_config, map)
   else
-raise(ArgumentError, "args must be either a String or Hash")
+raise(ArgumentError, "args must be a Hash")
   end
 end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc4/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 4923560..8f08dc0 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -30,12 +30,9 @@ module Hbase
 include TestHelpers
 
 def setup
-  @test_name = "hbase_shell_tests_table"
   @peer_id = '1'
 
   setup_hbase
-  drop_test_table(@test_name)
-  create_test_table(@test_name)
 
   assert_equal(0, replication_admin.list_peers.length)
 end
@@ -67,23 +64,26 @@ module Hbase
   end
 end
 
-define_test "add_peer: args must be a string or number" do
+define_test "add_peer: args must be a hash" do
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, 1)
   end
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, ['test'])
   end
+  assert_raise(ArgumentError) do
+replication_admin.add_peer(@peer_id, 'test')
+  end
 end
 
 define_test "add_peer: single zk cluster key" do
   cluster_key = "server1.cie.com:2181:/hbase"
 
-  replication_admin.add_peer(@peer_id, cluster_key)
+  replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
 
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id))
+  assert_equal(cluster_key, 
replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
 
   # cleanup for future tests
   replication_admin.remove_peer(@peer_id)
@@ -92,26 +92,11 @@ module Hbase
 define_test "add_peer: multiple zk cluster key" do
   cluster_key = "zk1,zk2,zk3:2182:/hbase-prod"
 
-  replication_admin.add_peer(@peer_id, cluster_key)
-
-  assert_equal(1, replication_admin.list_peers.length)
-  assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(replication_admin.list_peers.fetch(@peer_id), cluster_key)
-
-  # cleanup for future tests
-  replication_admin.remove_peer(@peer_id)
-end
-
-define_test "add_peer: multiple zk cluster key and table_cfs" do
-  cluster_key = "zk4,zk5,zk6:11000:/hbase-test"
-  table_cfs_str = "table1;table2:cf1;table3:cf2,cf3"
-
-  replication_admin.add_peer(@peer_id, cluster_key, table_cfs_str)
+  replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
 
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id))
-  assert_equal(table_cfs_str, 
replication_admin.show_peer_tableCFs(@peer_id))
+  assert_equal(cluster_key, 
replication_admin.list_peers.fetch(@peer_id).get_cluster_key

[4/5] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

2016-03-29 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d8a7107
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d8a7107
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d8a7107

Branch: refs/heads/branch-1.2
Commit: 8d8a7107dc4ccbf36a92f64675dc60392f85c015
Parents: 547095a
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 29 15:06:35 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d8a7107/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 306dfee..1a7c2ef 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -160,6 +160,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[2/5] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

2016-03-29 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e44dd82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e44dd82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e44dd82

Branch: refs/heads/branch-1
Commit: 1e44dd82eb593d1d385ea52e7132c05f9405a5b1
Parents: c7ad428
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 29 15:02:23 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e44dd82/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 306dfee..1a7c2ef 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -160,6 +160,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[1/5] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

2016-03-29 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 c7ad42867 -> 1e44dd82e
  refs/heads/branch-1.1 c1f0204e7 -> e5c395db3
  refs/heads/branch-1.2 547095ab7 -> 8d8a7107d
  refs/heads/branch-1.3 0f72597db -> c0b9d
  refs/heads/master 7f39baf0f -> afdfd1bd9


HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/afdfd1bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/afdfd1bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/afdfd1bd

Branch: refs/heads/master
Commit: afdfd1bd9c938fa4b5c2aa9346e559167d550785
Parents: 7f39baf
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 29 15:02:18 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/afdfd1bd/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index dc1ecf1..24ef5b2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -125,6 +125,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[3/5] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

2016-03-29 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c0b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c0b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c0b9

Branch: refs/heads/branch-1.3
Commit: c0b9d00ee3d8e27fde39b20ed9c53c882e79
Parents: 0f72597
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 29 15:04:31 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 306dfee..1a7c2ef 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -160,6 +160,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[5/5] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

2016-03-29 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5c395db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5c395db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5c395db

Branch: refs/heads/branch-1.1
Commit: e5c395db3dc58d650ec5e5c3778d16d1e09ac9cf
Parents: c1f0204
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 29 15:08:42 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5c395db/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 306dfee..1a7c2ef 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -160,6 +160,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[3/3] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05200976
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05200976
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05200976

Branch: refs/heads/master
Commit: 05200976110135abb60f9b879b9b830671c07141
Parents: cbf9c1e
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 28 17:56:32 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hbase/client/ConnectionConfiguration.java   | 144 ++
 .../hbase/client/ConnectionImplementation.java  |  69 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 511 +--
 .../org/apache/hadoop/hbase/client/HTable.java  |  55 +-
 .../hadoop/hbase/client/TableConfiguration.java | 144 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |  11 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  18 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  46 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 168 +++---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   4 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  34 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 +++
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   5 +
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestFromClientSide3.java   |  10 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java   |  76 +++
 .../TestLoadIncrementalHFilesSplitRecovery.java |   3 +-
 .../hbase/master/TestClockSkewDetection.java|  20 +-
 .../regionserver/TestRegionServerNoMaster.java  |   2 +-
 .../hbase/security/access/SecureTestUtil.java   |  12 +-
 .../security/access/TestAccessController.java   |  20 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 41 files changed, 1095 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 13ba23d..71f87f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -202,6 +202,7 @@ public class HRegionInfo implements Comparable 
{
   public final static byte[] HIDDEN_START_KEY = 
Bytes.toBytes("hidden-start-key");
 
   /** HRegionInfo for first meta region */
+  // TODO: How come Meta regions still do not have encoded region names? Fix.
   public static final HRegionInfo FIRST_META_REGIONINFO =
   new HRegionInfo(1L, TableName.META_TABLE_NAME);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index ef3f7e9..01aaec5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -88,7 +88,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
 this.pool = params.getPool();
 this.listener = params.getListener();
 
-TableConfiguration tableConf = new T

[1/3] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/master cbf9c1e11 -> 052009761


http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
index 8b2b733..45093bb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
@@ -310,6 +312,14 @@ public class TestHBaseAdminNoCluster {
   }
 });
 
Mockito.when(connection.getKeepAliveMasterService()).thenReturn(masterAdmin);
+RpcControllerFactory rpcControllerFactory = 
Mockito.mock(RpcControllerFactory.class);
+
Mockito.when(connection.getRpcControllerFactory()).thenReturn(rpcControllerFactory);
+Mockito.when(rpcControllerFactory.newController()).thenReturn(
+  Mockito.mock(PayloadCarryingRpcController.class));
+
+// we need a real retrying caller
+RpcRetryingCallerFactory callerFactory = new 
RpcRetryingCallerFactory(configuration);
+
Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
 
 Admin admin = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 1ba..d4d319a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -285,7 +285,7 @@ public class TestScannersFromClientSide {
   private void verifyExpectedCounts(Table table, Scan scan, int 
expectedRowCount,
   int expectedCellCount) throws Exception {
 ResultScanner scanner = table.getScanner(scan);
-
+
 int rowCount = 0;
 int cellCount = 0;
 Result r = null;
@@ -609,7 +609,7 @@ public class TestScannersFromClientSide {
 byte[] regionName = hri.getRegionName();
 int i = cluster.getServerWith(regionName);
 HRegionServer rs = cluster.getRegionServer(i);
-ProtobufUtil.closeRegion(
+ProtobufUtil.closeRegion(null,
   rs.getRSRpcServices(), rs.getServerName(), regionName);
 long startTime = EnvironmentEdgeManager.currentTime();
 long timeOut = 30;
@@ -627,7 +627,7 @@ public class TestScannersFromClientSide {
 RegionStates states = master.getAssignmentManager().getRegionStates();
 states.regionOffline(hri);
 states.updateRegionState(hri, State.OPENING);
-ProtobufUtil.openRegion(rs.getRSRpcServices(), rs.getServerName(), hri);
+ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), 
hri);
 startTime = EnvironmentEdgeManager.currentTime();
 while (true) {
   if (rs.getOnlineRegion(regionName) != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
new file mode 100644
index 000..b1b3b23
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[2/3] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
deleted file mode 100644
index 1113cfd..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
- * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
- * for the specific language governing permissions and limitations under the 
License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- *
- * Configuration is a heavy weight registry that does a lot of string 
operations and regex matching.
- * Method calls into Configuration account for high CPU usage and have huge 
performance impact.
- * This class caches the value in the TableConfiguration object to improve 
performance.
- * see HBASE-12128
- *
- */
-@InterfaceAudience.Private
-public class TableConfiguration {
-
-  public static final String WRITE_BUFFER_SIZE_KEY = 
"hbase.client.write.buffer";
-  public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152;
-  public static final String MAX_KEYVALUE_SIZE_KEY = 
"hbase.client.keyvalue.maxsize";
-  public static final int MAX_KEYVALUE_SIZE_DEFAULT = -1;
-
-  private final long writeBufferSize;
-  private final int metaOperationTimeout;
-  private final int operationTimeout;
-  private final int scannerCaching;
-  private final long scannerMaxResultSize;
-  private final int primaryCallTimeoutMicroSecond;
-  private final int replicaCallTimeoutMicroSecondScan;
-  private final int retries;
-  private final int maxKeyValueSize;
-
-// toggle for async/sync prefetch
-  private final boolean clientScannerAsyncPrefetch;
-
-/**
-   * Constructor
-   * @param conf Configuration object
-   */
-  TableConfiguration(Configuration conf) {
-this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, 
WRITE_BUFFER_SIZE_DEFAULT);
-
-this.metaOperationTimeout = conf.getInt(
-  HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
-  HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-
-this.operationTimeout = conf.getInt(
-  HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-
-this.scannerCaching = conf.getInt(
-  HConstants.HBASE_CLIENT_SCANNER_CACHING, 
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
-
-this.scannerMaxResultSize =
-conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
-  HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
-
-this.primaryCallTimeoutMicroSecond =
-conf.getInt("hbase.client.primaryCallTimeout.get", 1); // 10ms
-
-this.replicaCallTimeoutMicroSecondScan =
-conf.getInt("hbase.client.replicaCallTimeout.scan", 100); // 1000 
ms
-
-this.retries = conf.getInt(
-   HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-
-this.clientScannerAsyncPrefetch = conf.getBoolean(
-   Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, 
Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH);
-
-this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, 
MAX_KEYVALUE_SIZE_DEFAULT);
-  }
-
-  /**
-   * Constructor
-   * This is for internal testing purpose (using the default value).
-   * In real usage, we should read the configuration from the Configuration 
object.
-   */
-  @VisibleForTesting
-  protected TableConfiguration() {
-this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT;
-this.metaOperationTimeout = 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
-this.operationTimeout = HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
-this.scannerCaching = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
-this.scannerMaxResultSize = 
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE;
-this.primaryCallTimeoutMicroSecond = 1;
-this.replicaCallTimeoutMicroSecondScan 

[01/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5851ad023 -> 5202d3c25
  refs/heads/branch-1.1 e959eb36f -> c1f0204e7
  refs/heads/branch-1.2 142003e23 -> 6a80087f4
  refs/heads/branch-1.3 a548d9426 -> fc47cc38b


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
index c74a42a..dab2c9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
@@ -217,7 +217,7 @@ public class TestZKBasedOpenCloseRegion {
 Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
 Mockito.doThrow(new IOException()).when(htd).get((TableName) 
Mockito.any());
 try {
-  ProtobufUtil.openRegion(regionServer.getRSRpcServices(),
+  ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
 regionServer.getServerName(), REGIONINFO);
   fail("It should throw IOException ");
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index cbc1a90..22a9748 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -372,7 +372,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, actions);
+ProtobufUtil.grant(null, protocol, user, actions);
   }
 }
 return null;
@@ -395,7 +395,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, actions);
+ProtobufUtil.revoke(null, protocol, user, actions);
   }
 }
 return null;
@@ -418,7 +418,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, namespace, actions);
+ProtobufUtil.grant(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -483,7 +483,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, namespace, actions);
+ProtobufUtil.revoke(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -507,7 +507,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.grant(null, protocol, user, table, family, qualifier, 
actions);
   }
 }
 return null;
@@ -573,7 +573,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.revoke(null, protocol, user, table, family, 
qualifier, actions);
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java

[11/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/c1f0204e/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
index b3d6b41..ef636d1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
@@ -26,12 +26,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -45,18 +47,18 @@ import com.google.protobuf.Message;
 public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static Log LOG = 
LogFactory.getLog(MasterCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
 
-  public MasterCoprocessorRpcChannel(HConnection conn) {
+  public MasterCoprocessorRpcChannel(ClusterConnection conn) {
 this.connection = conn;
   }
 
   @Override
-  protected Message callExecService(Descriptors.MethodDescriptor method,
+  protected Message callExecService(RpcController controller, 
Descriptors.MethodDescriptor method,
   Message request, Message responsePrototype)
   throws IOException {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Call: "+method.getName()+", "+request.toString());
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Call: "+method.getName()+", "+request.toString());
 }
 
 final ClientProtos.CoprocessorServiceCall call =
@@ -65,7 +67,10 @@ public class MasterCoprocessorRpcChannel extends 
CoprocessorRpcChannel{
 .setServiceName(method.getService().getFullName())
 .setMethodName(method.getName())
 .setRequest(request.toByteString()).build();
-CoprocessorServiceResponse result = 
ProtobufUtil.execService(connection.getMaster(), call);
+
+// TODO: Are we retrying here? Does not seem so. We should use 
RetryingRpcCaller
+CoprocessorServiceResponse result = ProtobufUtil.execService(controller,
+  connection.getMaster(), call);
 Message response = null;
 if (result.getValue().hasValue()) {
   Message.Builder builder = responsePrototype.newBuilderForType();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1f0204e/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
index c984797..f0d34f4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -49,28 +51,28 @@ import com.google.protobuf.Message;
 public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static Log LOG = 
LogFactory.getLog(RegionCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
   private final TableName table;
   private final byte[] row;
   private byte[] lastRegion;
   private int operationTimeout;
 
-  private RpcRetryingCallerFactory rpcFactory;
+  private 

[06/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a80087f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a80087f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a80087f

Branch: refs/heads/branch-1.2
Commit: 6a80087f4612c01b7591f15f73586f2d0eeea31f
Parents: 142003e
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 28 11:45:03 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hadoop/hbase/client/ConnectionAdapter.java  |  16 +
 .../hbase/client/ConnectionConfiguration.java   | 132 ++
 .../hadoop/hbase/client/ConnectionManager.java  |  27 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 419 ++-
 .../org/apache/hadoop/hbase/client/HTable.java  |  50 +--
 .../hadoop/hbase/client/TableConfiguration.java | 132 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |   9 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  17 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  44 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 154 ---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   2 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  33 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   7 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   2 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java   |  66 +++
 .../TestLoadIncrementalHFilesSplitRecovery.java |   5 +-
 .../hbase/master/TestClockSkewDetection.java|   3 +
 .../hadoop/hbase/master/TestMasterFailover.java |  12 +-
 .../master/TestZKBasedOpenCloseRegion.java  |   2 +-
 .../hbase/security/access/SecureTestUtil.java   |  12 +-
 .../security/access/TestAccessController.java   |  20 +-
 .../security/access/TestNamespaceCommands

[12/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1f0204e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1f0204e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1f0204e

Branch: refs/heads/branch-1.1
Commit: c1f0204e77ab9000f1ed45cb8d4792eb20707e0b
Parents: e959eb3
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 28 17:05:09 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hadoop/hbase/client/ConnectionAdapter.java  |  16 +
 .../hbase/client/ConnectionConfiguration.java   | 132 ++
 .../hadoop/hbase/client/ConnectionManager.java  |  27 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 444 +--
 .../org/apache/hadoop/hbase/client/HTable.java  |  71 +--
 .../hadoop/hbase/client/TableConfiguration.java | 132 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |   9 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  17 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  44 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 154 ---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   2 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  33 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   7 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   2 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java

[08/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
index 2d2edaf..81c34d4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
@@ -26,12 +26,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -45,18 +47,18 @@ import com.google.protobuf.Message;
 public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(MasterCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
 
-  public MasterCoprocessorRpcChannel(HConnection conn) {
+  public MasterCoprocessorRpcChannel(ClusterConnection conn) {
 this.connection = conn;
   }
 
   @Override
-  protected Message callExecService(Descriptors.MethodDescriptor method,
+  protected Message callExecService(RpcController controller, 
Descriptors.MethodDescriptor method,
   Message request, Message responsePrototype)
   throws IOException {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Call: "+method.getName()+", "+request.toString());
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Call: "+method.getName()+", "+request.toString());
 }
 
 final ClientProtos.CoprocessorServiceCall call =
@@ -65,7 +67,10 @@ public class MasterCoprocessorRpcChannel extends 
CoprocessorRpcChannel{
 .setServiceName(method.getService().getFullName())
 .setMethodName(method.getName())
 .setRequest(request.toByteString()).build();
-CoprocessorServiceResponse result = 
ProtobufUtil.execService(connection.getMaster(), call);
+
+// TODO: Are we retrying here? Does not seem so. We should use 
RetryingRpcCaller
+CoprocessorServiceResponse result = ProtobufUtil.execService(controller,
+  connection.getMaster(), call);
 Message response = null;
 if (result.getValue().hasValue()) {
   Message.Builder builder = responsePrototype.newBuilderForType();

http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
index 009156e..ea70265 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -49,28 +51,28 @@ import com.google.protobuf.Message;
 public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(RegionCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
   private final TableName table;
   private final byte[] row;
   private byte[] lastRegion;
   private int operationTimeout;
 
-  private RpcRetryingCallerFactory rpcFactory;
+  

[07/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
index c74a42a..dab2c9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
@@ -217,7 +217,7 @@ public class TestZKBasedOpenCloseRegion {
 Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
 Mockito.doThrow(new IOException()).when(htd).get((TableName) 
Mockito.any());
 try {
-  ProtobufUtil.openRegion(regionServer.getRSRpcServices(),
+  ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
 regionServer.getServerName(), REGIONINFO);
   fail("It should throw IOException ");
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index cbc1a90..22a9748 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -372,7 +372,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, actions);
+ProtobufUtil.grant(null, protocol, user, actions);
   }
 }
 return null;
@@ -395,7 +395,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, actions);
+ProtobufUtil.revoke(null, protocol, user, actions);
   }
 }
 return null;
@@ -418,7 +418,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, namespace, actions);
+ProtobufUtil.grant(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -483,7 +483,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, namespace, actions);
+ProtobufUtil.revoke(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -507,7 +507,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.grant(null, protocol, user, table, family, qualifier, 
actions);
   }
 }
 return null;
@@ -573,7 +573,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.revoke(null, protocol, user, table, family, 
qualifier, actions);
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 

[02/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
index 2d2edaf..81c34d4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
@@ -26,12 +26,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -45,18 +47,18 @@ import com.google.protobuf.Message;
 public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(MasterCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
 
-  public MasterCoprocessorRpcChannel(HConnection conn) {
+  public MasterCoprocessorRpcChannel(ClusterConnection conn) {
 this.connection = conn;
   }
 
   @Override
-  protected Message callExecService(Descriptors.MethodDescriptor method,
+  protected Message callExecService(RpcController controller, 
Descriptors.MethodDescriptor method,
   Message request, Message responsePrototype)
   throws IOException {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Call: "+method.getName()+", "+request.toString());
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Call: "+method.getName()+", "+request.toString());
 }
 
 final ClientProtos.CoprocessorServiceCall call =
@@ -65,7 +67,10 @@ public class MasterCoprocessorRpcChannel extends 
CoprocessorRpcChannel{
 .setServiceName(method.getService().getFullName())
 .setMethodName(method.getName())
 .setRequest(request.toByteString()).build();
-CoprocessorServiceResponse result = 
ProtobufUtil.execService(connection.getMaster(), call);
+
+// TODO: Are we retrying here? Does not seem so. We should use 
RetryingRpcCaller
+CoprocessorServiceResponse result = ProtobufUtil.execService(controller,
+  connection.getMaster(), call);
 Message response = null;
 if (result.getValue().hasValue()) {
   Message.Builder builder = responsePrototype.newBuilderForType();

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
index 009156e..ea70265 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -49,28 +51,28 @@ import com.google.protobuf.Message;
 public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(RegionCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
   private final TableName table;
   private final byte[] row;
   private byte[] lastRegion;
   private int operationTimeout;
 
-  private RpcRetryingCallerFactory rpcFactory;
+  

[05/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a80087f/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
index 2d2edaf..81c34d4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
@@ -26,12 +26,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -45,18 +47,18 @@ import com.google.protobuf.Message;
 public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(MasterCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
 
-  public MasterCoprocessorRpcChannel(HConnection conn) {
+  public MasterCoprocessorRpcChannel(ClusterConnection conn) {
 this.connection = conn;
   }
 
   @Override
-  protected Message callExecService(Descriptors.MethodDescriptor method,
+  protected Message callExecService(RpcController controller, 
Descriptors.MethodDescriptor method,
   Message request, Message responsePrototype)
   throws IOException {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Call: "+method.getName()+", "+request.toString());
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Call: "+method.getName()+", "+request.toString());
 }
 
 final ClientProtos.CoprocessorServiceCall call =
@@ -65,7 +67,10 @@ public class MasterCoprocessorRpcChannel extends 
CoprocessorRpcChannel{
 .setServiceName(method.getService().getFullName())
 .setMethodName(method.getName())
 .setRequest(request.toByteString()).build();
-CoprocessorServiceResponse result = 
ProtobufUtil.execService(connection.getMaster(), call);
+
+// TODO: Are we retrying here? Does not seem so. We should use 
RetryingRpcCaller
+CoprocessorServiceResponse result = ProtobufUtil.execService(controller,
+  connection.getMaster(), call);
 Message response = null;
 if (result.getValue().hasValue()) {
   Message.Builder builder = responsePrototype.newBuilderForType();

http://git-wip-us.apache.org/repos/asf/hbase/blob/6a80087f/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
index 009156e..ea70265 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcController;
 
 /**
  * Provides clients with an RPC connection to call coprocessor endpoint {@link 
com.google.protobuf.Service}s
@@ -49,28 +51,28 @@ import com.google.protobuf.Message;
 public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
   private static final Log LOG = 
LogFactory.getLog(RegionCoprocessorRpcChannel.class);
 
-  private final HConnection connection;
+  private final ClusterConnection connection;
   private final TableName table;
   private final byte[] row;
   private byte[] lastRegion;
   private int operationTimeout;
 
-  private RpcRetryingCallerFactory rpcFactory;
+  

[10/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/c1f0204e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
index c74a42a..dab2c9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
@@ -217,7 +217,7 @@ public class TestZKBasedOpenCloseRegion {
 Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
 Mockito.doThrow(new IOException()).when(htd).get((TableName) 
Mockito.any());
 try {
-  ProtobufUtil.openRegion(regionServer.getRSRpcServices(),
+  ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
 regionServer.getServerName(), REGIONINFO);
   fail("It should throw IOException ");
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1f0204e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index 003e4ab..bb80f3d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -364,7 +364,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, actions);
+ProtobufUtil.grant(null, protocol, user, actions);
   }
 }
 return null;
@@ -387,7 +387,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, actions);
+ProtobufUtil.revoke(null, protocol, user, actions);
   }
 }
 return null;
@@ -410,7 +410,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, namespace, actions);
+ProtobufUtil.grant(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -475,7 +475,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, namespace, actions);
+ProtobufUtil.revoke(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -499,7 +499,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.grant(null, protocol, user, table, family, qualifier, 
actions);
   }
 }
 return null;
@@ -565,7 +565,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.revoke(null, protocol, user, table, family, 
qualifier, actions);
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1f0204e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 

[03/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc47cc38
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc47cc38
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc47cc38

Branch: refs/heads/branch-1.3
Commit: fc47cc38b7efe5d2750a019016c4cec6d5f31465
Parents: a548d94
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 28 11:31:19 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hadoop/hbase/client/ConnectionAdapter.java  |  16 +
 .../hbase/client/ConnectionConfiguration.java   | 132 ++
 .../hadoop/hbase/client/ConnectionManager.java  |  27 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 426 ++-
 .../org/apache/hadoop/hbase/client/HTable.java  |  55 +--
 .../hadoop/hbase/client/TableConfiguration.java | 132 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |   9 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  17 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  44 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 156 ---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   2 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  33 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   7 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java   |  76 
 .../TestLoadIncrementalHFilesSplitRecovery.java |   5 +-
 .../hbase/master/TestClockSkewDetection.java|   3 +
 .../hadoop/hbase/master/TestMasterFailover.java |  12 +-
 .../master/TestZKBasedOpenCloseRegion.java  |   2 +-
 .../hbase/security/access/SecureTestUtil.java   |  12 +-
 .../security/access/TestAccessController.java   |  20 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 42 files changed, 1012 insertions(+), 471 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc47cc38/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/m

[09/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java

hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5202d3c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5202d3c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5202d3c2

Branch: refs/heads/branch-1
Commit: 5202d3c25b394cac3a00a5afc0693ad221fad9d6
Parents: 5851ad0
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 28 17:00:21 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hadoop/hbase/client/ConnectionAdapter.java  |  16 +
 .../hbase/client/ConnectionConfiguration.java   | 132 ++
 .../hadoop/hbase/client/ConnectionManager.java  |  27 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 426 ++-
 .../org/apache/hadoop/hbase/client/HTable.java  |  55 +--
 .../hadoop/hbase/client/TableConfiguration.java | 132 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |   9 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  17 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  44 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 156 ---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   2 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  33 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   7 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java   |  76 
 .../TestLoadIncrementalHFilesSplitRecovery.java |   5 +-
 .../hbase/master/TestClockSkewDetection.java|   3 +
 .../hadoop/hbase/master/TestMasterFailover.java |  12 +-
 .../master/TestZKBasedOpenCloseRegion.java  |   2 +-
 .../hbase/security/access/SecureTestUtil.java   |  12 +-
 .../security/access/TestAccessController.java   |  20 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 42 files changed, 1012 insertions(+), 471 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5202d3c2/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/m

[04/12] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

2016-03-28 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/6a80087f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
index c74a42a..dab2c9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
@@ -217,7 +217,7 @@ public class TestZKBasedOpenCloseRegion {
 Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
 Mockito.doThrow(new IOException()).when(htd).get((TableName) 
Mockito.any());
 try {
-  ProtobufUtil.openRegion(regionServer.getRSRpcServices(),
+  ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
 regionServer.getServerName(), REGIONINFO);
   fail("It should throw IOException ");
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6a80087f/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
index d6ebc21..f193ac9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java
@@ -364,7 +364,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, actions);
+ProtobufUtil.grant(null, protocol, user, actions);
   }
 }
 return null;
@@ -387,7 +387,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, actions);
+ProtobufUtil.revoke(null, protocol, user, actions);
   }
 }
 return null;
@@ -410,7 +410,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, namespace, actions);
+ProtobufUtil.grant(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -475,7 +475,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, namespace, actions);
+ProtobufUtil.revoke(null, protocol, user, namespace, actions);
   }
 }
 return null;
@@ -499,7 +499,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.grant(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.grant(null, protocol, user, table, family, qualifier, 
actions);
   }
 }
 return null;
@@ -565,7 +565,7 @@ public class SecureTestUtil {
 BlockingRpcChannel service = 
acl.coprocessorService(HConstants.EMPTY_START_ROW);
 AccessControlService.BlockingInterface protocol =
 AccessControlService.newBlockingStub(service);
-ProtobufUtil.revoke(protocol, user, table, family, qualifier, 
actions);
+ProtobufUtil.revoke(null, protocol, user, table, family, 
qualifier, actions);
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6a80087f/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 

[3/3] hbase git commit: HBASE-15412 Add average region size metric (Alicia Ying Shu)

2016-03-22 Thread enis
HBASE-15412 Add average region size metric (Alicia Ying Shu)

Conflicts:

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cdd71371
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cdd71371
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cdd71371

Branch: refs/heads/branch-1.3
Commit: cdd71371d1c7d6f53d6096104d4385f385487592
Parents: 95801e4
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 22 14:46:27 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 22 14:52:25 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java |  3 +++
 .../MetricsRegionServerWrapper.java |  5 +
 .../MetricsRegionServerSourceImpl.java  |  2 +-
 .../MetricsRegionServerWrapperImpl.java | 12 +++
 .../MetricsRegionServerWrapperStub.java |  5 +
 .../regionserver/TestRegionServerMetrics.java   | 21 
 6 files changed, 47 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd71371/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 4fd5728..cdd8967 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -422,4 +422,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount";
   String RPC_MUTATE_REQUEST_COUNT_DESC =
   "Number of rpc mutation requests this region server has answered.";
+  String AVERAGE_REGION_SIZE = "averageRegionSize";
+  String AVERAGE_REGION_SIZE_DESC = 
+  "Average region size over the region server including memstore and 
storefile sizes.";
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd71371/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index bb782bb..101ea3d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -322,4 +322,9 @@ public interface MetricsRegionServerWrapper {
* Get the number of rpc mutate requests to this region server.
*/
   long getRpcMutateRequestsCount();
+
+  /**
+   * Get the average region size of this region server.
+   */
+  long getAverageRegionSize();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd71371/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index fd99045..f1c2b82 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -328,6 +328,7 @@ public class MetricsRegionServerSourceImpl
   rsWrap.getNumReferenceFiles())
   .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
   rsWrap.getStartCode())
+  .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
@@ -412,7 

[2/3] hbase git commit: HBASE-15412 Add average region size metric (Alicia Ying Shu)

2016-03-22 Thread enis
HBASE-15412 Add average region size metric (Alicia Ying Shu)

Conflicts:

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d07230a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d07230a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d07230a7

Branch: refs/heads/branch-1
Commit: d07230a7596bec8d9bb30f6faaf7f74ef2c87474
Parents: 2331d9e
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 22 14:46:27 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 22 14:50:14 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java |  3 +++
 .../MetricsRegionServerWrapper.java |  5 +
 .../MetricsRegionServerSourceImpl.java  |  2 +-
 .../MetricsRegionServerWrapperImpl.java | 12 +++
 .../MetricsRegionServerWrapperStub.java |  5 +
 .../regionserver/TestRegionServerMetrics.java   | 21 
 6 files changed, 47 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d07230a7/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 4fd5728..cdd8967 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -422,4 +422,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount";
   String RPC_MUTATE_REQUEST_COUNT_DESC =
   "Number of rpc mutation requests this region server has answered.";
+  String AVERAGE_REGION_SIZE = "averageRegionSize";
+  String AVERAGE_REGION_SIZE_DESC = 
+  "Average region size over the region server including memstore and 
storefile sizes.";
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d07230a7/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index bb782bb..101ea3d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -322,4 +322,9 @@ public interface MetricsRegionServerWrapper {
* Get the number of rpc mutate requests to this region server.
*/
   long getRpcMutateRequestsCount();
+
+  /**
+   * Get the average region size of this region server.
+   */
+  long getAverageRegionSize();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d07230a7/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index fd99045..f1c2b82 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -328,6 +328,7 @@ public class MetricsRegionServerSourceImpl
   rsWrap.getNumReferenceFiles())
   .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
   rsWrap.getStartCode())
+  .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
@@ -412,7 

[1/3] hbase git commit: HBASE-15412 Add average region size metric (Alicia Ying Shu)

2016-03-22 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 2331d9efe -> d07230a75
  refs/heads/branch-1.3 95801e480 -> cdd71371d
  refs/heads/master 234847850 -> b3fe4ed16


HBASE-15412 Add average region size metric (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3fe4ed1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3fe4ed1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3fe4ed1

Branch: refs/heads/master
Commit: b3fe4ed16c45e6411f7163099a8bc4c18c39779e
Parents: 2348478
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 22 14:46:27 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 22 14:46:27 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java |  3 +++
 .../MetricsRegionServerWrapper.java |  5 +
 .../MetricsRegionServerSourceImpl.java  |  2 +-
 .../MetricsRegionServerWrapperImpl.java | 13 +++-
 .../MetricsRegionServerWrapperStub.java |  5 +
 .../regionserver/TestRegionServerMetrics.java   | 21 
 6 files changed, 47 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3fe4ed1/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index f097296..9693bba 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -463,4 +463,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount";
   String RPC_MUTATE_REQUEST_COUNT_DESC =
   "Number of rpc mutation requests this region server has answered.";
+  String AVERAGE_REGION_SIZE = "averageRegionSize";
+  String AVERAGE_REGION_SIZE_DESC = 
+  "Average region size over the region server including memstore and 
storefile sizes.";
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3fe4ed1/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 3ae6f9c..5ecda04 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -412,4 +412,9 @@ public interface MetricsRegionServerWrapper {
* Get the number of rpc mutate requests to this region server.
*/
   long getRpcMutateRequestsCount();
+
+  /**
+   * Get the average region size of this region server.
+   */
+  long getAverageRegionSize();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3fe4ed1/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 0c24cb4..c625d49 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -328,6 +328,7 @@ public class MetricsRegionServerSourceImpl
   rsWrap.getNumReferenceFiles())
   .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC),
   rsWrap.getStartCode())
+  .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
@@ -450,7 +451,6 @@ public class MetricsRegionServerSourceImpl
   .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), 
rsWrap.getHedgedReadOps())
   .ad

[15/50] [abbrv] hbase git commit: HBASE-15222 Addendum - Use less contended classes for metrics

2016-03-21 Thread enis
HBASE-15222 Addendum - Use less contended classes for metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/77133fd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/77133fd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/77133fd2

Branch: refs/heads/HBASE-7912
Commit: 77133fd225df9f65be87ce97b38676d2bab48a71
Parents: 43f99de
Author: Elliott Clark 
Authored: Thu Feb 25 09:08:11 2016 -0800
Committer: Elliott Clark 
Committed: Thu Feb 25 09:08:11 2016 -0800

--
 .../org/apache/hadoop/hbase/util/FastLongHistogram.java   | 10 +++---
 .../org/apache/hadoop/metrics2/lib/MutableHistogram.java  |  4 +++-
 .../apache/hadoop/metrics2/lib/MutableRangeHistogram.java |  6 --
 3 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/77133fd2/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
index 78b2bf0..9b403d9 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
@@ -310,12 +310,8 @@ public class FastLongHistogram {
* Resets the histogram for new counting.
*/
   public FastLongHistogram reset() {
-if (this.bins.hasData.get()) {
-  Bins oldBins = this.bins;
-  this.bins = new Bins(this.bins, this.bins.counts.length - 3, 0.01, 0.99);
-  return new FastLongHistogram(oldBins);
-}
-
-return null;
+Bins oldBins = this.bins;
+this.bins = new Bins(this.bins, this.bins.counts.length - 3, 0.01, 0.99);
+return new FastLongHistogram(oldBins);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/77133fd2/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
index 717e0ee..5b4a294 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java
@@ -63,7 +63,9 @@ public class MutableHistogram extends MutableMetric 
implements MetricHistogram {
   public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, 
boolean all) {
 // Get a reference to the old histogram.
 FastLongHistogram histo = histogram.reset();
-updateSnapshotMetrics(metricsRecordBuilder, histo);
+if (histo != null) {
+  updateSnapshotMetrics(metricsRecordBuilder, histo);
+}
   }
 
   protected void updateSnapshotMetrics(MetricsRecordBuilder 
metricsRecordBuilder,

http://git-wip-us.apache.org/repos/asf/hbase/blob/77133fd2/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
index ac8aee0..13187af 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java
@@ -56,8 +56,10 @@ public abstract class MutableRangeHistogram extends 
MutableHistogram implements
   public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, 
boolean all) {
 // Get a reference to the old histogram.
 FastLongHistogram histo = histogram.reset();
-updateSnapshotMetrics(metricsRecordBuilder, histo);
-updateSnapshotRangeMetrics(metricsRecordBuilder, histo);
+if (histo != null) {
+  updateSnapshotMetrics(metricsRecordBuilder, histo);
+  updateSnapshotRangeMetrics(metricsRecordBuilder, histo);
+}
   }
 
   public void updateSnapshotRangeMetrics(MetricsRecordBuilder 
metricsRecordBuilder,



[31/50] [abbrv] hbase git commit: HBASE-15349 Update surefire version to 2.19.1. (Apekshit)

2016-03-21 Thread enis
HBASE-15349 Update surefire version to 2.19.1. (Apekshit)

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c660e2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c660e2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c660e2a

Branch: refs/heads/HBASE-7912
Commit: 3c660e2a0f436a52a9bbdfb7c6dd82bf67097639
Parents: c528894
Author: Apekshit 
Authored: Fri Feb 26 12:01:08 2016 -0800
Committer: stack 
Committed: Fri Feb 26 12:26:40 2016 -0800

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c660e2a/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 82eff70..b3fa787 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1239,7 +1239,7 @@
 
hbase-procedure-${project.version}-tests.jar
 hbase-it-${project.version}-tests.jar
 
hbase-annotations-${project.version}-tests.jar
-2.18.1
+2.19.1
 surefire-junit47
 
 false



[04/50] [abbrv] hbase git commit: HBASE-15016 Services a Store needs from a Region

2016-03-21 Thread enis
HBASE-15016 Services a Store needs from a Region

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/876a6ab7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/876a6ab7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/876a6ab7

Branch: refs/heads/HBASE-7912
Commit: 876a6ab73ecff71b9b4010a532272474ea241daf
Parents: 28cd48b
Author: eshcar 
Authored: Wed Feb 24 09:56:25 2016 +0200
Committer: stack 
Committed: Wed Feb 24 07:07:07 2016 -0800

--
 .../org/apache/hadoop/hbase/util/ClassSize.java |  4 +
 .../hbase/regionserver/DefaultMemStore.java |  4 +
 .../hadoop/hbase/regionserver/HMobStore.java|  3 +
 .../hadoop/hbase/regionserver/HRegion.java  | 93 +++-
 .../hadoop/hbase/regionserver/HStore.java   |  4 +
 .../hadoop/hbase/regionserver/MemStore.java |  7 ++
 .../hadoop/hbase/regionserver/Region.java   | 12 +--
 .../regionserver/RegionServicesForStores.java   | 53 +++
 .../apache/hadoop/hbase/regionserver/Store.java |  8 ++
 .../org/apache/hadoop/hbase/TestIOFencing.java  | 10 ++-
 10 files changed, 165 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/876a6ab7/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index 77acf9b..fdd0fae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -110,6 +110,8 @@ public class ClassSize {
   /** Overhead for CellSkipListSet */
   public static final int CELL_SKIPLIST_SET;
 
+  public static final int STORE_SERVICES;
+
   /* Are we running on jdk7? */
   private static final boolean JDK7;
   static {
@@ -193,6 +195,8 @@ public class ClassSize {
 TIMERANGE_TRACKER = align(ClassSize.OBJECT + Bytes.SIZEOF_LONG * 2);
 
 CELL_SKIPLIST_SET = align(OBJECT + REFERENCE);
+
+STORE_SERVICES = align(OBJECT + REFERENCE + ATOMIC_LONG);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/876a6ab7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 82d40b6..92bb7b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -162,6 +162,10 @@ public class DefaultMemStore extends AbstractMemStore {
 return;
   }
 
+  @Override
+  public void finalizeFlush() {
+  }
+
   /**
* Code to help figure if our approximation of object heap sizes is close
* enough.  See hbase-900.  Fills memstores then waits so user can heap

http://git-wip-us.apache.org/repos/asf/hbase/blob/876a6ab7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index d666db5..7b44338 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -511,6 +511,9 @@ public class HMobStore extends HStore {
 }
   }
 
+  @Override public void finalizeFlush() {
+  }
+
   public void updateCellsCountCompactedToMob(long count) {
 cellsCountCompactedToMob += count;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/876a6ab7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0d5a71e..b70a4c3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -17,6 +17,20 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;

[28/50] [abbrv] hbase git commit: HBASE-15205 Do not find the replication scope for every WAL#append() (Ram)

2016-03-21 Thread enis
HBASE-15205 Do not find the replication scope for every WAL#append() (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f2bd060
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f2bd060
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f2bd060

Branch: refs/heads/HBASE-7912
Commit: 8f2bd06019869a1738bcfd66066737cdb7802ca8
Parents: 538815d
Author: ramkrishna 
Authored: Fri Feb 26 22:30:55 2016 +0530
Committer: ramkrishna 
Committed: Fri Feb 26 22:30:55 2016 +0530

--
 .../hbase/protobuf/ReplicationProtbufUtil.java  |   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  89 +-
 .../hadoop/hbase/regionserver/HStore.java   |   2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |  13 +-
 .../hbase/regionserver/wal/FSWALEntry.java  |  10 +-
 .../hadoop/hbase/regionserver/wal/HLogKey.java  |  48 ++--
 .../regionserver/wal/WALActionsListener.java|   8 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  57 +
 .../hbase/replication/ScopeWALEntryFilter.java  |   2 +-
 .../replication/regionserver/Replication.java   |  70 +++
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   4 +-
 .../java/org/apache/hadoop/hbase/wal/WAL.java   |   9 +-
 .../org/apache/hadoop/hbase/wal/WALKey.java | 121 +++
 .../apache/hadoop/hbase/wal/WALSplitter.java|   2 +-
 .../org/apache/hadoop/hbase/TestIOFencing.java  |   3 +-
 .../hbase/coprocessor/TestWALObserver.java  |  48 +---
 .../hbase/mapreduce/TestHLogRecordReader.java   |   7 +-
 .../hbase/mapreduce/TestImportExport.java   |  16 +--
 .../hbase/mapreduce/TestWALRecordReader.java|  20 +--
 .../master/TestDistributedLogSplitting.java |   9 +-
 .../hadoop/hbase/regionserver/TestBulkLoad.java |  17 +--
 .../hadoop/hbase/regionserver/TestHRegion.java  |  16 +--
 .../regionserver/TestHRegionReplayEvents.java   |   6 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   3 +-
 .../hbase/regionserver/TestWALLockup.java   |  10 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  57 ++---
 .../regionserver/wal/TestLogRollAbort.java  |  12 +-
 .../wal/TestLogRollingNoCluster.java|  11 +-
 .../wal/TestWALActionsListener.java |  12 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |  47 ---
 .../hbase/replication/TestReplicationBase.java  |   9 ++
 .../replication/TestReplicationSmallTests.java  |  13 +-
 .../TestReplicationWALEntryFilters.java |  62 +-
 .../TestReplicationSourceManager.java   |  57 +
 .../TestReplicationWALReaderManager.java|  13 +-
 .../apache/hadoop/hbase/wal/FaultyFSLog.java|   7 +-
 .../hbase/wal/TestDefaultWALProvider.java   |  64 +++---
 .../wal/TestDefaultWALProviderWithHLogKey.java  |   7 +-
 .../apache/hadoop/hbase/wal/TestSecureWAL.java  |  11 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java |  74 
 .../hbase/wal/TestWALReaderOnSecureWAL.java |  11 +-
 .../hbase/wal/WALPerformanceEvaluation.java |  15 ++-
 42 files changed, 685 insertions(+), 389 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f2bd060/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 91185af..8cb2237 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -134,7 +134,7 @@ public class ReplicationProtbufUtil {
 keyBuilder.setOrigSequenceNumber(key.getOrigLogSeqNum());
   }
   WALEdit edit = entry.getEdit();
-  NavigableMap scopes = key.getScopes();
+  NavigableMap scopes = key.getReplicationScopes();
   if (scopes != null && !scopes.isEmpty()) {
 for (Map.Entry scope: scopes.entrySet()) {
   scopeBuilder.setFamily(ByteStringer.wrap(scope.getKey()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f2bd060/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b70a4c3..406850e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 

[03/50] [abbrv] hbase git commit: HBASE-15277 TestRegionMergeTransactionOnCluster.testWholesomeMerge fails with no connection to master; ADDING DEBUGGING

2016-03-21 Thread enis
HBASE-15277 TestRegionMergeTransactionOnCluster.testWholesomeMerge fails with 
no connection to master; ADDING DEBUGGING


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/28cd48b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/28cd48b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/28cd48b6

Branch: refs/heads/HBASE-7912
Commit: 28cd48b673ca743d193874b2951bc995699e8e89
Parents: 0024865
Author: stack 
Authored: Tue Feb 23 22:43:01 2016 -0800
Committer: stack 
Committed: Tue Feb 23 22:43:01 2016 -0800

--
 .../org/apache/hadoop/hbase/master/TableStateManager.java   | 2 +-
 .../regionserver/TestRegionMergeTransactionOnCluster.java   | 9 +++--
 2 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/28cd48b6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 12db91e..b6befaa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -131,7 +131,7 @@ public class TableStateManager {
   TableState.State tableState = getTableState(tableName);
   return TableState.isInStates(tableState, states);
 } catch (IOException e) {
-  LOG.error("Unable to get table " + tableName + " state, probably table 
not exists");
+  LOG.error("Unable to get table " + tableName + " state", e);
   return false;
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/28cd48b6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index a532bb7..cd4410f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -78,6 +78,7 @@ import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
 import com.google.common.base.Joiner;
@@ -94,6 +95,7 @@ import com.google.protobuf.ServiceException;
 public class TestRegionMergeTransactionOnCluster {
   private static final Log LOG = LogFactory
   .getLog(TestRegionMergeTransactionOnCluster.class);
+  @Rule public TestName name = new TestName();
   @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
   withLookingForStuckThread(true).build();
   private static final int NB_SERVERS = 3;
@@ -182,7 +184,6 @@ public class TestRegionMergeTransactionOnCluster {
*/
   @Test
   public void testMergeAndRestartingMaster() throws Exception {
-LOG.info("Starting testMergeAndRestartingMaster");
 final TableName tableName = 
TableName.valueOf("testMergeAndRestartingMaster");
 
 // Create table and load data.
@@ -458,11 +459,15 @@ public class TestRegionMergeTransactionOnCluster {
 }
 
 Table table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
+LOG.info("Created " + table.getName());
 if (replication > 1) {
   HBaseTestingUtility.setReplicas(ADMIN, tablename, replication);
+  LOG.info("Set replication of " + replication + " on " + table.getName());
 }
 loadData(table);
+LOG.info("Loaded " + table.getName());
 verifyRowCount(table, ROWSIZE);
+LOG.info("Verified " + table.getName());
 
 // sleep here is an ugly hack to allow region transitions to finish
 long timeout = System.currentTimeMillis() + waitTime;
@@ -474,7 +479,7 @@ public class TestRegionMergeTransactionOnCluster {
 break;
   Thread.sleep(250);
 }
-
+LOG.info("Getting regions of " + table.getName());
 tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
 TEST_UTIL.getConnection(), tablename);
 LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));



[13/50] [abbrv] hbase git commit: HBASE-15264 Implement a fan out HDFS OutputStream

2016-03-21 Thread enis
HBASE-15264 Implement a fan out HDFS OutputStream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e9d355b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e9d355b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e9d355b

Branch: refs/heads/HBASE-7912
Commit: 6e9d355b12a1e666f4d05be02775a01b6754d063
Parents: a3b4575
Author: zhangduo 
Authored: Wed Feb 24 20:47:38 2016 +0800
Committer: zhangduo 
Committed: Thu Feb 25 10:07:27 2016 +0800

--
 .../util/FanOutOneBlockAsyncDFSOutput.java  | 533 +++
 .../FanOutOneBlockAsyncDFSOutputHelper.java | 672 +++
 ...anOutOneBlockAsyncDFSOutputFlushHandler.java |  61 ++
 .../util/TestFanOutOneBlockAsyncDFSOutput.java  | 190 ++
 4 files changed, 1456 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e9d355b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
new file mode 100644
index 000..b10f180
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
@@ -0,0 +1,533 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.channels.CompletionHandler;
+import java.util.ArrayDeque;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.util.DataChecksum;
+
+import com.google.common.base.Supplier;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.EventLoop;
+import io.netty.channel.SimpleChannelInboundHandler;
+import io.netty.handler.codec.protobuf.ProtobufDecoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+import io.netty.handler.timeout.IdleState;
+import io.netty.handler.timeout.IdleStateEvent;
+import io.netty.handler.timeout.IdleStateHandler;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.FutureListener;
+import io.netty.util.concurrent.Promise;
+
+/**
+ * An asynchronous HDFS output stream implementation which fans out data to 

[34/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/99955a32/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 4371739..0240a67 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -8196,6 +8196,450 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
   }
 
+  public interface SwitchStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool enabled = 1;
+/**
+ * optional bool enabled = 1;
+ */
+boolean hasEnabled();
+/**
+ * optional bool enabled = 1;
+ */
+boolean getEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SwitchState}
+   *
+   * 
+   **
+   * State of the switch.
+   * 
+   */
+  public static final class SwitchState extends
+  com.google.protobuf.GeneratedMessage
+  implements SwitchStateOrBuilder {
+// Use SwitchState.newBuilder() to construct.
+private SwitchState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SwitchState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SwitchState defaultInstance;
+public static SwitchState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SwitchState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SwitchState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  enabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() {
+  public SwitchState parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+  throws com.google.protobuf.InvalidProtocolBufferException {
+return new SwitchState(input, extensionRegistry);
+  }
+};
+
+@java.lang.Override
+public com.google.protobuf.Parser getParserForType() {
+  return PARSER;
+}
+
+private int bitField0_;
+// optional bool enabled = 1;
+public static final int ENABLED_FIELD_NUMBER = 1;
+private boolean enabled_;
+/**
+ * optional bool 

[23/50] [abbrv] hbase git commit: Revert "HBASE-15128 Disable region splits and merges switch in master"

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
index 9805d50..8dbb5ad 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -11,13 +11,13 @@ public final class SnapshotProtos {
   public interface SnapshotFileInfoOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required .hbase.pb.SnapshotFileInfo.Type type = 1;
+// required .SnapshotFileInfo.Type type = 1;
 /**
- * required .hbase.pb.SnapshotFileInfo.Type type = 1;
+ * required .SnapshotFileInfo.Type type = 1;
  */
 boolean hasType();
 /**
- * required .hbase.pb.SnapshotFileInfo.Type type = 1;
+ * required .SnapshotFileInfo.Type type = 1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType();
 
@@ -67,7 +67,7 @@ public final class SnapshotProtos {
 getWalNameBytes();
   }
   /**
-   * Protobuf type {@code hbase.pb.SnapshotFileInfo}
+   * Protobuf type {@code SnapshotFileInfo}
*/
   public static final class SnapshotFileInfo extends
   com.google.protobuf.GeneratedMessage
@@ -157,12 +157,12 @@ public final class SnapshotProtos {
 }
 public static final com.google.protobuf.Descriptors.Descriptor
 getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
 }
 
 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
 internalGetFieldAccessorTable() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_fieldAccessorTable
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
   .ensureFieldAccessorsInitialized(
   
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class,
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class);
 }
@@ -183,7 +183,7 @@ public final class SnapshotProtos {
 }
 
 /**
- * Protobuf enum {@code hbase.pb.SnapshotFileInfo.Type}
+ * Protobuf enum {@code SnapshotFileInfo.Type}
  */
 public enum Type
 implements com.google.protobuf.ProtocolMessageEnum {
@@ -261,21 +261,21 @@ public final class SnapshotProtos {
 this.value = value;
   }
 
-  // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotFileInfo.Type)
+  // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type)
 }
 
 private int bitField0_;
-// required .hbase.pb.SnapshotFileInfo.Type type = 1;
+// required .SnapshotFileInfo.Type type = 1;
 public static final int TYPE_FIELD_NUMBER = 1;
 private 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
type_;
 /**
- * required .hbase.pb.SnapshotFileInfo.Type type = 1;
+ * required .SnapshotFileInfo.Type type = 1;
  */
 public boolean hasType() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * required .hbase.pb.SnapshotFileInfo.Type type = 1;
+ * required .SnapshotFileInfo.Type type = 1;
  */
 public 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType() {
   return type_;
@@ -613,19 +613,19 @@ public final class SnapshotProtos {
   return builder;
 }
 /**
- * Protobuf type {@code hbase.pb.SnapshotFileInfo}
+ * Protobuf type {@code SnapshotFileInfo}
  */
 public static final class Builder extends
 com.google.protobuf.GeneratedMessage.Builder
implements 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder
 {
   public static final com.google.protobuf.Descriptors.Descriptor
   getDescriptor() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
+return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
   }
 
   protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
   internalGetFieldAccessorTable() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_fieldAccessorTable
+return 

[10/50] [abbrv] hbase git commit: HBASE-15222 Use less contended classes for metrics

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/630a6582/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
index 32d4fae..aaf4359 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.metrics2.lib;
 
-import java.util.concurrent.atomic.AtomicLongArray;
-
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
@@ -28,31 +26,30 @@ import org.apache.hadoop.metrics2.MetricsInfo;
  */
 @InterfaceAudience.Private
 public class MutableTimeHistogram extends MutableRangeHistogram {
-  private final String rangeType = "TimeRangeCount";
-  private final long[] ranges =
+  private final static String RANGE_TYPE = "TimeRangeCount";
+  private final static long[] RANGES =
   { 1, 3, 10, 30, 100, 300, 1000, 3000, 1, 3, 6, 12, 
30, 60 };
-  private final AtomicLongArray rangeVals = new 
AtomicLongArray(ranges.length+1);
 
   public MutableTimeHistogram(MetricsInfo info) {
 this(info.name(), info.description());
   }
 
   public MutableTimeHistogram(String name, String description) {
-super(name, description);
+this(name, description, RANGES[RANGES.length - 2]);
+  }
+
+  public MutableTimeHistogram(String name, String description, long 
expectedMax) {
+super(name, description, expectedMax);
   }
 
   @Override
   public String getRangeType() {
-return rangeType;
+return RANGE_TYPE;
   }
 
   @Override
-  public long[] getRange() {
-return ranges;
+  public long[] getRanges() {
+return RANGES;
   }
 
-  @Override
-  public AtomicLongArray getRangeVals() {
-return rangeVals;
-  } 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/630a6582/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
index 7381fb9..2e374f7 100644
--- 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.metrics;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.testclassification.MetricsTests;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableFastCounter;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -72,9 +73,9 @@ public class TestBaseSourceImpl {
   @Test
   public void testIncCounters() throws Exception {
 bmsi.incCounters("testinccounter", 100);
-assertEquals(100, ((MutableCounterLong) 
bmsi.metricsRegistry.get("testinccounter")).value());
+assertEquals(100, ((MutableFastCounter) 
bmsi.metricsRegistry.get("testinccounter")).value());
 bmsi.incCounters("testinccounter", 100);
-assertEquals(200, ((MutableCounterLong) 
bmsi.metricsRegistry.get("testinccounter")).value());
+assertEquals(200, ((MutableFastCounter) 
bmsi.metricsRegistry.get("testinccounter")).value());
 
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/630a6582/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 6986f12..3dcd5e2 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -178,7 +178,6 @@ org.apache.hadoop.util.StringUtils;
   AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
   // Only show if non-zero mean and stddev as is the case in combinedblockcache
   double mean = ageAtEvictionSnapshot.getMean();
-  double stddev = ageAtEvictionSnapshot.getStdDev();
 
 
 Evicted
@@ -197,13 +196,6 @@ org.apache.hadoop.util.StringUtils;
 Mean age of Blocks at eviction time (seconds)
 
 
-<%if stddev > 0 %>
-
-StdDev
-<% String.format("%,d", 

[18/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/24d481c5/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 4371739..0240a67 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -8196,6 +8196,450 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
   }
 
+  public interface SwitchStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool enabled = 1;
+/**
+ * optional bool enabled = 1;
+ */
+boolean hasEnabled();
+/**
+ * optional bool enabled = 1;
+ */
+boolean getEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SwitchState}
+   *
+   * 
+   **
+   * State of the switch.
+   * 
+   */
+  public static final class SwitchState extends
+  com.google.protobuf.GeneratedMessage
+  implements SwitchStateOrBuilder {
+// Use SwitchState.newBuilder() to construct.
+private SwitchState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SwitchState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SwitchState defaultInstance;
+public static SwitchState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SwitchState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SwitchState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  enabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() {
+  public SwitchState parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+  throws com.google.protobuf.InvalidProtocolBufferException {
+return new SwitchState(input, extensionRegistry);
+  }
+};
+
+@java.lang.Override
+public com.google.protobuf.Parser getParserForType() {
+  return PARSER;
+}
+
+private int bitField0_;
+// optional bool enabled = 1;
+public static final int ENABLED_FIELD_NUMBER = 1;
+private boolean enabled_;
+/**
+ * optional bool 

[43/50] [abbrv] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-03-21 Thread enis
HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c54525c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c54525c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c54525c

Branch: refs/heads/HBASE-7912
Commit: 7c54525c89bbbe0c66401813433bfb957e461eac
Parents: bc11288
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:24:18 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c54525c/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index bac4edb..15828ce 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.addColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly 
specified." + CRLF)
+  .type(MIMETYPE_TEXT).entity("Bad request: Column to check 
incorrectly specified." + CRLF)
   .build();
   }
-  delete.addColumns(parts[0], parts[1]);
 
  

[47/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
new file mode 100644
index 000..7bd6e99
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
@@ -0,0 +1,702 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.BackupUtility;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+/**
+ * A Handler to carry the operations of backup progress
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupHandler implements Callable {
+  private static final Log LOG = LogFactory.getLog(BackupHandler.class);
+
+  // backup phase
+  // for overall backup (for table list, some table may go online, while some 
may go offline)
+  protected static enum BackupPhase {
+REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, 
STORE_MANIFEST;
+  }
+
+  // backup status flag
+  public static enum BackupState {
+WAITING, RUNNING, COMPLETE, FAILED, CANCELLED;
+  }
+
+  protected final BackupContext backupContext;
+  private final BackupManager backupManager;
+  private final Configuration conf;
+  private final Connection conn;
+
+  public BackupHandler(BackupContext backupContext,
+  BackupManager backupManager, Configuration conf, Connection connection) {
+this.backupContext = backupContext;
+this.backupManager = backupManager;
+this.conf = conf;
+this.conn = connection;
+  }
+
+  public BackupContext getBackupContext() {
+return backupContext;
+  }
+
+  @Override
+  public Void call() throws Exception {
+try(Admin admin = conn.getAdmin()) {
+  // overall backup begin
+  this.beginBackup(backupContext);
+  HashMap newTimestamps = null;
+  // handle full or incremental backup for table or table list
+  if (backupContext.getType() == BackupType.FULL) {
+String savedStartCode = null;
+boolean firstBackup = false;
+// do snapshot for full table backup
+
+try {
+  savedStartCode = backupManager.readBackupStartCode();
+  firstBackup = savedStartCode == null;
+  if (firstBackup) {
+// This is our first backup. Let's put some marker on ZK so that 
we can hold the logs
+// while we do the backup.
+backupManager.writeBackupStartCode(0L);
+  }
+  // We roll log here before we do the snapshot. It is possible there 
is duplicate data
+  // in the log that is already in the snapshot. But 

[06/50] [abbrv] hbase git commit: HBASE-15302 Reenable the other tests disabled by HBASE-14678

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/30cec72f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
new file mode 100644
index 000..c5728cf
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -0,0 +1,1799 @@
+/**
+ *
+
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static 
org.apache.hadoop.hbase.SplitLogCounters.tot_mgr_wait_for_zk_delete;
+import static 
org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_final_transition_failed;
+import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_preempt_task;
+import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_acquired;
+import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_done;
+import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_err;
+import static org.apache.hadoop.hbase.SplitLogCounters.tot_wkr_task_resigned;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.SplitLogCounters;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.NonceGenerator;
+import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
+import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
+import org.apache.hadoop.hbase.exceptions.OperationConflictException;
+import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import 

[24/50] [abbrv] hbase git commit: Revert "HBASE-15128 Disable region splits and merges switch in master"

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 073eba9..043d549 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -8,88 +8,6 @@ public final class MasterProtos {
   public static void registerAllExtensions(
   com.google.protobuf.ExtensionRegistry registry) {
   }
-  /**
-   * Protobuf enum {@code hbase.pb.MasterSwitchType}
-   */
-  public enum MasterSwitchType
-  implements com.google.protobuf.ProtocolMessageEnum {
-/**
- * SPLIT = 0;
- */
-SPLIT(0, 0),
-/**
- * MERGE = 1;
- */
-MERGE(1, 1),
-;
-
-/**
- * SPLIT = 0;
- */
-public static final int SPLIT_VALUE = 0;
-/**
- * MERGE = 1;
- */
-public static final int MERGE_VALUE = 1;
-
-
-public final int getNumber() { return value; }
-
-public static MasterSwitchType valueOf(int value) {
-  switch (value) {
-case 0: return SPLIT;
-case 1: return MERGE;
-default: return null;
-  }
-}
-
-public static com.google.protobuf.Internal.EnumLiteMap
-internalGetValueMap() {
-  return internalValueMap;
-}
-private static com.google.protobuf.Internal.EnumLiteMap
-internalValueMap =
-  new com.google.protobuf.Internal.EnumLiteMap() {
-public MasterSwitchType findValueByNumber(int number) {
-  return MasterSwitchType.valueOf(number);
-}
-  };
-
-public final com.google.protobuf.Descriptors.EnumValueDescriptor
-getValueDescriptor() {
-  return getDescriptor().getValues().get(index);
-}
-public final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptorForType() {
-  return getDescriptor();
-}
-public static final com.google.protobuf.Descriptors.EnumDescriptor
-getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(0);
-}
-
-private static final MasterSwitchType[] VALUES = values();
-
-public static MasterSwitchType valueOf(
-com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-  if (desc.getType() != getDescriptor()) {
-throw new java.lang.IllegalArgumentException(
-  "EnumValueDescriptor is not for this type.");
-  }
-  return VALUES[desc.getIndex()];
-}
-
-private final int index;
-private final int value;
-
-private MasterSwitchType(int index, int value) {
-  this.index = index;
-  this.value = value;
-}
-
-// @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType)
-  }
-
   public interface AddColumnRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
@@ -28846,62 +28764,28 @@ public final class MasterProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.IsBalancerEnabledResponse)
   }
 
-  public interface SetSplitOrMergeEnabledRequestOrBuilder
+  public interface NormalizeRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
-
-// required bool enabled = 1;
-/**
- * required bool enabled = 1;
- */
-boolean hasEnabled();
-/**
- * required bool enabled = 1;
- */
-boolean getEnabled();
-
-// optional bool synchronous = 2;
-/**
- * optional bool synchronous = 2;
- */
-boolean hasSynchronous();
-/**
- * optional bool synchronous = 2;
- */
-boolean getSynchronous();
-
-// repeated .hbase.pb.MasterSwitchType switch_types = 3;
-/**
- * repeated .hbase.pb.MasterSwitchType switch_types = 3;
- */
-
java.util.List
 getSwitchTypesList();
-/**
- * repeated .hbase.pb.MasterSwitchType switch_types = 3;
- */
-int getSwitchTypesCount();
-/**
- * repeated .hbase.pb.MasterSwitchType switch_types = 3;
- */
-org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
   }
   /**
-   * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
+   * Protobuf type {@code hbase.pb.NormalizeRequest}
*/
-  public static final class SetSplitOrMergeEnabledRequest extends
+  public static final class NormalizeRequest extends
   com.google.protobuf.GeneratedMessage
-  implements SetSplitOrMergeEnabledRequestOrBuilder {
-// Use SetSplitOrMergeEnabledRequest.newBuilder() to construct.
-private 
SetSplitOrMergeEnabledRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  implements NormalizeRequestOrBuilder {
+

[30/50] [abbrv] hbase git commit: HBASE-15332 Document how to take advantage of HDFS-6133 in HBase

2016-03-21 Thread enis
HBASE-15332 Document how to take advantage of HDFS-6133 in HBase

(cherry picked from commit e0a656ed50027a7d982f1eca7a8c0ee3cab47f92)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5288947
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5288947
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5288947

Branch: refs/heads/HBASE-7912
Commit: c5288947ddc4abae2f4036544a775ff81538df2f
Parents: e88d943
Author: Misty Stanley-Jones 
Authored: Thu Feb 25 13:51:26 2016 -0800
Committer: Misty Stanley-Jones 
Committed: Fri Feb 26 09:38:32 2016 -0800

--
 .../asciidoc/_chapters/troubleshooting.adoc | 22 
 1 file changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5288947/src/main/asciidoc/_chapters/troubleshooting.adoc
--
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc 
b/src/main/asciidoc/_chapters/troubleshooting.adoc
index 66e56b8..8b2011d 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -1347,6 +1347,28 @@ Settings for HDFS retries and timeouts are important to 
HBase.::
   Defaults are current as of Hadoop 2.3.
   Check the Hadoop documentation for the most current values and 
recommendations.
 
+The HBase Balancer and HDFS Balancer are incompatible::
+  The HDFS balancer attempts to spread HDFS blocks evenly among DataNodes. 
HBase relies
+  on compactions to restore locality after a region split or failure. These 
two types
+  of balancing do not work well together.
++
+In the past, the generally accepted advice was to turn off the HDFS load 
balancer and rely
+on the HBase balancer, since the HDFS balancer would degrade locality. This 
advice
+is still valid if your HDFS version is lower than 2.7.1.
++
+link:https://issues.apache.org/jira/browse/HDFS-6133[HDFS-6133] provides the 
ability
+to exclude a given directory from the HDFS load balancer, by setting the
+`dfs.datanode.block-pinning.enabled` property to `true` in your HDFS
+configuration and running the following hdfs command:
++
+
+$ sudo -u hdfs hdfs balancer -exclude /hbase
+
++
+NOTE: HDFS-6133 is available in HDFS 2.7.0 and higher, but HBase does not 
support
+running on HDFS 2.7.0, so you must be using HDFS 2.7.1 or higher to use this 
feature
+with HBase.
+
 .Connection Timeouts
 Connection timeouts occur between the client (HBASE) and the HDFS DataNode.
 They may occur when establishing a connection, attempting to read, or 
attempting to write.



[40/50] [abbrv] hbase git commit: HBASE-15358 canEnforceTimeLimitFromScope should use timeScope instead of sizeScope

2016-03-21 Thread enis
HBASE-15358 canEnforceTimeLimitFromScope should use timeScope instead of 
sizeScope

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/88f77599
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/88f77599
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/88f77599

Branch: refs/heads/HBASE-7912
Commit: 88f775996b3b52d784ad13ab07515134619316ba
Parents: 03ffb30
Author: Phil Yang 
Authored: Mon Feb 29 16:26:51 2016 +0800
Committer: zhangduo 
Committed: Mon Feb 29 17:21:42 2016 +0800

--
 .../java/org/apache/hadoop/hbase/regionserver/ScannerContext.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/88f77599/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 2062230..6674443 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -626,7 +626,7 @@ public class ScannerContext {
  * @return true when the limit can be enforced from the scope of the 
checker
  */
 boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
-  return this.sizeScope.canEnforceLimitFromScope(checkerScope);
+  return this.timeScope.canEnforceLimitFromScope(checkerScope);
 }
 
 @Override



[21/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
HBASE-15128 Disable region splits and merges switch in master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24d481c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24d481c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24d481c5

Branch: refs/heads/HBASE-7912
Commit: 24d481c5803e69a6190339cd8bb218b2c4585459
Parents: 75c57a0
Author: chenheng 
Authored: Fri Feb 26 08:11:16 2016 +0800
Committer: chenheng 
Committed: Fri Feb 26 08:11:16 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   27 +-
 .../hbase/client/ConnectionImplementation.java  |   14 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   31 +
 .../hadoop/hbase/protobuf/RequestConverter.java |   49 +
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 4304 ++
 .../protobuf/generated/SnapshotProtos.java  |  500 +-
 .../protobuf/generated/ZooKeeperProtos.java |  462 +-
 hbase-protocol/src/main/protobuf/Master.proto   |   36 +
 .../src/main/protobuf/ZooKeeper.proto   |7 +
 .../hadoop/hbase/master/AssignmentManager.java  |   10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   28 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   42 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   35 +
 .../zookeeper/SplitOrMergeTrackerManager.java   |  151 +
 .../hbase/client/TestSplitOrMergeStatus.java|  198 +
 hbase-shell/src/main/ruby/hbase/admin.rb|   32 +
 hbase-shell/src/main/ruby/shell.rb  |2 +
 .../ruby/shell/commands/splitormerge_enabled.rb |   41 +
 .../ruby/shell/commands/splitormerge_switch.rb  |   43 +
 20 files changed, 4822 insertions(+), 1200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/24d481c5/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d7b52d5..c3b524b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1678,11 +1678,28 @@ public interface Admin extends Abortable, Closeable {
   List getSecurityCapabilities() throws IOException;
 
   /**
+   * Turn the Split or Merge switches on or off.
+   *
+   * @param enabled enabled or not
+   * @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param switchTypes switchType list {@link MasterSwitchType}
+   * @return Previous switch value array
+   */
+  boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final MasterSwitchType... switchTypes) 
throws IOException;
+
+  /**
+   * Query the current state of the switch
+   *
+   * @return true if the switch is enabled, false otherwise.
+   */
+  boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
+
+  /**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.
* */
-
   @InterfaceAudience.Public
   @InterfaceStability.Unstable
   public enum CompactType {
@@ -1692,4 +1709,12 @@ public interface Admin extends Abortable, Closeable {
 
 CompactType(int value) {}
   }
+  
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum MasterSwitchType {
+SPLIT,
+MERGE
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/24d481c5/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index dfa9937..64eb9fb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1742,6 +1742,20 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
+  public MasterProtos.SetSplitOrMergeEnabledResponse 
setSplitOrMergeEnabled(
+RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest 
request)
+throws ServiceException {
+return stub.setSplitOrMergeEnabled(controller, request);
+  }
+
+  @Override
+  public MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(
+   

[48/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-protocol/src/main/protobuf/Backup.proto
--
diff --git a/hbase-protocol/src/main/protobuf/Backup.proto 
b/hbase-protocol/src/main/protobuf/Backup.proto
new file mode 100644
index 000..383b990
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Backup.proto
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains Backup manifest
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum BackupType {
+  FULL = 0;
+  INCREMENTAL = 1;
+}
+
+message BackupImage {
+  required string backup_id = 1;
+  required BackupType backup_type = 2;
+  required string root_dir = 3;
+  repeated TableName table_list = 4;
+  required uint64 start_ts = 5;
+  required uint64 complete_ts = 6;
+  repeated BackupImage ancestors = 7; 
+}
+
+message ServerTimestamp {
+  required string server = 1;
+  required uint64 timestamp = 2;
+}
+
+message TableServerTimestamp {
+  required TableName table = 1;
+  repeated ServerTimestamp server_timestamp = 2;
+}
+
+message BackupManifest {
+  required string version = 1;
+  required string backup_id = 2;
+  required BackupType type = 3;
+  repeated TableName table_list = 4;
+  required uint64 start_ts = 5;
+  required uint64 complete_ts = 6;
+  required int64 total_bytes = 7;
+  optional int64 log_bytes  = 8;
+  repeated TableServerTimestamp tst_map = 9;
+  repeated BackupImage dependent_backup_image = 10;
+  required bool compacted = 11; 
+}
+
+message TableBackupStatus {
+  required TableName table = 1;
+  required string target_dir = 2;
+  optional string snapshot = 3;
+}
+
+message BackupContext {
+  required string backup_id = 1;
+  required BackupType type = 2;
+  required string target_root_dir = 3;
+  optional BackupState state = 4;
+  optional BackupPhase phase = 5;
+  optional string failed_message = 6;
+  repeated TableBackupStatus table_backup_status = 7;
+  optional uint64  start_ts = 8;
+  optional uint64  end_ts = 9;
+  optional int64  total_bytes_copied = 10;
+  optional string hlog_target_dir = 11;
+  optional uint32 progress = 12; 
+  
+  enum BackupState {
+WAITING = 0;
+RUNNING = 1;
+COMPLETE = 2;
+FAILED = 3;
+CANCELLED = 4;
+  }
+
+  enum BackupPhase {
+REQUEST = 0;
+SNAPSHOT = 1;
+PREPARE_INCREMENTAL = 2;
+SNAPSHOTCOPY = 3;
+INCREMENTAL_COPY = 4;
+STORE_MANIFEST = 5;
+  } 
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d5f1e30..e4b296a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -394,6 +394,11 @@
${project.version}
true
 
+ 
+  org.apache.hadoop
+  hadoop-distcp
+  ${hadoop-two.version}
+
 
   commons-httpclient
   commons-httpclient
@@ -407,6 +412,11 @@
   commons-collections
 
 
+  org.apache.hadoop
+  hadoop-distcp
+  ${hadoop-two.version}
+
+
   org.apache.hbase
   hbase-hadoop-compat
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
new file mode 100644
index 000..7c8ea39
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache 

[26/50] [abbrv] hbase git commit: HBASE-15215 TestBlockEvictionFromClient is flaky in jdk1.7 build (setting offheap)

2016-03-21 Thread enis
HBASE-15215 TestBlockEvictionFromClient is flaky in jdk1.7 build (setting
offheap)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/538815d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/538815d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/538815d8

Branch: refs/heads/HBASE-7912
Commit: 538815d82a62cbcc7aaccec0a3bc4e44cb925277
Parents: bf4fcc3
Author: ramkrishna 
Authored: Fri Feb 26 11:43:00 2016 +0530
Committer: ramkrishna 
Committed: Fri Feb 26 11:43:00 2016 +0530

--
 .../apache/hadoop/hbase/client/TestBlockEvictionFromClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/538815d8/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index f4d668c..d3f718b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -102,7 +102,7 @@ public class TestBlockEvictionFromClient {
 // tests
 conf.setInt("hbase.regionserver.handler.count", 20);
 conf.setInt("hbase.bucketcache.size", 400);
-conf.setStrings("hbase.bucketcache.ioengine", "heap");
+conf.setStrings("hbase.bucketcache.ioengine", "offheap");
 conf.setFloat("hfile.block.cache.size", 0.2f);
 conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);// do not retry



[07/50] [abbrv] hbase git commit: HBASE-15302 Reenable the other tests disabled by HBASE-14678

2016-03-21 Thread enis
HBASE-15302 Reenable the other tests disabled by HBASE-14678

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30cec72f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30cec72f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30cec72f

Branch: refs/heads/HBASE-7912
Commit: 30cec72f9ade972d7e9ce4bba527b0e6074cae60
Parents: 876a6ab
Author: Phil Yang 
Authored: Mon Feb 22 14:17:24 2016 +0800
Committer: stack 
Committed: Wed Feb 24 07:14:01 2016 -0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java|   11 +-
 .../hbase/TestPartialResultsFromClientSide.java |  832 
 .../TestMobSnapshotCloneIndependence.java   |   69 +
 .../client/TestSnapshotCloneIndependence.java   |  481 +
 .../master/TestDistributedLogSplitting.java | 1799 ++
 .../balancer/TestStochasticLoadBalancer2.java   |   90 +
 .../TestMasterFailoverWithProcedures.java   |  514 +
 .../TestMobFlushSnapshotFromClient.java |   72 +
 .../apache/hadoop/hbase/wal/TestWALSplit.java   | 1320 +
 .../hbase/wal/TestWALSplitCompressed.java   |   36 +
 .../hbase/client/TestReplicationShell.java  |   37 +
 11 files changed, 5256 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/30cec72f/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 54b82b2..010fd37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -515,13 +515,14 @@ public class WALSplitter {
* @param fs
* @param logEntry
* @param rootDir HBase root dir.
-   * @param fileBeingSplit the file being split currently. Used to generate 
tmp file name.
+   * @param fileNameBeingSplit the file being split currently. Used to 
generate tmp file name.
* @return Path to file into which to dump split log edits.
* @throws IOException
*/
   @SuppressWarnings("deprecation")
-  private static Path getRegionSplitEditsPath(final FileSystem fs,
-  final Entry logEntry, final Path rootDir, FileStatus fileBeingSplit)
+  @VisibleForTesting
+  static Path getRegionSplitEditsPath(final FileSystem fs,
+  final Entry logEntry, final Path rootDir, String fileNameBeingSplit)
   throws IOException {
 Path tableDir = FSUtils.getTableDir(rootDir, 
logEntry.getKey().getTablename());
 String encodedRegionName = 
Bytes.toString(logEntry.getKey().getEncodedRegionName());
@@ -556,7 +557,7 @@ public class WALSplitter {
 // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
 // region's replayRecoveredEdits will not delete it
 String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getSequenceId());
-fileName = getTmpRecoveredEditsFileName(fileName + "-" + 
fileBeingSplit.getPath().getName());
+fileName = getTmpRecoveredEditsFileName(fileName + "-" + 
fileNameBeingSplit);
 return new Path(dir, fileName);
   }
 
@@ -1518,7 +1519,7 @@ public class WALSplitter {
  * @return a path with a write for that path. caller should close.
  */
 private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir) 
throws IOException {
-  Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, 
fileBeingSplit);
+  Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, 
fileBeingSplit.getPath().getName());
   if (regionedits == null) {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/30cec72f/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
new file mode 100644
index 000..a6f8373
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -0,0 +1,832 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the 

[25/50] [abbrv] hbase git commit: Revert "HBASE-15128 Disable region splits and merges switch in master"

2016-03-21 Thread enis
Revert "HBASE-15128 Disable region splits and merges switch in master"

This reverts commit 24d481c5803e69a6190339cd8bb218b2c4585459.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bf4fcc30
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bf4fcc30
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bf4fcc30

Branch: refs/heads/HBASE-7912
Commit: bf4fcc30c62395e8db9fe52fde07c752f9e00e54
Parents: 24d481c
Author: chenheng 
Authored: Fri Feb 26 08:52:12 2016 +0800
Committer: chenheng 
Committed: Fri Feb 26 08:52:12 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   27 +-
 .../hbase/client/ConnectionImplementation.java  |   14 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   31 -
 .../hadoop/hbase/protobuf/RequestConverter.java |   49 -
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 -
 .../hbase/protobuf/generated/MasterProtos.java  | 4304 --
 .../protobuf/generated/SnapshotProtos.java  |  500 +-
 .../protobuf/generated/ZooKeeperProtos.java |  462 +-
 hbase-protocol/src/main/protobuf/Master.proto   |   36 -
 .../src/main/protobuf/ZooKeeper.proto   |7 -
 .../hadoop/hbase/master/AssignmentManager.java  |   10 -
 .../org/apache/hadoop/hbase/master/HMaster.java |   28 -
 .../hadoop/hbase/master/MasterRpcServices.java  |   42 -
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   35 -
 .../zookeeper/SplitOrMergeTrackerManager.java   |  151 -
 .../hbase/client/TestSplitOrMergeStatus.java|  198 -
 hbase-shell/src/main/ruby/hbase/admin.rb|   32 -
 hbase-shell/src/main/ruby/shell.rb  |2 -
 .../ruby/shell/commands/splitormerge_enabled.rb |   41 -
 .../ruby/shell/commands/splitormerge_switch.rb  |   43 -
 20 files changed, 1200 insertions(+), 4822 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c3b524b..d7b52d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1678,28 +1678,11 @@ public interface Admin extends Abortable, Closeable {
   List getSecurityCapabilities() throws IOException;
 
   /**
-   * Turn the Split or Merge switches on or off.
-   *
-   * @param enabled enabled or not
-   * @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
-   * @param switchTypes switchType list {@link MasterSwitchType}
-   * @return Previous switch value array
-   */
-  boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
-   final MasterSwitchType... switchTypes) 
throws IOException;
-
-  /**
-   * Query the current state of the switch
-   *
-   * @return true if the switch is enabled, false otherwise.
-   */
-  boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
-
-  /**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.
* */
+
   @InterfaceAudience.Public
   @InterfaceStability.Unstable
   public enum CompactType {
@@ -1709,12 +1692,4 @@ public interface Admin extends Abortable, Closeable {
 
 CompactType(int value) {}
   }
-  
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public enum MasterSwitchType {
-SPLIT,
-MERGE
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bf4fcc30/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 64eb9fb..dfa9937 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1742,20 +1742,6 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
-  public MasterProtos.SetSplitOrMergeEnabledResponse 
setSplitOrMergeEnabled(
-RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest 
request)
-throws ServiceException {
-return stub.setSplitOrMergeEnabled(controller, request);
-  }
-
-  @Override
-  

[05/50] [abbrv] hbase git commit: HBASE-15302 Reenable the other tests disabled by HBASE-14678

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/30cec72f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
new file mode 100644
index 000..125f5a1
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -0,0 +1,514 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, LargeTests.class})
+public class TestMasterFailoverWithProcedures {
+  private static final Log LOG = 
LogFactory.getLog(TestMasterFailoverWithProcedures.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static void setupConf(Configuration conf) {
+// don't waste time retrying with the roll, the test is already slow 
enough.
+conf.setInt("hbase.procedure.store.wal.max.retries.before.roll", 1);
+conf.setInt("hbase.procedure.store.wal.wait.before.roll", 0);
+conf.setInt("hbase.procedure.store.wal.max.roll.retries", 1);
+conf.setInt("hbase.procedure.store.wal.sync.failure.roll.max", 1);
+  }
+
+  @Before
+  public void setup() throws Exception {
+setupConf(UTIL.getConfiguration());
+UTIL.startMiniCluster(2, 1);
+
+final ProcedureExecutor procExec = 
getMasterProcedureExecutor();
+ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
+ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+try {
+  UTIL.shutdownMiniCluster();
+} catch (Exception e) {
+  LOG.warn("failure shutting down cluster", e);
+}
+  }
+
+  @Test(timeout=6)
+  public void testWalRecoverLease() throws Exception {
+final ProcedureStore masterStore = getMasterProcedureExecutor().getStore();
+assertTrue("expected WALStore for this 

[35/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/99955a32/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
index 8dbb5ad..9805d50 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -11,13 +11,13 @@ public final class SnapshotProtos {
   public interface SnapshotFileInfoOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 boolean hasType();
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType();
 
@@ -67,7 +67,7 @@ public final class SnapshotProtos {
 getWalNameBytes();
   }
   /**
-   * Protobuf type {@code SnapshotFileInfo}
+   * Protobuf type {@code hbase.pb.SnapshotFileInfo}
*/
   public static final class SnapshotFileInfo extends
   com.google.protobuf.GeneratedMessage
@@ -157,12 +157,12 @@ public final class SnapshotProtos {
 }
 public static final com.google.protobuf.Descriptors.Descriptor
 getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
 }
 
 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
 internalGetFieldAccessorTable() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_fieldAccessorTable
   .ensureFieldAccessorsInitialized(
   
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class,
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class);
 }
@@ -183,7 +183,7 @@ public final class SnapshotProtos {
 }
 
 /**
- * Protobuf enum {@code SnapshotFileInfo.Type}
+ * Protobuf enum {@code hbase.pb.SnapshotFileInfo.Type}
  */
 public enum Type
 implements com.google.protobuf.ProtocolMessageEnum {
@@ -261,21 +261,21 @@ public final class SnapshotProtos {
 this.value = value;
   }
 
-  // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type)
+  // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotFileInfo.Type)
 }
 
 private int bitField0_;
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 public static final int TYPE_FIELD_NUMBER = 1;
 private 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
type_;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public boolean hasType() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType() {
   return type_;
@@ -613,19 +613,19 @@ public final class SnapshotProtos {
   return builder;
 }
 /**
- * Protobuf type {@code SnapshotFileInfo}
+ * Protobuf type {@code hbase.pb.SnapshotFileInfo}
  */
 public static final class Builder extends
 com.google.protobuf.GeneratedMessage.Builder
implements 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder
 {
   public static final com.google.protobuf.Descriptors.Descriptor
   getDescriptor() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
   }
 
   protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
   internalGetFieldAccessorTable() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+return 

[02/50] [abbrv] hbase git commit: HBASE-15184 SparkSQL Scan operation doesn't work on kerberos cluster (Ted Malaska)

2016-03-21 Thread enis
HBASE-15184 SparkSQL Scan operation doesn't work on kerberos cluster (Ted 
Malaska)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/00248656
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/00248656
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/00248656

Branch: refs/heads/HBASE-7912
Commit: 00248656ee9c60009ff1697e90ba9d0f86264103
Parents: f47dba7
Author: tedyu 
Authored: Tue Feb 23 16:52:13 2016 -0800
Committer: tedyu 
Committed: Tue Feb 23 16:52:13 2016 -0800

--
 .../hadoop/hbase/spark/DefaultSource.scala  |  4 +--
 .../hadoop/hbase/spark/HBaseContext.scala   | 15 +---
 .../apache/hadoop/hbase/spark/NewHBaseRDD.scala | 36 
 .../spark/datasources/HBaseTableScanRDD.scala   | 15 
 4 files changed, 57 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/00248656/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
index b6d7982..844b5b5 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
@@ -164,7 +164,7 @@ case class HBaseRelation (val tableName:String,
 HBaseSparkConf.BULKGET_SIZE,  HBaseSparkConf.defaultBulkGetSize))
 
   //create or get latest HBaseContext
-  @transient val hbaseContext:HBaseContext = if (useHBaseContext) {
+  val hbaseContext:HBaseContext = if (useHBaseContext) {
 LatestHBaseContextCache.latest
   } else {
 val config = HBaseConfiguration.create()
@@ -270,7 +270,7 @@ case class HBaseRelation (val tableName:String,
 } else {
   None
 }
-val hRdd = new HBaseTableScanRDD(this, pushDownFilterJava, 
requiredQualifierDefinitionList.seq)
+val hRdd = new HBaseTableScanRDD(this, hbaseContext, pushDownFilterJava, 
requiredQualifierDefinitionList.seq)
 pushDownRowKeyFilter.points.foreach(hRdd.addPoint(_))
 pushDownRowKeyFilter.ranges.foreach(hRdd.addRange(_))
 var resultRDD: RDD[Row] = {

http://git-wip-us.apache.org/repos/asf/hbase/blob/00248656/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index 2d21e69..61ed3cf 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
 import org.apache.hadoop.hbase.io.hfile.{CacheConfig, HFileContextBuilder, 
HFileWriterImpl}
 import org.apache.hadoop.hbase.regionserver.{HStore, StoreFile, BloomType}
 import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.mapred.JobConf
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.rdd.RDD
@@ -228,7 +229,7 @@ class HBaseContext(@transient sc: SparkContext,
 }))
   }
 
-  def applyCreds[T] (configBroadcast: 
Broadcast[SerializableWritable[Configuration]]){
+  def applyCreds[T] (){
 credentials = SparkHadoopUtil.get.getCurrentUserCredentials()
 
 logDebug("appliedCredentials:" + appliedCredentials + ",credentials:" + 
credentials)
@@ -440,10 +441,14 @@ class HBaseContext(@transient sc: SparkContext,
 TableMapReduceUtil.initTableMapperJob(tableName, scan,
   classOf[IdentityTableMapper], null, null, job)
 
-sc.newAPIHadoopRDD(job.getConfiguration,
+val jconf = new JobConf(job.getConfiguration)
+SparkHadoopUtil.get.addCredentials(jconf)
+new NewHBaseRDD(sc,
   classOf[TableInputFormat],
   classOf[ImmutableBytesWritable],
-  classOf[Result]).map(f)
+  classOf[Result],
+  job.getConfiguration,
+  this).map(f)
   }
 
   /**
@@ -474,7 +479,7 @@ class HBaseContext(@transient sc: SparkContext,
 
 val config = getConf(configBroadcast)
 
-applyCreds(configBroadcast)
+applyCreds
 // specify that this is a proxy user
 val connection = ConnectionFactory.createConnection(config)
 f(it, connection)
@@ -514,7 +519,7 @@ class HBaseContext(@transient sc: SparkContext,
  Iterator[U]): Iterator[U] = {
 
 val config = getConf(configBroadcast)
-applyCreds(configBroadcast)
+applyCreds
 
 val connection = 

[33/50] [abbrv] hbase git commit: HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared cell (Ajith)

2016-03-21 Thread enis
HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared 
cell (Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/793babf4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/793babf4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/793babf4

Branch: refs/heads/HBASE-7912
Commit: 793babf4a4c0156f9e712a2bbf9578e2a1d6e1e4
Parents: 8f6e297
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Feb 26 15:05:59 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Feb 26 15:05:59 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 32 +++--
 .../hadoop/hbase/rest/RowResourceBase.java  | 39 +--
 .../hbase/rest/TestGetAndPutResource.java   | 69 
 3 files changed, 129 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/793babf4/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index f922343..bac4edb 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -455,20 +455,40 @@ public class RowResource extends ResourceBase {
   byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
   if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
 CellModel valueToPutCell = null;
+
+// Copy all the cells to the Put request
+// and track if the check cell's latest value is also sent
 for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
-  if(Bytes.equals(cellModels.get(i).getColumn(),
-  valueToCheckCell.getColumn())) {
-valueToPutCell = cellModels.get(i);
-break;
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  byte [][] parts = KeyValue.parseColumn(col);
+
+  if (parts.length != 2) {
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+.build();
+  }
+  put.addImmutable(parts[0], parts[1], cell.getTimestamp(), 
cell.getValue());
+
+  if(Bytes.equals(col,
+  valueToCheckCell.getColumn())) {
+valueToPutCell = cell;
   }
 }
+
 if (valueToPutCell == null) {
   servlet.getMetrics().incrementFailedPutRequests(1);
   return 
Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
   .entity("Bad request: The column to put and check do not match." 
+ CRLF).build();
 } else {
-  put.addImmutable(valueToPutParts[0], valueToPutParts[1], 
valueToPutCell.getTimestamp(),
-valueToPutCell.getValue());
   retValue = table.checkAndPut(key, valueToPutParts[0], 
valueToPutParts[1],
 valueToCheckCell.getValue(), put);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/793babf4/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
--
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
index 48cebb2..b2fc0a6 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.util.*;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.bind.JAXBContext;
@@ -29,6 +30,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
+import org.apache.commons.collections.keyvalue.AbstractMapEntry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -232,13 +234,22 @@ public class RowResourceBase {
   }
 
   protected static Response checkA

[44/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 000..21bf63c
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = 
LogFactory.getLog(TestBackupBoundaryTests.class);
+
+  /**
+   * Verify that full backup is created on a single empty table correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupSingleEmpty() throws Exception {
+
+LOG.info("create full backup image on single table");
+List tables = Lists.newArrayList(table3);
+String backupId = getBackupClient().create(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+LOG.info("Finished Backup");
+assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup is created on multiple empty tables correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupMultipleEmpty() throws Exception {
+LOG.info("create full backup image on mulitple empty tables");
+
+List tables = Lists.newArrayList(table3, table4);
+String backupId = getBackupClient().create(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on a single table that does not exist.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupSingleDNE() throws Exception {
+
+LOG.info("test full backup fails on a single table that does not exist");
+List tables = toList("tabledne");
+String backupId = getBackupClient().create(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on multiple tables that do not exist.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupMultipleDNE() throws Exception {
+
+LOG.info("test full backup fails on multiple tables that do not exist");
+List tables = toList("table1dne", "table2dne");
+String backupId = getBackupClient().create(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that full backup fails on tableset containing real and fake tables.
+   * @throws Exception
+   */
+  @Test(expected = DoNotRetryIOException.class)
+  public void testFullBackupMixExistAndDNE() throws Exception {
+LOG.info("create full backup fails on tableset containing real and fake 
table");
+
+List tables = toList(table1.getNameAsString(), "tabledne");
+String backupId = getBackupClient().create(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+//assertTrue(checkSucceeded(backupId)); // TODO
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
new file mode 100644
index 000..899f53b
--- 

[01/50] [abbrv] hbase git commit: HBASE-15306 Make RPC call queue length dynamically configurable

2016-03-21 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 9bf26f46d -> ab491d4a2


HBASE-15306 Make RPC call queue length dynamically configurable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f47dba74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f47dba74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f47dba74

Branch: refs/heads/HBASE-7912
Commit: f47dba74d498d5d39f124ad8ea5723c437acbc85
Parents: 58283fa
Author: Mikhail Antonov 
Authored: Tue Feb 23 14:20:40 2016 -0800
Committer: Mikhail Antonov 
Committed: Tue Feb 23 14:20:40 2016 -0800

--
 .../hbase/ipc/BalancedQueueRpcExecutor.java | 11 +-
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java| 19 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.java| 11 ++
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  3 ++
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java| 18 -
 .../hbase/ipc/TestSimpleRpcScheduler.java   | 39 
 6 files changed, 97 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f47dba74/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
index 79b4ec8..e4205eb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
@@ -66,6 +66,10 @@ public class BalancedQueueRpcExecutor extends RpcExecutor {
 
   protected void initializeQueues(final int numQueues,
   final Class queueClass, Object... initargs) {
+if (initargs.length > 0) {
+  currentQueueLimit = (int) initargs[0];
+  initargs[0] = Math.max((int) initargs[0], 
DEFAULT_CALL_QUEUE_SIZE_HARD_LIMIT);
+}
 for (int i = 0; i < numQueues; ++i) {
   queues.add((BlockingQueue) 
ReflectionUtils.newInstance(queueClass, initargs));
 }
@@ -74,7 +78,12 @@ public class BalancedQueueRpcExecutor extends RpcExecutor {
   @Override
   public boolean dispatch(final CallRunner callTask) throws 
InterruptedException {
 int queueIndex = balancer.getNextQueue();
-return queues.get(queueIndex).offer(callTask);
+BlockingQueue queue = queues.get(queueIndex);
+// that means we can overflow by at most  size (5), that's ok
+if (queue.size() >= currentQueueLimit) {
+  return false;
+}
+return queue.offer(callTask);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/f47dba74/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java
index 544370d..a9648b0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RWQueueRpcExecutor.java
@@ -139,12 +139,22 @@ public class RWQueueRpcExecutor extends RpcExecutor {
   " readQueues=" + numReadQueues + " readHandlers=" + 
readHandlersCount +
   ((numScanQueues == 0) ? "" : " scanQueues=" + numScanQueues +
 " scanHandlers=" + scanHandlersCount));
-
+if (writeQueueInitArgs.length > 0) {
+  currentQueueLimit = (int) writeQueueInitArgs[0];
+  writeQueueInitArgs[0] = Math.max((int) writeQueueInitArgs[0],
+DEFAULT_CALL_QUEUE_SIZE_HARD_LIMIT);
+}
 for (int i = 0; i < numWriteQueues; ++i) {
+
   queues.add((BlockingQueue)
 ReflectionUtils.newInstance(writeQueueClass, writeQueueInitArgs));
 }
 
+if (readQueueInitArgs.length > 0) {
+  currentQueueLimit = (int) readQueueInitArgs[0];
+  readQueueInitArgs[0] = Math.max((int) readQueueInitArgs[0],
+DEFAULT_CALL_QUEUE_SIZE_HARD_LIMIT);
+}
 for (int i = 0; i < (numReadQueues + numScanQueues); ++i) {
   queues.add((BlockingQueue)
 ReflectionUtils.newInstance(readQueueClass, readQueueInitArgs));
@@ -170,7 +180,12 @@ public class RWQueueRpcExecutor extends RpcExecutor {
 } else {
   queueIndex = numWriteQueues + readBalancer.getNextQueue();
 }
-return queues.get(queueIndex).offer(callTask);
+
+BlockingQueue queue = queues.get(queueIndex);
+if (queue.size() >= currentQueueLimit) {
+  return false;
+}
+return queue.offer(callTask);
   }
 
   private boolean isWriteRequest(final 

[41/50] [abbrv] hbase git commit: HBASE-14878 Add hbase-shaded-client archetype to hbase-archetypes

2016-03-21 Thread enis
HBASE-14878 Add hbase-shaded-client archetype to hbase-archetypes

Signed-off-by: Elliott Clark 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83297f66
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83297f66
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83297f66

Branch: refs/heads/HBASE-7912
Commit: 83297f661b80af58190591c57d3cef1e6496e56b
Parents: 88f7759
Author: Daniel Vimont 
Authored: Tue Feb 23 13:13:56 2016 +0900
Committer: Elliott Clark 
Committed: Mon Feb 29 09:09:37 2016 -0800

--
 hbase-archetypes/README.md  |  10 +-
 .../hbase-archetype-builder/createArchetypes.sh |   4 +
 .../installArchetypes.sh|   4 +
 .../hbase-archetype-builder/pom.xml |  74 +-
 .../hbase-shaded-client-project/pom.xml |  76 +++
 .../exemplars/shaded_client/HelloHBase.java | 226 +++
 .../exemplars/shaded_client/package-info.java   |  25 ++
 .../src/main/resources/log4j.properties | 111 +
 .../exemplars/shaded_client/TestHelloHBase.java | 131 +++
 hbase-archetypes/pom.xml|   3 +-
 10 files changed, 659 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/83297f66/hbase-archetypes/README.md
--
diff --git a/hbase-archetypes/README.md b/hbase-archetypes/README.md
index 3af1f8b..7997b56 100644
--- a/hbase-archetypes/README.md
+++ b/hbase-archetypes/README.md
@@ -81,11 +81,15 @@ of the new archetype. (It may be most straightforward to 
simply copy the `src`
 and `pom.xml` components from one of the existing exemplar projects, replace
 the `src/main` and `src/test` code, and modify the `pom.xml` file's
 ``, ``,` `, and `` elements.)
-2. Modify the `hbase-archetype-builder/pom.xml` file: (a) add the new exemplar
-project to the `` element, and (b) add appropriate ``
+2. Modify the `hbase-archetypes/pom.xml` file: add a new `` subelement
+to the `` element, with the new exemplar project's subdirectory name
+as its value.
+3. Modify the `hbase-archetype-builder/pom.xml` file: (a) add a new `<*.dir>`
+subelement to the `` element, with the new exemplar project's
+subdirectory name as its value, and (b) add appropriate ``
 elements and `` elements within the `` elements
 (using the existing entries from already-existing exemplar projects as a 
guide).
-3. Add appropriate entries for the new exemplar project to the
+4. Add appropriate entries for the new exemplar project to the
 `createArchetypes.sh` and `installArchetypes.sh` scripts in the
 `hbase-archetype-builder` subdirectory (using the existing entries as a guide).
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/83297f66/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
index 3aeb1c3..067fbd9 100755
--- a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
@@ -25,6 +25,10 @@ buildArchetypeSubdir=target/build-archetype
 cd /"$workingDir"/../hbase-client-project/$buildArchetypeSubdir
 mvn archetype:create-from-project
 
+# CREATE hbase-shaded-client archetype
+cd /"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir
+mvn archetype:create-from-project
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.org/repos/asf/hbase/blob/83297f66/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
index 74f118e..1067a1f 100755
--- a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
@@ -26,6 +26,10 @@ archetypeSourceSubdir=target/generated-sources/archetype
 cd 
/"$workingDir"/../hbase-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
 mvn install
 
+# INSTALL hbase-shaded-client archetype
+cd 
/"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
+mvn install
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.org/repos/asf/hbase/blob/83297f66/hbase-archetypes/hbase-archetype-builder/pom.xml

[11/50] [abbrv] hbase git commit: HBASE-15222 Use less contended classes for metrics

2016-03-21 Thread enis
HBASE-15222 Use less contended classes for metrics

Summary:
Use less contended classes for metrics.
For histogram which was the largest culprit we use FastLongHistogram
For atomic longs, where possible, we now use Counter.

Test Plan: unit tests

Reviewers:

Subscribers:

Differential Revision: https://reviews.facebook.net/D54381


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/630a6582
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/630a6582
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/630a6582

Branch: refs/heads/HBASE-7912
Commit: 630a65825ed9a9c00f72bbfcac0588e1ab0cdd72
Parents: 20e14f4
Author: Elliott Clark 
Authored: Thu Feb 18 09:54:05 2016 -0800
Committer: Elliott Clark 
Committed: Wed Feb 24 14:34:05 2016 -0800

--
 .../hadoop/hbase/util/FastLongHistogram.java| 162 ++-
 .../hbase/util/TestFastLongHistogram.java   |  32 
 .../apache/hadoop/hbase/metrics/BaseSource.java |   8 -
 .../apache/hadoop/metrics2/MetricHistogram.java |   3 +
 hbase-hadoop2-compat/pom.xml|   8 +-
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java |  50 +++---
 .../MetricsAssignmentManagerSourceImpl.java |  10 +-
 .../MetricsMasterFilesystemSourceImpl.java  |  14 +-
 .../hbase/master/MetricsMasterSourceImpl.java   |   4 +-
 .../hbase/master/MetricsSnapshotSourceImpl.java |   8 +-
 .../balancer/MetricsBalancerSourceImpl.java |   8 +-
 .../hadoop/hbase/metrics/BaseSourceImpl.java|  17 +-
 .../MetricsRegionServerSourceImpl.java  |  16 +-
 .../regionserver/MetricsRegionSourceImpl.java   |  24 +--
 .../regionserver/wal/MetricsWALSourceImpl.java  |  10 +-
 .../MetricsReplicationGlobalSourceSource.java   |  36 ++---
 .../MetricsReplicationSinkSourceImpl.java   |  16 +-
 .../MetricsReplicationSourceSourceImpl.java |  36 ++---
 .../hbase/rest/MetricsRESTSourceImpl.java   |  38 ++---
 .../thrift/MetricsThriftServerSourceImpl.java   |  13 +-
 .../metrics2/lib/DynamicMetricsRegistry.java| 103 ++--
 .../metrics2/lib/MetricMutableQuantiles.java| 154 --
 .../metrics2/lib/MetricsExecutorImpl.java   |   2 +-
 .../hadoop/metrics2/lib/MutableFastCounter.java |  60 +++
 .../hadoop/metrics2/lib/MutableHistogram.java   | 133 +--
 .../metrics2/lib/MutableRangeHistogram.java |  75 -
 .../metrics2/lib/MutableSizeHistogram.java  |  25 ++-
 .../metrics2/lib/MutableTimeHistogram.java  |  23 ++-
 .../hbase/metrics/TestBaseSourceImpl.java   |   5 +-
 .../tmpl/regionserver/BlockCacheTmpl.jamon  |   8 -
 .../tmpl/regionserver/BlockCacheViewTmpl.jamon  |   1 -
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   |   1 -
 .../hadoop/hbase/io/hfile/AgeSnapshot.java  |  38 +++--
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java   |  31 ++--
 .../hadoop/hbase/io/hfile/CacheStats.java   |  54 +++
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  10 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |   4 +-
 .../hbase/io/hfile/bucket/BucketCacheStats.java |  11 +-
 .../hbase/regionserver/StoreFileScanner.java|  14 +-
 40 files changed, 565 insertions(+), 702 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/630a6582/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
index 623cbdb..78b2bf0 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicLongArray;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -31,11 +30,20 @@ import 
org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class FastLongHistogram {
+
+  /**
+   * Default number of bins.
+   */
+  public static final int DEFAULT_NBINS = 255;
+
+  public static final double[] DEFAULT_QUANTILES =
+  new double[]{0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999};
+
   /**
* Bins is a class containing a list of buckets(or bins) for estimation 
histogram of some data.
*/
   private static class Bins {
-private final AtomicLongArray counts;
+private 

[38/50] [abbrv] hbase git commit: HBASE-15181 A simple implementation of date based tiered compaction (Clara Xiong)

2016-03-21 Thread enis
HBASE-15181 A simple implementation of date based tiered compaction (Clara 
Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f7f96b9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f7f96b9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f7f96b9f

Branch: refs/heads/HBASE-7912
Commit: f7f96b9fb70f5b2243558cf531ab7fa51162e656
Parents: 99955a3
Author: tedyu 
Authored: Fri Feb 26 17:36:23 2016 -0800
Committer: tedyu 
Committed: Fri Feb 26 17:36:23 2016 -0800

--
 .../hadoop/hbase/regionserver/StoreFile.java|   7 +
 .../compactions/CompactionConfiguration.java|  82 +-
 .../compactions/DateTieredCompactionPolicy.java | 294 +++
 .../compactions/RatioBasedCompactionPolicy.java |  18 +-
 .../hbase/regionserver/MockStoreFile.java   |  12 +
 .../regionserver/TestDateTieredCompaction.java  | 211 +
 .../TestDefaultCompactSelection.java| 187 +---
 7 files changed, 622 insertions(+), 189 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f7f96b9f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 4ced556..61eb9b8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -768,6 +768,13 @@ public class StoreFile {
 getReader().timeRangeTracker.getMinimumTimestamp();
   }
 
+  public Long getMaximumTimestamp() {
+return (getReader().timeRangeTracker == null) ?
+null :
+getReader().timeRangeTracker.getMaximumTimestamp();
+  }
+
+
   /**
* Gets the approximate mid-point of this file that is optimal for use in 
splitting it.
* @param comparator Comparator used to compare KVs.

http://git-wip-us.apache.org/repos/asf/hbase/blob/f7f96b9f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 633477e..9bb4c77 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.regionserver.compactions;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 
 /**
@@ -67,6 +67,23 @@ public class CompactionConfiguration {
   public static final String HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT =
   "hbase.hfile.compaction.discharger.thread.count";
 
+  /*
+   * The epoch time length for the windows we no longer compact
+   */
+  public static final String MAX_AGE_MILLIS_KEY =
+"hbase.hstore.compaction.date.tiered.max.storefile.age.millis";
+  public static final String BASE_WINDOW_MILLIS_KEY =
+"hbase.hstore.compaction.date.tiered.base.window.millis";
+  public static final String WINDOWS_PER_TIER_KEY =
+"hbase.hstore.compaction.date.tiered.windows.per.tier";
+  public static final String INCOMING_WINDOW_MIN_KEY =
+"hbase.hstore.compaction.date.tiered.incoming.window.min";
+  public static final String COMPACTION_POLICY_CLASS_FOR_TIERED_WINDOWS_KEY =
+"hbase.hstore.compaction.date.tiered.window.policy.class";
+
+  private static final Class
+DEFAULT_TIER_COMPACTION_POLICY_CLASS = ExploringCompactionPolicy.class;
+
   Configuration conf;
   StoreConfigInformation storeConfigInfo;
 
@@ -75,13 +92,19 @@ public class CompactionConfiguration {
   private final long maxCompactSize;
   private final long offPeakMaxCompactSize;
   private final long minCompactSize;
-  private final int minFilesToCompact;
+  /** This one can be updated **/
+  private int minFilesToCompact;
   private final int maxFilesToCompact;
   private final double compactionRatio;
   private final long throttlePoint;
   private final long majorCompactionPeriod;
   private final 

[37/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
HBASE-15128 Disable region splits and merges switch in master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99955a32
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99955a32
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99955a32

Branch: refs/heads/HBASE-7912
Commit: 99955a3240c5032daae471cacebe595134f71fc3
Parents: 793babf
Author: chenheng 
Authored: Sat Feb 27 08:36:59 2016 +0800
Committer: chenheng 
Committed: Sat Feb 27 08:36:59 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   27 +-
 .../hbase/client/ConnectionImplementation.java  |   14 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   31 +
 .../hadoop/hbase/protobuf/RequestConverter.java |   49 +
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 4304 ++
 .../protobuf/generated/SnapshotProtos.java  |  500 +-
 .../protobuf/generated/ZooKeeperProtos.java |  462 +-
 hbase-protocol/src/main/protobuf/Master.proto   |   36 +
 .../src/main/protobuf/ZooKeeper.proto   |7 +
 .../hadoop/hbase/master/AssignmentManager.java  |   10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   28 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   42 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   37 +
 .../hbase/zookeeper/SplitOrMergeTracker.java|  151 +
 .../hbase/client/TestSplitOrMergeStatus.java|  198 +
 hbase-shell/src/main/ruby/hbase/admin.rb|   32 +
 hbase-shell/src/main/ruby/shell.rb  |2 +
 .../ruby/shell/commands/splitormerge_enabled.rb |   41 +
 .../ruby/shell/commands/splitormerge_switch.rb  |   43 +
 20 files changed, 4824 insertions(+), 1200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/99955a32/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d7b52d5..c3b524b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1678,11 +1678,28 @@ public interface Admin extends Abortable, Closeable {
   List getSecurityCapabilities() throws IOException;
 
   /**
+   * Turn the Split or Merge switches on or off.
+   *
+   * @param enabled enabled or not
+   * @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param switchTypes switchType list {@link MasterSwitchType}
+   * @return Previous switch value array
+   */
+  boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final MasterSwitchType... switchTypes) 
throws IOException;
+
+  /**
+   * Query the current state of the switch
+   *
+   * @return true if the switch is enabled, false otherwise.
+   */
+  boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
+
+  /**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.
* */
-
   @InterfaceAudience.Public
   @InterfaceStability.Unstable
   public enum CompactType {
@@ -1692,4 +1709,12 @@ public interface Admin extends Abortable, Closeable {
 
 CompactType(int value) {}
   }
+  
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum MasterSwitchType {
+SPLIT,
+MERGE
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/99955a32/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index dfa9937..64eb9fb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1742,6 +1742,20 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
+  public MasterProtos.SetSplitOrMergeEnabledResponse 
setSplitOrMergeEnabled(
+RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest 
request)
+throws ServiceException {
+return stub.setSplitOrMergeEnabled(controller, request);
+  }
+
+  @Override
+  public MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(
+   

[09/50] [abbrv] hbase git commit: HBASE-15310 hbase-spark module has compilation failures with clover profile

2016-03-21 Thread enis
HBASE-15310 hbase-spark module has compilation failures with clover profile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/20e14f44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/20e14f44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/20e14f44

Branch: refs/heads/HBASE-7912
Commit: 20e14f449a9d5ba052ef6250c08ee1e4c558ccf2
Parents: 2a30643
Author: Jonathan M Hsieh 
Authored: Wed Feb 24 10:09:21 2016 -0800
Committer: Jonathan M Hsieh 
Committed: Wed Feb 24 11:54:43 2016 -0800

--
 hbase-spark/pom.xml | 35 +++
 1 file changed, 35 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/20e14f44/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 7c7590e..7767440 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -604,6 +604,41 @@
 
 
 
+
+
+
+org.codehaus.mojo
+build-helper-maven-plugin
+
+
+add-source
+validate
+
+add-source
+
+
+
+src/main/scala
+
+
+
+
+add-test-source
+validate
+
+add-test-source
+
+
+
+src/test/scala
+
+
+
+
+
 
 
 



[39/50] [abbrv] hbase git commit: HBASE-15181 adds TestCompactionPolicy which was missing in first commit

2016-03-21 Thread enis
HBASE-15181 adds TestCompactionPolicy which was missing in first commit


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03ffb30e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03ffb30e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03ffb30e

Branch: refs/heads/HBASE-7912
Commit: 03ffb30efe341c226a19b4e80ec0e3352e55806c
Parents: f7f96b9
Author: tedyu 
Authored: Fri Feb 26 19:58:33 2016 -0800
Committer: tedyu 
Committed: Fri Feb 26 19:58:33 2016 -0800

--
 .../regionserver/TestCompactionPolicy.java  | 207 +++
 1 file changed, 207 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/03ffb30e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
new file mode 100644
index 000..f5f0926
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
@@ -0,0 +1,207 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import com.google.common.collect.Lists;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import 
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestCompactionPolicy {
+  private final static Log LOG = 
LogFactory.getLog(TestDefaultCompactSelection.class);
+  protected final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+
+  protected Configuration conf;
+  protected HStore store;
+  private static final String DIR = TEST_UTIL.getDataTestDir(
+TestDefaultCompactSelection.class.getSimpleName()).toString();
+  protected static Path TEST_FILE;
+  protected static final int minFiles = 3;
+  protected static final int maxFiles = 5;
+
+  protected static final long minSize = 10;
+  protected static final long maxSize = 2100;
+
+  private FSHLog hlog;
+  private HRegion region;
+
+  @Before
+  public void setUp() throws Exception {
+config();
+initialize();
+  }
+
+  /**
+   * setup config values necessary for store
+   */
+  protected void config() {
+this.conf = TEST_UTIL.getConfiguration();
+this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 
minFiles);
+this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 
maxFiles);
+
this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, 
minSize);
+
this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, 
maxSize);
+

[49/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
new file mode 100644
index 000..1a7a1ba
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -0,0 +1,9143 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class BackupProtos {
+  private BackupProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf enum {@code hbase.pb.BackupType}
+   */
+  public enum BackupType
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * FULL = 0;
+ */
+FULL(0, 0),
+/**
+ * INCREMENTAL = 1;
+ */
+INCREMENTAL(1, 1),
+;
+
+/**
+ * FULL = 0;
+ */
+public static final int FULL_VALUE = 0;
+/**
+ * INCREMENTAL = 1;
+ */
+public static final int INCREMENTAL_VALUE = 1;
+
+
+public final int getNumber() { return value; }
+
+public static BackupType valueOf(int value) {
+  switch (value) {
+case 0: return FULL;
+case 1: return INCREMENTAL;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() {
+public BackupType findValueByNumber(int number) {
+  return BackupType.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final BackupType[] VALUES = values();
+
+public static BackupType valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private BackupType(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+  }
+
+  public interface BackupImageOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string backup_id = 1;
+/**
+ * required string backup_id = 1;
+ */
+boolean hasBackupId();
+/**
+ * required string backup_id = 1;
+ */
+java.lang.String getBackupId();
+/**
+ * required string backup_id = 1;
+ */
+com.google.protobuf.ByteString
+getBackupIdBytes();
+
+// required .hbase.pb.BackupType backup_type = 2;
+/**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+boolean hasBackupType();
+/**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType 
getBackupType();
+
+// required string root_dir = 3;
+/**
+ * required string root_dir = 3;
+ */
+boolean hasRootDir();
+/**
+ * required string root_dir = 3;
+ */
+java.lang.String getRootDir();
+/**
+ * required string root_dir = 3;
+ */
+com.google.protobuf.ByteString
+getRootDirBytes();
+
+// repeated .hbase.pb.TableName table_list = 4;
+/**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+
java.util.List
 
+getTableListList();
+/**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableList(int index);
+/**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+int getTableListCount();
+/**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+java.util.List 
+getTableListOrBuilderList();
+/**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+

[17/50] [abbrv] hbase git commit: HBASE-15311 Prevent NPE in BlockCacheViewTmpl.

2016-03-21 Thread enis
HBASE-15311 Prevent NPE in BlockCacheViewTmpl.

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75c57a04
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75c57a04
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75c57a04

Branch: refs/heads/HBASE-7912
Commit: 75c57a04ddad2d7cf3435df1eba13541775319fb
Parents: 40c5591
Author: Samir Ahmic 
Authored: Tue Feb 23 11:34:09 2016 +0100
Committer: stack 
Committed: Thu Feb 25 15:23:28 2016 -0800

--
 .../apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75c57a04/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
index fa55f6a..c6d7a61 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.jamon
@@ -44,7 +44,7 @@ org.apache.hadoop.util.StringUtils;
   if (bcn.equals("L1")) {
 bc = bcs == null || bcs.length == 0? bc: bcs[0];
   } else {
-if (bcs.length < 2) {
+if (bcs == null || bcs.length < 2) {
   System.out.println("There is no L2 block cache");
   return;
 }



[46/50] [abbrv] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (v42)

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ab491d4a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 000..18a0f06
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,571 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+
+/**
+ * This class provides 'hbase:backup' table API
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupSystemTable implements Closeable {
+
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+  private final static String TABLE_NAMESPACE = "hbase";
+  private final static String TABLE_NAME = "backup";
+  private final static TableName tableName = 
TableName.valueOf(TABLE_NAMESPACE, TABLE_NAME);
+  final static byte[] familyName = "f".getBytes();
+
+  // Connection to HBase cluster, shared
+  // among all instances
+  private final Connection connection;
+  // Cluster configuration
+  private final Configuration conf;
+
+  /**
+   * Create a BackupSystemTable object for the given Connection. Connection is 
NOT owned by this
+   * instance and has to be closed explicitly.
+   * @param connection
+   * @throws IOException
+   */
+  public BackupSystemTable(Connection connection) throws IOException {
+this.connection = connection;
+this.conf = connection.getConfiguration();
+
+createSystemTableIfNotExists();
+  }
+
+  @Override
+  public void close() {
+  }
+
+  /**
+   * Gets table name
+   * @return table name
+   */
+  public static TableName getTableName() {
+return tableName;
+  }
+
+  private void createSystemTableIfNotExists() throws IOException {
+try(Admin admin = connection.getAdmin()) {
+  if (admin.tableExists(tableName) == false) {
+HTableDescriptor tableDesc = new HTableDescriptor(tableName);
+HColumnDescriptor colDesc = new HColumnDescriptor(familyName);
+colDesc.setMaxVersions(1);
+int ttl =
+conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, 
HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
+colDesc.setTimeToLive(ttl);
+tableDesc.addFamily(colDesc);
+admin.createTable(tableDesc);
+  }
+} catch (IOException e) {
+  LOG.error(e);
+  throw e;
+}
+  }
+
+  /**
+   * Updates status (state) of a backup session in hbase:backup table
+   * @param context 

[14/50] [abbrv] hbase git commit: HBASE-15136 Explore different queuing behaviors while busy

2016-03-21 Thread enis
HBASE-15136 Explore different queuing behaviors while busy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/43f99def
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/43f99def
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/43f99def

Branch: refs/heads/HBASE-7912
Commit: 43f99def670551cfe314c44181c0cb9570cdaaa3
Parents: 6e9d355
Author: Mikhail Antonov 
Authored: Wed Feb 24 20:40:44 2016 -0800
Committer: Mikhail Antonov 
Committed: Wed Feb 24 20:41:30 2016 -0800

--
 .../hadoop/hbase/util/ReflectionUtils.java  |   1 +
 .../hbase/ipc/MetricsHBaseServerSource.java |   6 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java|   2 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java |   6 +-
 .../hbase/ipc/AdaptiveLifoCoDelCallQueue.java   | 329 +++
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  |  10 +
 .../ipc/MetricsHBaseServerWrapperImpl.java  |  16 +
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java|  10 +
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |  13 +
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java|  70 +++-
 .../ipc/MetricsHBaseServerWrapperStub.java  |  10 +
 .../hbase/ipc/TestSimpleRpcScheduler.java   |  63 
 12 files changed, 534 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/43f99def/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
index 650c544..15b3930 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
@@ -85,6 +85,7 @@ public class ReflectionUtils {
 match = (!ctorParamTypes[i].isPrimitive()) ? 
ctorParamTypes[i].isAssignableFrom(paramType) :
   ((int.class.equals(ctorParamTypes[i]) && 
Integer.class.equals(paramType)) ||
(long.class.equals(ctorParamTypes[i]) && 
Long.class.equals(paramType)) ||
+   (double.class.equals(ctorParamTypes[i]) && 
Double.class.equals(paramType)) ||
(char.class.equals(ctorParamTypes[i]) && 
Character.class.equals(paramType)) ||
(short.class.equals(ctorParamTypes[i]) && 
Short.class.equals(paramType)) ||
(boolean.class.equals(ctorParamTypes[i]) && 
Boolean.class.equals(paramType)) ||

http://git-wip-us.apache.org/repos/asf/hbase/blob/43f99def/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 061a672..bb89789 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -64,6 +64,12 @@ public interface MetricsHBaseServerSource extends BaseSource 
{
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";
   String NUM_ACTIVE_HANDLER_DESC = "Number of active rpc handlers.";
+  String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped";
+  String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general 
queue which " +
+"were dropped by CoDel RPC executor";
+  String NUM_LIFO_MODE_SWITCHES_NAME = "numLifoModeSwitches";
+  String NUM_LIFO_MODE_SWITCHES_DESC = "Total number of calls in general queue 
which " +
+"were served from the tail of the queue";
 
   String EXCEPTIONS_NAME="exceptions";
   String EXCEPTIONS_DESC="Exceptions caused by requests";

http://git-wip-us.apache.org/repos/asf/hbase/blob/43f99def/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
index 1885264..8f30205 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
@@ -26,4 +26,6 @@ public interface MetricsHBaseServerWrapper {
   int 

[20/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/24d481c5/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 043d549..073eba9 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -8,6 +8,88 @@ public final class MasterProtos {
   public static void registerAllExtensions(
   com.google.protobuf.ExtensionRegistry registry) {
   }
+  /**
+   * Protobuf enum {@code hbase.pb.MasterSwitchType}
+   */
+  public enum MasterSwitchType
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * SPLIT = 0;
+ */
+SPLIT(0, 0),
+/**
+ * MERGE = 1;
+ */
+MERGE(1, 1),
+;
+
+/**
+ * SPLIT = 0;
+ */
+public static final int SPLIT_VALUE = 0;
+/**
+ * MERGE = 1;
+ */
+public static final int MERGE_VALUE = 1;
+
+
+public final int getNumber() { return value; }
+
+public static MasterSwitchType valueOf(int value) {
+  switch (value) {
+case 0: return SPLIT;
+case 1: return MERGE;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() {
+public MasterSwitchType findValueByNumber(int number) {
+  return MasterSwitchType.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final MasterSwitchType[] VALUES = values();
+
+public static MasterSwitchType valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private MasterSwitchType(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType)
+  }
+
   public interface AddColumnRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
@@ -28764,28 +28846,62 @@ public final class MasterProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.IsBalancerEnabledResponse)
   }
 
-  public interface NormalizeRequestOrBuilder
+  public interface SetSplitOrMergeEnabledRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
+
+// required bool enabled = 1;
+/**
+ * required bool enabled = 1;
+ */
+boolean hasEnabled();
+/**
+ * required bool enabled = 1;
+ */
+boolean getEnabled();
+
+// optional bool synchronous = 2;
+/**
+ * optional bool synchronous = 2;
+ */
+boolean hasSynchronous();
+/**
+ * optional bool synchronous = 2;
+ */
+boolean getSynchronous();
+
+// repeated .hbase.pb.MasterSwitchType switch_types = 3;
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+
java.util.List
 getSwitchTypesList();
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+int getSwitchTypesCount();
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
   }
   /**
-   * Protobuf type {@code hbase.pb.NormalizeRequest}
+   * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
*/
-  public static final class NormalizeRequest extends
+  public static final class SetSplitOrMergeEnabledRequest extends
   com.google.protobuf.GeneratedMessage
-  implements NormalizeRequestOrBuilder {
-// Use NormalizeRequest.newBuilder() to construct.
-private NormalizeRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  implements SetSplitOrMergeEnabledRequestOrBuilder {
+// Use 

[16/50] [abbrv] hbase git commit: HBASE-15144 Procedure v2 - Web UI displaying Store state

2016-03-21 Thread enis
HBASE-15144 Procedure v2 - Web UI displaying Store state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/40c55915
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/40c55915
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/40c55915

Branch: refs/heads/HBASE-7912
Commit: 40c55915e7a45a639adb7f7a370a04f38058ac26
Parents: 77133fd
Author: Samir Ahmic 
Authored: Wed Feb 24 16:05:24 2016 +0100
Committer: Matteo Bertozzi 
Committed: Thu Feb 25 10:46:56 2016 -0800

--
 .../procedure2/store/wal/ProcedureWALFile.java  |  32 +++--
 .../store/wal/ProcedureWALFormat.java   |   9 +-
 .../store/wal/ProcedureWALFormatReader.java |   8 +-
 .../procedure2/store/wal/WALProcedureStore.java | 108 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   8 +-
 .../hbase-webapps/master/procedures.jsp | 118 ++-
 6 files changed, 244 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/40c55915/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 6493526..097cd29 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -22,12 +22,12 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
 import 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
 import 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer;
@@ -42,24 +42,29 @@ public class ProcedureWALFile implements 
Comparable {
 
   private ProcedureWALHeader header;
   private FSDataInputStream stream;
-  private FileStatus logStatus;
   private FileSystem fs;
   private Path logFile;
   private long startPos;
   private long minProcId;
   private long maxProcId;
+  private long logSize;
+  private long timestamp;
 
   public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) {
 this.fs = fs;
-this.logStatus = logStatus;
 this.logFile = logStatus.getPath();
+this.logSize = logStatus.getLen();
+this.timestamp = logStatus.getModificationTime();
   }
 
-  public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader 
header, long startPos) {
+  public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader 
header,
+  long startPos, long timestamp) {
 this.fs = fs;
-this.logFile = logFile;
 this.header = header;
+this.logFile = logFile;
 this.startPos = startPos;
+this.logSize = startPos;
+this.timestamp = timestamp;
   }
 
   public void open() throws IOException {
@@ -77,7 +82,7 @@ public class ProcedureWALFile implements 
Comparable {
 
   public ProcedureWALTrailer readTrailer() throws IOException {
 try {
-  return ProcedureWALFormat.readTrailer(stream, startPos, 
logStatus.getLen());
+  return ProcedureWALFormat.readTrailer(stream, startPos, logSize);
 } finally {
   stream.seek(startPos);
 }
@@ -112,6 +117,10 @@ public class ProcedureWALFile implements 
Comparable {
 return header;
   }
 
+  public long getTimestamp() {
+return timestamp;
+  }
+
   public boolean isCompacted() {
 return header.getType() == ProcedureWALFormat.LOG_TYPE_COMPACTED;
   }
@@ -121,7 +130,14 @@ public class ProcedureWALFile implements 
Comparable {
   }
 
   public long getSize() {
-return logStatus != null ? logStatus.getLen() : 0;
+return logSize;
+  }
+
+  /**
+   * Used to update in-progress log sizes. the FileStatus will report 0 
otherwise.
+   */
+  void addToSize(long size) {
+this.logSize += size;
   }
 
   public void removeFile() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/40c55915/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java

[08/50] [abbrv] hbase git commit: HBASE-15312 Update the dependences of pom for mini cluster in HBase Book (Liu Shaohui)

2016-03-21 Thread enis
HBASE-15312 Update the dependences of pom for mini cluster in HBase Book (Liu 
Shaohui)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a306437
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a306437
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a306437

Branch: refs/heads/HBASE-7912
Commit: 2a306437aaccb99ff333ab41c7165333994eba48
Parents: 30cec72
Author: stack 
Authored: Wed Feb 24 09:31:10 2016 -0800
Committer: stack 
Committed: Wed Feb 24 09:31:10 2016 -0800

--
 src/main/asciidoc/_chapters/unit_testing.adoc | 60 ++
 1 file changed, 39 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a306437/src/main/asciidoc/_chapters/unit_testing.adoc
--
diff --git a/src/main/asciidoc/_chapters/unit_testing.adoc 
b/src/main/asciidoc/_chapters/unit_testing.adoc
index e1bcf87..15b6cce 100644
--- a/src/main/asciidoc/_chapters/unit_testing.adoc
+++ b/src/main/asciidoc/_chapters/unit_testing.adoc
@@ -268,37 +268,55 @@ Check the versions to be sure they are appropriate.
 
 [source,xml]
 
+
+  2.0.0-SNAPSHOT
+  2.7.1
+
 
-
-org.apache.hadoop
-hadoop-common
-2.0.0
+
+  
+org.apache.hbase
+hbase-server
+${hbase.version}
+test
+  
+  
+org.apache.hbase
+hbase-server
+${hbase.version}
 test-jar
 test
-
-
-
+  
+  
 org.apache.hbase
-hbase
-0.98.3
+hbase-hadoop-compat
+${hbase.version}
 test-jar
 test
-
+  
 
-
+  
 org.apache.hadoop
-hadoop-hdfs
-2.0.0
+hadoop-common
+${hadoop.version}
 test-jar
 test
-
-
-
+  
+  
+org.apache.hbase
+hbase-hadoop2-compat
+${hbase.version}
+test-jar
+test
+  
+  
 org.apache.hadoop
 hadoop-hdfs
-2.0.0
+${hadoop.version}
+test-jar
 test
-
+  
+
 
 
 This code represents an integration test for the MyDAO insert shown in 
<>.
@@ -309,7 +327,8 @@ This code represents an integration test for the MyDAO 
insert shown in <

[19/50] [abbrv] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-03-21 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/24d481c5/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
index 8dbb5ad..9805d50 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -11,13 +11,13 @@ public final class SnapshotProtos {
   public interface SnapshotFileInfoOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 boolean hasType();
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType();
 
@@ -67,7 +67,7 @@ public final class SnapshotProtos {
 getWalNameBytes();
   }
   /**
-   * Protobuf type {@code SnapshotFileInfo}
+   * Protobuf type {@code hbase.pb.SnapshotFileInfo}
*/
   public static final class SnapshotFileInfo extends
   com.google.protobuf.GeneratedMessage
@@ -157,12 +157,12 @@ public final class SnapshotProtos {
 }
 public static final com.google.protobuf.Descriptors.Descriptor
 getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
 }
 
 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
 internalGetFieldAccessorTable() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_fieldAccessorTable
   .ensureFieldAccessorsInitialized(
   
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class,
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class);
 }
@@ -183,7 +183,7 @@ public final class SnapshotProtos {
 }
 
 /**
- * Protobuf enum {@code SnapshotFileInfo.Type}
+ * Protobuf enum {@code hbase.pb.SnapshotFileInfo.Type}
  */
 public enum Type
 implements com.google.protobuf.ProtocolMessageEnum {
@@ -261,21 +261,21 @@ public final class SnapshotProtos {
 this.value = value;
   }
 
-  // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type)
+  // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotFileInfo.Type)
 }
 
 private int bitField0_;
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 public static final int TYPE_FIELD_NUMBER = 1;
 private 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
type_;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public boolean hasType() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType() {
   return type_;
@@ -613,19 +613,19 @@ public final class SnapshotProtos {
   return builder;
 }
 /**
- * Protobuf type {@code SnapshotFileInfo}
+ * Protobuf type {@code hbase.pb.SnapshotFileInfo}
  */
 public static final class Builder extends
 com.google.protobuf.GeneratedMessage.Builder
implements 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder
 {
   public static final com.google.protobuf.Descriptors.Descriptor
   getDescriptor() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
   }
 
   protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
   internalGetFieldAccessorTable() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+return 

[3/3] hbase git commit: HBASE-15464 Flush / Compaction metrics revisited

2016-03-21 Thread enis
HBASE-15464 Flush / Compaction metrics revisited

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/179cd14c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/179cd14c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/179cd14c

Branch: refs/heads/branch-1.3
Commit: 179cd14ce11e38fb1e32f0ad7fac4b115251612f
Parents: 64f1b12
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Mar 21 17:50:02 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 21 17:56:41 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 107 ++-
 .../MetricsRegionServerSourceImpl.java  | 130 ++-
 .../procedure/flush/FlushTableSubprocedure.java |   1 +
 .../hbase/regionserver/DefaultStoreEngine.java  |   6 -
 .../hadoop/hbase/regionserver/HRegion.java  |  10 +-
 .../hbase/regionserver/HRegionServer.java   |   6 +
 .../hadoop/hbase/regionserver/HStore.java   |  38 +-
 .../hbase/regionserver/MemStoreFlusher.java |  17 +--
 .../hbase/regionserver/MetricsRegionServer.java |  13 +-
 .../hbase/regionserver/RSRpcServices.java   |  27 +---
 .../regionserver/RegionServerServices.java  |   5 +
 .../apache/hadoop/hbase/regionserver/Store.java |   7 +-
 .../hbase/regionserver/StoreFlushContext.java   |   5 +
 .../hbase/regionserver/StripeStoreEngine.java   |   7 -
 .../compactions/CompactionContext.java  |   7 -
 .../compactions/CompactionRequest.java  |   3 +
 .../hadoop/hbase/MockRegionServerServices.java  |   6 +
 .../hadoop/hbase/master/MockRegionServer.java   |   6 +
 .../hbase/regionserver/TestCompaction.java  |  19 +--
 .../regionserver/TestMetricsRegionServer.java   |  64 +
 .../regionserver/TestStripeStoreEngine.java |   2 +-
 21 files changed, 398 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/179cd14c/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index aaae034..4fd5728 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -146,6 +146,53 @@ public interface MetricsRegionServerSource extends 
BaseSource {
*/
   void updateFlushTime(long t);
 
+  /**
+   * Update the flush memstore size histogram
+   * @param bytes the number of bytes in the memstore
+   */
+  void updateFlushMemstoreSize(long bytes);
+
+  /**
+   * Update the flush output file size histogram
+   * @param bytes the number of bytes in the output file
+   */
+  void updateFlushOutputSize(long bytes);
+
+  /**
+   * Update the compaction time histogram, both major and minor
+   * @param isMajor whether compaction is a major compaction
+   * @param t time it took, in milliseconds
+   */
+  void updateCompactionTime(boolean isMajor, long t);
+
+  /**
+   * Update the compaction input number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionInputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total input file size histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionInputSize(boolean isMajor, long bytes);
+
+  /**
+   * Update the compaction output number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionOutputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total output file size
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionOutputSize(boolean isMajor, long bytes);
+
   // Strings used for exporting to metrics system.
   String REGION_COUNT = "regionCount";
   String REGION_COUNT_DESC = "Number of regions";
@@ -209,6 +256,10 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   Stri

[1/3] hbase git commit: HBASE-15464 Flush / Compaction metrics revisited

2016-03-21 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1c5002660 -> 249e37f83
  refs/heads/branch-1.3 64f1b124d -> 179cd14ce
  refs/heads/master 75252af3a -> 797562e6c


HBASE-15464 Flush / Compaction metrics revisited


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/797562e6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/797562e6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/797562e6

Branch: refs/heads/master
Commit: 797562e6c3a2131f93a1ab5d777abf1867d91383
Parents: 75252af
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Mar 21 17:50:02 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 21 17:50:02 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 107 ++-
 .../MetricsRegionServerSourceImpl.java  | 130 ++-
 .../procedure/flush/FlushTableSubprocedure.java |   1 +
 .../hbase/regionserver/DefaultStoreEngine.java  |   6 -
 .../hadoop/hbase/regionserver/HRegion.java  |  11 +-
 .../hbase/regionserver/HRegionServer.java   |   6 +
 .../hadoop/hbase/regionserver/HStore.java   |  38 +-
 .../hbase/regionserver/MemStoreFlusher.java |  17 +--
 .../hbase/regionserver/MetricsRegionServer.java |  13 +-
 .../hbase/regionserver/RSRpcServices.java   |  27 +---
 .../regionserver/RegionServerServices.java  |   5 +
 .../apache/hadoop/hbase/regionserver/Store.java |   7 +-
 .../hbase/regionserver/StoreFlushContext.java   |   5 +
 .../hbase/regionserver/StripeStoreEngine.java   |   7 -
 .../compactions/CompactionContext.java  |   7 -
 .../compactions/CompactionRequest.java  |   3 +
 .../hadoop/hbase/MockRegionServerServices.java  |   7 +
 .../hadoop/hbase/master/MockRegionServer.java   |   7 +
 .../hbase/regionserver/TestCompaction.java  |  19 +--
 .../regionserver/TestMetricsRegionServer.java   |  64 +
 .../regionserver/TestStripeStoreEngine.java |   2 +-
 21 files changed, 401 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/797562e6/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index ee3e847..f097296 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -146,6 +146,53 @@ public interface MetricsRegionServerSource extends 
BaseSource {
*/
   void updateFlushTime(long t);
 
+  /**
+   * Update the flush memstore size histogram
+   * @param bytes the number of bytes in the memstore
+   */
+  void updateFlushMemstoreSize(long bytes);
+
+  /**
+   * Update the flush output file size histogram
+   * @param bytes the number of bytes in the output file
+   */
+  void updateFlushOutputSize(long bytes);
+
+  /**
+   * Update the compaction time histogram, both major and minor
+   * @param isMajor whether compaction is a major compaction
+   * @param t time it took, in milliseconds
+   */
+  void updateCompactionTime(boolean isMajor, long t);
+
+  /**
+   * Update the compaction input number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionInputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total input file size histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionInputSize(boolean isMajor, long bytes);
+
+  /**
+   * Update the compaction output number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionOutputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total output file size
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionOutputSize(boolean isMajor, long bytes);
+
   // Strings used for exporting to metrics system.
   String REGION_COUNT = "regionCount";
   String REGION_COUNT_DESC = "Number of regions";
@@ -212,6 +259,10 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String LARGE_COMPACTION_QUEUE_LENGTH = "largeCompactionQueueLength";
   String SMALL_COMPACTION_QUE

[2/3] hbase git commit: HBASE-15464 Flush / Compaction metrics revisited

2016-03-21 Thread enis
HBASE-15464 Flush / Compaction metrics revisited

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java

hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/249e37f8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/249e37f8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/249e37f8

Branch: refs/heads/branch-1
Commit: 249e37f83c32a8231824d4e84f298e3b344e158a
Parents: 1c50026
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Mar 21 17:50:02 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 21 17:56:22 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 107 ++-
 .../MetricsRegionServerSourceImpl.java  | 130 ++-
 .../procedure/flush/FlushTableSubprocedure.java |   1 +
 .../hbase/regionserver/DefaultStoreEngine.java  |   6 -
 .../hadoop/hbase/regionserver/HRegion.java  |  10 +-
 .../hbase/regionserver/HRegionServer.java   |   6 +
 .../hadoop/hbase/regionserver/HStore.java   |  38 +-
 .../hbase/regionserver/MemStoreFlusher.java |  17 +--
 .../hbase/regionserver/MetricsRegionServer.java |  13 +-
 .../hbase/regionserver/RSRpcServices.java   |  27 +---
 .../regionserver/RegionServerServices.java  |   5 +
 .../apache/hadoop/hbase/regionserver/Store.java |   7 +-
 .../hbase/regionserver/StoreFlushContext.java   |   5 +
 .../hbase/regionserver/StripeStoreEngine.java   |   7 -
 .../compactions/CompactionContext.java  |   7 -
 .../compactions/CompactionRequest.java  |   3 +
 .../hadoop/hbase/MockRegionServerServices.java  |   6 +
 .../hadoop/hbase/master/MockRegionServer.java   |   6 +
 .../hbase/regionserver/TestCompaction.java  |  19 +--
 .../regionserver/TestMetricsRegionServer.java   |  64 +
 .../regionserver/TestStripeStoreEngine.java |   2 +-
 21 files changed, 398 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/249e37f8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index aaae034..4fd5728 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -146,6 +146,53 @@ public interface MetricsRegionServerSource extends 
BaseSource {
*/
   void updateFlushTime(long t);
 
+  /**
+   * Update the flush memstore size histogram
+   * @param bytes the number of bytes in the memstore
+   */
+  void updateFlushMemstoreSize(long bytes);
+
+  /**
+   * Update the flush output file size histogram
+   * @param bytes the number of bytes in the output file
+   */
+  void updateFlushOutputSize(long bytes);
+
+  /**
+   * Update the compaction time histogram, both major and minor
+   * @param isMajor whether compaction is a major compaction
+   * @param t time it took, in milliseconds
+   */
+  void updateCompactionTime(boolean isMajor, long t);
+
+  /**
+   * Update the compaction input number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionInputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total input file size histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionInputSize(boolean isMajor, long bytes);
+
+  /**
+   * Update the compaction output number of files histogram
+   * @param isMajor whether compaction is a major compaction
+   * @param c number of files
+   */
+  void updateCompactionOutputFileCount(boolean isMajor, long c);
+
+  /**
+   * Update the compaction total output file size
+   * @param isMajor whether compaction is a major compaction
+   * @param bytes the number of bytes of the compaction input file
+   */
+  void updateCompactionOutputSize(boolean isMajor, long bytes);
+
   // Strings used for exporting to metrics system.
   String REGION_COUNT = "regionCount";
   String REGION_COUNT_DESC = "Number of regions";
@@ -209,6 +256,10 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   Stri

[2/3] hbase git commit: HBASE-15377 Per-RS Get metric is time based, per-region metric is size-based (Heng Chen)

2016-03-15 Thread enis
HBASE-15377 Per-RS Get metric is time based, per-region metric is size-based 
(Heng Chen)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/934c0274
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/934c0274
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/934c0274

Branch: refs/heads/branch-1
Commit: 934c0274e32cb09ee25b80cb48c6fc2bfe28ac26
Parents: d0bd490
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 15 11:22:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 15 13:35:13 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java |  1 +
 .../hbase/regionserver/MetricsRegionSource.java |  8 +++-
 .../regionserver/MetricsRegionSourceImpl.java   | 16 ++-
 .../hadoop/hbase/regionserver/HRegion.java  | 13 --
 .../hbase/regionserver/MetricsRegion.java   |  8 +++-
 .../regionserver/TestRegionServerMetrics.java   | 46 
 6 files changed, 83 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/934c0274/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 922ffbf..aaae034 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -255,6 +255,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String UPDATES_BLOCKED_DESC =
   "Number of MS updates have been blocked so that the memstore can be 
flushed.";
   String DELETE_KEY = "delete";
+  String GET_SIZE_KEY = "getSize";
   String GET_KEY = "get";
   String INCREMENT_KEY = "increment";
   String MUTATE_KEY = "mutate";

http://git-wip-us.apache.org/repos/asf/hbase/blob/934c0274/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index 11fc068..8dc7e11 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -59,7 +59,13 @@ public interface MetricsRegionSource extends 
Comparable {
* Update count and sizes of gets.
* @param getSize size in bytes of the resulting key values for a get
*/
-  void updateGet(long getSize);
+  void updateGetSize(long getSize);
+
+  /**
+   * Update time of gets
+   * @param mills time for this get operation.
+   */
+  void updateGet(long mills);
 
   /**
* Update the count and sizes of resultScanner.next()

http://git-wip-us.apache.org/repos/asf/hbase/blob/934c0274/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 4ef977c..42cddd0 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -48,6 +48,7 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
   private final String regionNamePrefix;
   private final String regionPutKey;
   private final String regionDeleteKey;
+  private final String regionGetSizeKey;
   private final String regionGetKey;
   private final String regionIncrementKey;
   private final String regionAppendKey;
@@ -58,6 +59,7 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
   private final MutableFastCounter regionDelete;
   private final MutableFastCounter regionIncrement;
   private final MutableFastCounter regionAppend;
+  private final MetricHistogram reg

[3/3] hbase git commit: HBASE-15377 Per-RS Get metric is time based, per-region metric is size-based (Heng Chen)

2016-03-15 Thread enis
HBASE-15377 Per-RS Get metric is time based, per-region metric is size-based 
(Heng Chen)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ff55175
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ff55175
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ff55175

Branch: refs/heads/branch-1.3
Commit: 5ff551754e7b64e3ddb4e609698a3395e0f73402
Parents: e800f04
Author: Enis Soztutar <e...@apache.org>
Authored: Tue Mar 15 11:22:18 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Tue Mar 15 13:38:31 2016 -0700

--
 .../regionserver/MetricsRegionServerSource.java |  1 +
 .../hbase/regionserver/MetricsRegionSource.java |  8 +++-
 .../regionserver/MetricsRegionSourceImpl.java   | 16 ++-
 .../hadoop/hbase/regionserver/HRegion.java  | 13 --
 .../hbase/regionserver/MetricsRegion.java   |  8 +++-
 .../regionserver/TestRegionServerMetrics.java   | 46 
 6 files changed, 83 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ff55175/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 922ffbf..aaae034 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -255,6 +255,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String UPDATES_BLOCKED_DESC =
   "Number of MS updates have been blocked so that the memstore can be 
flushed.";
   String DELETE_KEY = "delete";
+  String GET_SIZE_KEY = "getSize";
   String GET_KEY = "get";
   String INCREMENT_KEY = "increment";
   String MUTATE_KEY = "mutate";

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ff55175/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index 11fc068..8dc7e11 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -59,7 +59,13 @@ public interface MetricsRegionSource extends 
Comparable {
* Update count and sizes of gets.
* @param getSize size in bytes of the resulting key values for a get
*/
-  void updateGet(long getSize);
+  void updateGetSize(long getSize);
+
+  /**
+   * Update time of gets
+   * @param mills time for this get operation.
+   */
+  void updateGet(long mills);
 
   /**
* Update the count and sizes of resultScanner.next()

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ff55175/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 4ef977c..42cddd0 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -48,6 +48,7 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
   private final String regionNamePrefix;
   private final String regionPutKey;
   private final String regionDeleteKey;
+  private final String regionGetSizeKey;
   private final String regionGetKey;
   private final String regionIncrementKey;
   private final String regionAppendKey;
@@ -58,6 +59,7 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
   private final MutableFastCounter regionDelete;
   private final MutableFastCounter regionIncrement;
   private final MutableFastCounter regionAppend;
+  private final MetricHistogram reg

[1/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/master 122e6f579 -> ca816f078


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index c096a9f..66ea050 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2738,4 +2738,79 @@ public class TestAccessController extends SecureTestUtil 
{
 verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER,
   USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
   }
+
+  @Test
+  public void testMoveServers() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testMoveTables() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testAddGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preAddRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testRemoveGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preRemoveRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testBalanceGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preBalanceRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-shell/pom.xml
--
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 7c3754e..cf63e94 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -253,6 +253,41 @@

   
   
+
+  rsgroup
+  
+
+!skip-rsgroup
+
+  
+  
+
+  org.apache.hbase
+  hbase-rsgroup
+
+  
+  
+
+  
+org.codehaus.mojo
+build-helper-maven-plugin
+
+  
+add-test-source
+
+  add-test-source
+
+
+  
+src/test/rsgroup
+  
+
+  
+
+  
+
+  
+
 
 
   skipShellTests

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-shell/src/main/ruby/hbase.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase.rb 
b/hbase-shell/src/main/ruby/hbase.rb
index aca1006..21f88f9 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -102,5 +102,6 @@ require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+require 'hbase/rsgroup_admin'
 
 include HBaseQuotasConstants

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-shell/src/main/ruby/hbase/hbase.rb

[7/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
HBASE-6721 RegionServer Group based Assignment (Francis Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca816f07
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca816f07
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca816f07

Branch: refs/heads/master
Commit: ca816f0780f6e5a117b85810cf35f3b29c964ddc
Parents: 122e6f5
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Mar 14 18:28:50 2016 -0700
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Mar 14 18:28:50 2016 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |69 +-
 .../org/apache/hadoop/hbase/ServerName.java |24 +-
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |   187 +
 hbase-it/pom.xml|10 +
 .../hbase/rsgroup/IntegrationTestRSGroup.java   |99 +
 hbase-protocol/pom.xml  | 2 +
 .../protobuf/generated/RSGroupAdminProtos.java  | 11855 +
 .../hbase/protobuf/generated/RSGroupProtos.java |  1331 ++
 hbase-protocol/src/main/protobuf/RSGroup.proto  |34 +
 .../src/main/protobuf/RSGroupAdmin.proto|   136 +
 hbase-rsgroup/pom.xml   |   346 +
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   121 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   204 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   965 ++
 .../hbase/rsgroup/RSGroupAdminServer.java   |   501 +
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |   422 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   132 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   755 ++
 .../hadoop/hbase/rsgroup/RSGroupSerDe.java  |88 +
 .../hbase/rsgroup/RSGroupableBalancer.java  |29 +
 .../balancer/TestRSGroupBasedLoadBalancer.java  |   574 +
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   287 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   643 +
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   187 +
 .../rsgroup/VerifyingRSGroupAdminClient.java|   149 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|34 +-
 .../BaseMasterAndRegionObserver.java|53 +
 .../hbase/coprocessor/BaseMasterObserver.java   |54 +
 .../hbase/coprocessor/MasterObserver.java   |98 +
 .../hadoop/hbase/master/AssignmentManager.java  |25 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |32 +-
 .../hadoop/hbase/master/LoadBalancer.java   | 3 +
 .../hbase/master/MasterCoprocessorHost.java |   137 +
 .../hadoop/hbase/master/MasterRpcServices.java  | 8 +
 .../hadoop/hbase/master/MasterServices.java | 5 +
 .../hbase/security/access/AccessController.java |32 +
 .../hbase/coprocessor/TestMasterObserver.java   |53 +
 .../master/TestAssignmentManagerOnCluster.java  |   127 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java | 3 +
 .../hbase/master/TestMasterStatusServlet.java   |12 +-
 .../normalizer/TestSimpleRegionNormalizer.java  | 2 +-
 .../security/access/TestAccessController.java   |75 +
 hbase-shell/pom.xml |35 +
 hbase-shell/src/main/ruby/hbase.rb  | 1 +
 hbase-shell/src/main/ruby/hbase/hbase.rb| 4 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   150 +
 hbase-shell/src/main/ruby/shell.rb  |21 +
 hbase-shell/src/main/ruby/shell/commands.rb | 4 +
 .../src/main/ruby/shell/commands/add_rsgroup.rb |39 +
 .../main/ruby/shell/commands/balance_rsgroup.rb |37 +
 .../src/main/ruby/shell/commands/get_rsgroup.rb |44 +
 .../ruby/shell/commands/get_server_rsgroup.rb   |40 +
 .../ruby/shell/commands/get_table_rsgroup.rb|41 +
 .../main/ruby/shell/commands/list_rsgroups.rb   |50 +
 .../ruby/shell/commands/move_rsgroup_servers.rb |37 +
 .../ruby/shell/commands/move_rsgroup_tables.rb  |37 +
 .../main/ruby/shell/commands/remove_rsgroup.rb  |37 +
 .../apache/hadoop/hbase/client/TestShell.java   | 2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   111 +
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |96 +
 hbase-shell/src/test/ruby/test_helper.rb| 4 +
 pom.xml |23 +
 62 files changed, 20652 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index b052e63..9c71d97 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/s

[4/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
new file mode 100644
index 000..e51dcc2
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -0,0 +1,965 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
+import 

[6/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
new file mode 100644
index 000..3d1f4bd
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -0,0 +1,11855 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroupAdmin.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupAdminProtos {
+  private RSGroupAdminProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ListTablesOfRSGroupRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string r_s_group_name = 1;
+/**
+ * required string r_s_group_name = 1;
+ */
+boolean hasRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+java.lang.String getRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+com.google.protobuf.ByteString
+getRSGroupNameBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListTablesOfRSGroupRequest}
+   */
+  public static final class ListTablesOfRSGroupRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ListTablesOfRSGroupRequestOrBuilder {
+// Use ListTablesOfRSGroupRequest.newBuilder() to construct.
+private 
ListTablesOfRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ListTablesOfRSGroupRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ListTablesOfRSGroupRequest defaultInstance;
+public static ListTablesOfRSGroupRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ListTablesOfRSGroupRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ListTablesOfRSGroupRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  rSGroupName_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.class,
 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.Builder.class);
+}
+
+public static com.google.protobuf.Parser 
PARSER =
+new com.google.protobuf.AbstractParser() {
+  public ListTablesOfRSGroupRequest parsePartialFrom(
+  

[2/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
new file mode 100644
index 000..9225e09
--- /dev/null
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -0,0 +1,643 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseCluster;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class TestRSGroupsBase {
+  protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
+
+  //shared
+  protected final static String groupPrefix = "Group";
+  protected final static String tablePrefix = "Group";
+  protected final static SecureRandom rand = new SecureRandom();
+
+  //shared, cluster type specific
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseAdmin admin;
+  protected static HBaseCluster cluster;
+  protected static RSGroupAdmin rsGroupAdmin;
+
+  public final static long WAIT_TIMEOUT = 6*5;
+  public final static int NUM_SLAVES_BASE = 4; //number of slaves for the 
smallest cluster
+
+
+
+  protected RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName,
+ int serverCount) throws IOException, 
InterruptedException {
+RSGroupInfo defaultInfo = gAdmin
+.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+assertTrue(defaultInfo != null);
+assertTrue(defaultInfo.getServers().size() >= serverCount);
+gAdmin.addRSGroup(groupName);
+
+Set set = new HashSet();
+for(HostAndPort server: defaultInfo.getServers()) {
+  if(set.size() == serverCount) {
+break;
+  }
+  set.add(server);
+}
+gAdmin.moveServers(set, groupName);
+RSGroupInfo result = gAdmin.getRSGroupInfo(groupName);
+assertTrue(result.getServers().size() >= serverCount);
+return result;
+  }
+
+  static void removeGroup(RSGroupAdminClient groupAdmin, String groupName) 
throws IOException {
+RSGroupInfo RSGroupInfo = groupAdmin.getRSGroupInfo(groupName);
+groupAdmin.moveTables(RSGroupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.moveServers(RSGroupInfo.getServers(), 
RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.removeRSGroup(groupName);
+  }
+
+  protected void deleteTableIfNecessary() throws IOException {
+for (HTableDescriptor desc : 
TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) {
+  

[5/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
new file mode 100644
index 000..979f762
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
@@ -0,0 +1,1331 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupProtos {
+  private RSGroupProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RSGroupInfoOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string name = 1;
+/**
+ * required string name = 1;
+ */
+boolean hasName();
+/**
+ * required string name = 1;
+ */
+java.lang.String getName();
+/**
+ * required string name = 1;
+ */
+com.google.protobuf.ByteString
+getNameBytes();
+
+// repeated .hbase.pb.ServerName servers = 4;
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated .hbase.pb.TableName tables = 3;
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+
java.util.List
 
+getTablesList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTables(int index);
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+int getTablesCount();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+java.util.List 
+getTablesOrBuilderList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTablesOrBuilder(
+int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RSGroupInfo}
+   */
+  public static final class RSGroupInfo extends
+  com.google.protobuf.GeneratedMessage
+  implements RSGroupInfoOrBuilder {
+// Use RSGroupInfo.newBuilder() to construct.
+private RSGroupInfo(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private RSGroupInfo(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final RSGroupInfo defaultInstance;
+public static RSGroupInfo getDefaultInstance() {
+  return defaultInstance;
+}
+
+public RSGroupInfo getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private RSGroupInfo(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  name_ = input.readBytes();
+  break;
+}
+case 26: {
+  if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+tables_ = new 
java.util.ArrayList();
+mutable_bitField0_ |= 0x0004;

[3/7] hbase git commit: HBASE-6721 RegionServer Group based Assignment (Francis Liu)

2016-03-14 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca816f07/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
new file mode 100644
index 000..01efefc
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -0,0 +1,755 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.ServiceException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This is an implementation of {@link RSGroupInfoManager}. Which makes
+ * use of an HBase table as the persistence store for the group information.
+ * It also makes use of zookeeper to store group information needed
+ * for bootstrapping during offline mode.
+ */
+public class RSGroupInfoManagerImpl implements RSGroupInfoManager, 
ServerListener {
+  private static final Log LOG = 
LogFactory.getLog(RSGroupInfoManagerImpl.class);
+
+  /** Table descriptor 

[1/3] hbase git commit: HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)

2016-03-10 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 75547a42b -> 95421d276
  refs/heads/branch-1.3 b04b394b0 -> ab47fc933
  refs/heads/master 6628d2df1 -> a979d8558


HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a979d855
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a979d855
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a979d855

Branch: refs/heads/master
Commit: a979d85582684bd30d4e1f01b3d53ac421f8666b
Parents: 6628d2d
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Mar 10 19:30:31 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Mar 10 20:16:30 2016 -0800

--
 .../hbase/regionserver/wal/MetricsWALSource.java   |  6 ++
 .../hbase/regionserver/wal/MetricsWALSourceImpl.java   | 13 +
 .../hadoop/hbase/regionserver/wal/MetricsWAL.java  |  1 +
 .../hadoop/hbase/regionserver/wal/TestMetricsWAL.java  | 11 +++
 4 files changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a979d855/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
index c6dc731..a665571 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
@@ -62,6 +62,8 @@ public interface MetricsWALSource extends BaseSource {
   String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest";
   String LOW_REPLICA_ROLL_REQUESTED_DESC =
   "How many times a log roll was requested due to too few DN's in the 
write pipeline.";
+  String WRITTEN_BYTES = "writtenBytes";
+  String WRITTEN_BYTES_DESC = "Size (in bytes) of the data written to the 
WAL.";
 
   /**
* Add the append size.
@@ -93,4 +95,8 @@ public interface MetricsWALSource extends BaseSource {
   void incrementLowReplicationLogRoll();
 
   long getSlowAppendCount();
+
+  void incrementWrittenBytes(long val);
+
+  long getWrittenBytes();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a979d855/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
index 995e334..9f9e09c 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
@@ -40,6 +40,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   private final MutableFastCounter slowAppendCount;
   private final MutableFastCounter logRollRequested;
   private final MutableFastCounter lowReplicationLogRollRequested;
+  private final MutableFastCounter writtenBytes;
 
   public MetricsWALSourceImpl() {
 this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, 
METRICS_JMX_CONTEXT);
@@ -62,6 +63,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
 this.getMetricsRegistry().newCounter(ROLL_REQUESTED, 
ROLL_REQUESTED_DESC, 0L);
 lowReplicationLogRollRequested = this.getMetricsRegistry()
 .newCounter(LOW_REPLICA_ROLL_REQUESTED, 
LOW_REPLICA_ROLL_REQUESTED_DESC, 0L);
+writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, 
WRITTEN_BYTES_DESC, 0l);
   }
 
   @Override
@@ -103,4 +105,15 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   public long getSlowAppendCount() {
 return slowAppendCount.value();
   }
+
+  @Override
+  public void incrementWrittenBytes(long val) {
+writtenBytes.incr(val);
+  }
+
+  @Override
+  public long getWrittenBytes() {
+return writtenBytes.value();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a979d855/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
in

[3/3] hbase git commit: HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)

2016-03-10 Thread enis
HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)

Conflicts:

hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab47fc93
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab47fc93
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab47fc93

Branch: refs/heads/branch-1.3
Commit: ab47fc93397ccc1dd2b4e50b1dc7b77740d22bd0
Parents: b04b394
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Mar 10 19:30:31 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Mar 10 20:22:15 2016 -0800

--
 .../hbase/regionserver/wal/MetricsWALSource.java   |  5 +
 .../hbase/regionserver/wal/MetricsWALSourceImpl.java   | 13 +
 .../hadoop/hbase/regionserver/wal/MetricsWAL.java  |  1 +
 .../hadoop/hbase/regionserver/wal/TestMetricsWAL.java  | 13 -
 4 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab47fc93/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
index f8c746f..2be1d0d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
@@ -62,6 +62,8 @@ public interface MetricsWALSource extends BaseSource {
   String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest";
   String LOW_REPLICA_ROLL_REQUESTED_DESC =
   "How many times a log roll was requested due to too few DN's in the 
write pipeline.";
+  String WRITTEN_BYTES = "writtenBytes";
+  String WRITTEN_BYTES_DESC = "Size (in bytes) of the data written to the 
WAL.";
 
   /**
* Add the append size.
@@ -92,4 +94,7 @@ public interface MetricsWALSource extends BaseSource {
 
   void incrementLowReplicationLogRoll();
 
+  void incrementWrittenBytes(long val);
+
+  long getWrittenBytes();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab47fc93/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
index aec3278..1299637 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
@@ -40,6 +40,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   private final MutableFastCounter slowAppendCount;
   private final MutableFastCounter logRollRequested;
   private final MutableFastCounter lowReplicationLogRollRequested;
+  private final MutableFastCounter writtenBytes;
 
   public MetricsWALSourceImpl() {
 this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, 
METRICS_JMX_CONTEXT);
@@ -62,6 +63,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
 this.getMetricsRegistry().newCounter(ROLL_REQUESTED, 
ROLL_REQUESTED_DESC, 0L);
 lowReplicationLogRollRequested = this.getMetricsRegistry()
 .newCounter(LOW_REPLICA_ROLL_REQUESTED, 
LOW_REPLICA_ROLL_REQUESTED_DESC, 0L);
+writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, 
WRITTEN_BYTES_DESC, 0l);
   }
 
   @Override
@@ -98,4 +100,15 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   public void incrementLowReplicationLogRoll() {
 lowReplicationLogRollRequested.incr();
   }
+
+  @Override
+  public void incrementWrittenBytes(long val) {
+writtenBytes.incr(val);
+  }
+
+  @Override
+  public long getWrittenBytes() {
+return writtenBytes.value();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab47fc93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
--
diff --git 
a/hbase-server/sr

[2/3] hbase git commit: HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)

2016-03-10 Thread enis
HBASE-15435 Add WAL (in bytes) written metric (Alicia Ying Shu)

Conflicts:

hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java

hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWAL.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95421d27
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95421d27
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95421d27

Branch: refs/heads/branch-1
Commit: 95421d276e78b2ecd3ff4d0142784398d252d1dd
Parents: 75547a4
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Mar 10 19:30:31 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Mar 10 20:21:53 2016 -0800

--
 .../hbase/regionserver/wal/MetricsWALSource.java   |  5 +
 .../hbase/regionserver/wal/MetricsWALSourceImpl.java   | 13 +
 .../hadoop/hbase/regionserver/wal/MetricsWAL.java  |  1 +
 .../hadoop/hbase/regionserver/wal/TestMetricsWAL.java  | 13 -
 4 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95421d27/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
index f8c746f..2be1d0d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
@@ -62,6 +62,8 @@ public interface MetricsWALSource extends BaseSource {
   String LOW_REPLICA_ROLL_REQUESTED = "lowReplicaRollRequest";
   String LOW_REPLICA_ROLL_REQUESTED_DESC =
   "How many times a log roll was requested due to too few DN's in the 
write pipeline.";
+  String WRITTEN_BYTES = "writtenBytes";
+  String WRITTEN_BYTES_DESC = "Size (in bytes) of the data written to the 
WAL.";
 
   /**
* Add the append size.
@@ -92,4 +94,7 @@ public interface MetricsWALSource extends BaseSource {
 
   void incrementLowReplicationLogRoll();
 
+  void incrementWrittenBytes(long val);
+
+  long getWrittenBytes();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/95421d27/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
index aec3278..1299637 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
@@ -40,6 +40,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   private final MutableFastCounter slowAppendCount;
   private final MutableFastCounter logRollRequested;
   private final MutableFastCounter lowReplicationLogRollRequested;
+  private final MutableFastCounter writtenBytes;
 
   public MetricsWALSourceImpl() {
 this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, 
METRICS_JMX_CONTEXT);
@@ -62,6 +63,7 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
 this.getMetricsRegistry().newCounter(ROLL_REQUESTED, 
ROLL_REQUESTED_DESC, 0L);
 lowReplicationLogRollRequested = this.getMetricsRegistry()
 .newCounter(LOW_REPLICA_ROLL_REQUESTED, 
LOW_REPLICA_ROLL_REQUESTED_DESC, 0L);
+writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, 
WRITTEN_BYTES_DESC, 0l);
   }
 
   @Override
@@ -98,4 +100,15 @@ public class MetricsWALSourceImpl extends BaseSourceImpl 
implements MetricsWALSo
   public void incrementLowReplicationLogRoll() {
 lowReplicationLogRollRequested.incr();
   }
+
+  @Override
+  public void incrementWrittenBytes(long val) {
+writtenBytes.incr(val);
+  }
+
+  @Override
+  public long getWrittenBytes() {
+return writtenBytes.value();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/95421d27/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
--
diff --git 
a/hbase-server/sr

[4/4] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-02-29 Thread enis
HBASE-15128 Disable region splits and merges switch in master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6463bbaa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6463bbaa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6463bbaa

Branch: refs/heads/branch-1.3
Commit: 6463bbaa4110e7b73fa67811ef7ad710e90f9fbd
Parents: 5655351
Author: chenheng <chenh...@apache.org>
Authored: Mon Feb 29 20:35:18 2016 +0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 14:39:12 2016 -0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   25 +
 .../hadoop/hbase/client/ConnectionManager.java  |   14 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   32 +
 .../hadoop/hbase/protobuf/RequestConverter.java |   49 +
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 4274 ++
 .../protobuf/generated/SnapshotProtos.java  |  500 +-
 .../protobuf/generated/ZooKeeperProtos.java |  462 +-
 hbase-protocol/src/main/protobuf/Master.proto   |   36 +
 .../src/main/protobuf/ZooKeeper.proto   |7 +
 .../hadoop/hbase/master/AssignmentManager.java  |   12 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   28 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   45 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   37 +
 .../hbase/zookeeper/SplitOrMergeTracker.java|  151 +
 .../hbase/client/TestSplitOrMergeStatus.java|  199 +
 hbase-shell/src/main/ruby/hbase/admin.rb|   32 +
 hbase-shell/src/main/ruby/shell.rb  |2 +
 .../ruby/shell/commands/splitormerge_enabled.rb |   41 +
 .../ruby/shell/commands/splitormerge_switch.rb  |   43 +
 20 files changed, 4813 insertions(+), 1186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6463bbaa/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index b40f30e..d9361b3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1484,4 +1484,29 @@ public interface Admin extends Abortable, Closeable {
* @throws UnsupportedOperationException
*/
   List<SecurityCapability> getSecurityCapabilities() throws IOException;
+
+  /**
+   * Turn the Split or Merge switches on or off.
+   *
+   * @param enabled enabled or not
+   * @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param switchTypes switchType list {@link MasterSwitchType}
+   * @return Previous switch value array
+   */
+  boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final MasterSwitchType... switchTypes) 
throws IOException;
+
+  /**
+   * Query the current state of the switch
+   *
+   * @return true if the switch is enabled, false otherwise.
+   */
+  boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum MasterSwitchType {
+SPLIT,
+MERGE
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6463bbaa/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 5aa604d..dab5392 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -2091,6 +2091,20 @@ class ConnectionManager {
 }
 
 @Override
+public MasterProtos.SetSplitOrMergeEnabledResponse 
setSplitOrMergeEnabled(
+  RpcController controller,
+  MasterProtos.SetSplitOrMergeEnabledRequest request) throws 
ServiceException {
+  return stub.setSplitOrMergeEnabled(controller, request);
+}
+
+@Override
+public MasterProtos.IsSplitOrMergeEnabledResponse 
isSplitOrMergeEnabled(
+  RpcController controller,
+  MasterProtos.IsSplitOrMergeEnabledRequest request) throws 
ServiceException {
+  return stub.isSplitOrMergeEnabled(controller, request);
+}
+
+@Override
 public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController 
controller,
 IsNormalizerEnabledRequest request) throws Serv

[2/4] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-02-29 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/6463bbaa/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
index 8dbb5ad..9805d50 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -11,13 +11,13 @@ public final class SnapshotProtos {
   public interface SnapshotFileInfoOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 boolean hasType();
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType();
 
@@ -67,7 +67,7 @@ public final class SnapshotProtos {
 getWalNameBytes();
   }
   /**
-   * Protobuf type {@code SnapshotFileInfo}
+   * Protobuf type {@code hbase.pb.SnapshotFileInfo}
*/
   public static final class SnapshotFileInfo extends
   com.google.protobuf.GeneratedMessage
@@ -157,12 +157,12 @@ public final class SnapshotProtos {
 }
 public static final com.google.protobuf.Descriptors.Descriptor
 getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
 }
 
 protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
 internalGetFieldAccessorTable() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+  return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_fieldAccessorTable
   .ensureFieldAccessorsInitialized(
   
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class,
 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class);
 }
@@ -183,7 +183,7 @@ public final class SnapshotProtos {
 }
 
 /**
- * Protobuf enum {@code SnapshotFileInfo.Type}
+ * Protobuf enum {@code hbase.pb.SnapshotFileInfo.Type}
  */
 public enum Type
 implements com.google.protobuf.ProtocolMessageEnum {
@@ -261,21 +261,21 @@ public final class SnapshotProtos {
 this.value = value;
   }
 
-  // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type)
+  // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotFileInfo.Type)
 }
 
 private int bitField0_;
-// required .SnapshotFileInfo.Type type = 1;
+// required .hbase.pb.SnapshotFileInfo.Type type = 1;
 public static final int TYPE_FIELD_NUMBER = 1;
 private 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
type_;
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public boolean hasType() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * required .SnapshotFileInfo.Type type = 1;
+ * required .hbase.pb.SnapshotFileInfo.Type type = 1;
  */
 public 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type 
getType() {
   return type_;
@@ -613,19 +613,19 @@ public final class SnapshotProtos {
   return builder;
 }
 /**
- * Protobuf type {@code SnapshotFileInfo}
+ * Protobuf type {@code hbase.pb.SnapshotFileInfo}
  */
 public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder>
implements 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder
 {
   public static final com.google.protobuf.Descriptors.Descriptor
   getDescriptor() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_hbase_pb_SnapshotFileInfo_descriptor;
   }
 
   protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
   internalGetFieldAccessorTable() {
-return 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+return 

[3/4] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-02-29 Thread enis
http://git-wip-us.apache.org/repos/asf/hbase/blob/6463bbaa/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 63d0536..588cc86 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -8,6 +8,88 @@ public final class MasterProtos {
   public static void registerAllExtensions(
   com.google.protobuf.ExtensionRegistry registry) {
   }
+  /**
+   * Protobuf enum {@code hbase.pb.MasterSwitchType}
+   */
+  public enum MasterSwitchType
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * SPLIT = 0;
+ */
+SPLIT(0, 0),
+/**
+ * MERGE = 1;
+ */
+MERGE(1, 1),
+;
+
+/**
+ * SPLIT = 0;
+ */
+public static final int SPLIT_VALUE = 0;
+/**
+ * MERGE = 1;
+ */
+public static final int MERGE_VALUE = 1;
+
+
+public final int getNumber() { return value; }
+
+public static MasterSwitchType valueOf(int value) {
+  switch (value) {
+case 0: return SPLIT;
+case 1: return MERGE;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap<MasterSwitchType>
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap<MasterSwitchType>
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap<MasterSwitchType>() {
+public MasterSwitchType findValueByNumber(int number) {
+  return MasterSwitchType.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final MasterSwitchType[] VALUES = values();
+
+public static MasterSwitchType valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private MasterSwitchType(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType)
+  }
+
   public interface AddColumnRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
@@ -27996,28 +28078,62 @@ public final class MasterProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.IsBalancerEnabledResponse)
   }
 
-  public interface NormalizeRequestOrBuilder
+  public interface SetSplitOrMergeEnabledRequestOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
+
+// required bool enabled = 1;
+/**
+ * required bool enabled = 1;
+ */
+boolean hasEnabled();
+/**
+ * required bool enabled = 1;
+ */
+boolean getEnabled();
+
+// optional bool synchronous = 2;
+/**
+ * optional bool synchronous = 2;
+ */
+boolean hasSynchronous();
+/**
+ * optional bool synchronous = 2;
+ */
+boolean getSynchronous();
+
+// repeated .hbase.pb.MasterSwitchType switch_types = 3;
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+
java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType>
 getSwitchTypesList();
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+int getSwitchTypesCount();
+/**
+ * repeated .hbase.pb.MasterSwitchType switch_types = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
   }
   /**
-   * Protobuf type {@code hbase.pb.NormalizeRequest}
+   * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
*/
-  public static final class NormalizeRequest extends
+  public static final class SetSplitOrMergeEnabledRequest extends
   com.google.protobuf.GeneratedMessage
-  implements NormalizeRequestOrBuilder {
-// Use NormalizeRequest.newBuilder() to construct.
-private NormalizeRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  implements SetSplitOrMergeEnabledRequestOrBuilder {
+// Use 

[1/4] hbase git commit: HBASE-15128 Disable region splits and merges switch in master

2016-02-29 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 565535178 -> 6463bbaa4


http://git-wip-us.apache.org/repos/asf/hbase/blob/6463bbaa/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 1238073..09479c4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9281,6 +9281,450 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
   }
 
+  public interface SwitchStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool enabled = 1;
+/**
+ * optional bool enabled = 1;
+ */
+boolean hasEnabled();
+/**
+ * optional bool enabled = 1;
+ */
+boolean getEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SwitchState}
+   *
+   * 
+   **
+   * State of the switch.
+   * 
+   */
+  public static final class SwitchState extends
+  com.google.protobuf.GeneratedMessage
+  implements SwitchStateOrBuilder {
+// Use SwitchState.newBuilder() to construct.
+private SwitchState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SwitchState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SwitchState defaultInstance;
+public static SwitchState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SwitchState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SwitchState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  enabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
+}
+
+public static com.google.protobuf.Parser<SwitchState> PARSER =
+new com.google.protobuf.AbstractParser<SwitchState>() {
+  public SwitchState parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+  throws com.google.protobuf.InvalidProtocolBufferException {
+return new SwitchState(input, extensionRegistry);
+  }
+};
+
+@java.lang.Override
+public com.google.protobuf.Parser getParserForType() {
+  return PARSER;
+}
+
+private int bitField0_;
+// optional bool enabled = 1;
+public static final int 

[1/5] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-02-29 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 18194f8a5 -> a9dd9c7b8
  refs/heads/branch-1.1 84c4fe2bd -> 58f0bb52f
  refs/heads/branch-1.2 01a113e5e -> a0301dcac
  refs/heads/branch-1.3 5a9539e83 -> 565535178
  refs/heads/master bc112888e -> 7c54525c8


HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c54525c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c54525c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c54525c

Branch: refs/heads/master
Commit: 7c54525c89bbbe0c66401813433bfb957e461eac
Parents: bc11288
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:24:18 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c54525c/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index bac4edb..15828ce 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List<CellModel> cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.addColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(M

[5/5] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-02-29 Thread enis
HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)

Conflicts:
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58f0bb52
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58f0bb52
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58f0bb52

Branch: refs/heads/branch-1.1
Commit: 58f0bb52f1e29493f2e43a2f2769183ac802145a
Parents: 84c4fe2
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:42:27 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58f0bb52/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 39a4128..bd1ea24 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List<CellModel> cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.deleteColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly 
specified." + CRLF)
+  .type(MIMETYPE_TEXT).entity("Bad request: Column to che

[2/5] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-02-29 Thread enis
HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)

Conflicts:
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9dd9c7b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9dd9c7b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9dd9c7b

Branch: refs/heads/branch-1
Commit: a9dd9c7b8286ec07e4ba37c467ed0bb2c6298a1b
Parents: 18194f8
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:31:53 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9dd9c7b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 39a4128..bd1ea24 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List<CellModel> cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.deleteColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly 
specified." + CRLF)
+  .type(MIMETYPE_TEXT).entity("Bad request: Column to che

[4/5] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-02-29 Thread enis
HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)

Conflicts:
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0301dca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0301dca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0301dca

Branch: refs/heads/branch-1.2
Commit: a0301dcacf5793b865ba63f370cc44d2a2d516e0
Parents: 01a113e
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:39:23 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0301dca/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 39a4128..bd1ea24 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.deleteColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly 
specified." + CRLF)
+  .type(MIMETYPE_TEXT).entity("Bad request: Column to che

[3/5] hbase git commit: HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells (Ajith)

2016-02-29 Thread enis
HBASE-15323 Hbase Rest CheckAndDeleteAPi should be able to delete more cells 
(Ajith)

Conflicts:
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56553517
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56553517
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56553517

Branch: refs/heads/branch-1.3
Commit: 5655351788133f2ac992b2888ff59ebb1879632f
Parents: 5a9539e
Author: Enis Soztutar <e...@apache.org>
Authored: Mon Feb 29 12:24:18 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Mon Feb 29 12:36:18 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 52 ---
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  1 +
 .../hadoop/hbase/rest/RowResourceBase.java  | 49 +++---
 .../hbase/rest/TestGetAndPutResource.java   | 53 
 4 files changed, 142 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56553517/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 39a4128..bd1ea24 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -553,9 +553,12 @@ public class RowResource extends ResourceBase {
   .build();
   }
 
+  List cellModels = rowModel.getCells();
+  int cellModelCount = cellModels.size();
+
   delete = new Delete(key);
   boolean retValue;
-  CellModel valueToDeleteCell = rowModel.getCells().get(0);
+  CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1);
   byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
   if (valueToDeleteColumn == null) {
 try {
@@ -567,25 +570,62 @@ public class RowResource extends ResourceBase {
 .build();
 }
   }
-  byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
+
+  byte[][] parts ;
+  // Copy all the cells to the Delete request if extra cells are sent
+  if(cellModelCount > 1) {
+for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  parts = KeyValue.parseColumn(col);
+
+  if (parts.length == 1) {
+// Only Column Family is specified
+delete.addFamily(parts[0], cell.getTimestamp());
+  } else if (parts.length == 2) {
+delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+  } else {
+servlet.getMetrics().incrementFailedDeleteRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT)
+.entity("Bad request: Column to delete incorrectly 
specified." + CRLF)
+.build();
+  }
+}
+  }
+
+  parts = KeyValue.parseColumn(valueToDeleteColumn);
   if (parts.length == 2) {
 if (parts[1].length != 0) {
-  delete.deleteColumns(parts[0], parts[1]);
+  // To support backcompat of deleting a cell
+  // if that is the only cell passed to the rest api
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], parts[1]);
+  }
   retValue = table.checkAndDelete(key, parts[0], parts[1],
 valueToDeleteCell.getValue(), delete);
 } else {
   // The case of empty qualifier.
-  delete.deleteColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  if(cellModelCount == 1) {
+delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+  }
   retValue = table.checkAndDelete(key, parts[0], 
Bytes.toBytes(StringUtils.EMPTY),
 valueToDeleteCell.getValue(), delete);
 }
   } else {
 servlet.getMetrics().incrementFailedDeleteRequests(1);
 return Response.status(Response.Status.BAD_REQUEST)
-  .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly 
specified." + CRLF)
+  .type(MIMETYPE_TEXT).entity("Bad request: Column to che

[2/4] hbase git commit: HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared cell (Ajith)

2016-02-26 Thread enis
HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared 
cell (Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d233e09c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d233e09c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d233e09c

Branch: refs/heads/branch-1
Commit: d233e09c1479ab0e46adf7bfc27cb890b493ae23
Parents: 46ffa85
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Feb 26 15:05:59 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Feb 26 15:06:05 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 32 +++--
 .../hadoop/hbase/rest/RowResourceBase.java  | 39 +--
 .../hbase/rest/TestGetAndPutResource.java   | 69 
 3 files changed, 129 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d233e09c/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index dad5a32..39a4128 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -455,20 +455,40 @@ public class RowResource extends ResourceBase {
   byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
   if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
 CellModel valueToPutCell = null;
+
+// Copy all the cells to the Put request
+// and track if the check cell's latest value is also sent
 for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
-  if(Bytes.equals(cellModels.get(i).getColumn(),
-  valueToCheckCell.getColumn())) {
-valueToPutCell = cellModels.get(i);
-break;
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  byte [][] parts = KeyValue.parseColumn(col);
+
+  if (parts.length != 2) {
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+.build();
+  }
+  put.addImmutable(parts[0], parts[1], cell.getTimestamp(), 
cell.getValue());
+
+  if(Bytes.equals(col,
+  valueToCheckCell.getColumn())) {
+valueToPutCell = cell;
   }
 }
+
 if (valueToPutCell == null) {
   servlet.getMetrics().incrementFailedPutRequests(1);
   return 
Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
   .entity("Bad request: The column to put and check do not match." 
+ CRLF).build();
 } else {
-  put.addImmutable(valueToPutParts[0], valueToPutParts[1], 
valueToPutCell.getTimestamp(),
-valueToPutCell.getValue());
   retValue = table.checkAndPut(key, valueToPutParts[0], 
valueToPutParts[1],
 valueToCheckCell.getValue(), put);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d233e09c/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
--
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
index 0e74b46..c88bd4c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.util.*;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.bind.JAXBContext;
@@ -29,6 +30,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
+import org.apache.commons.collections.keyvalue.AbstractMapEntry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -228,13 +230,22 @@ public class RowResourceBase {
   }
 
   protected static Response checkA

[4/4] hbase git commit: HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared cell (Ajith)

2016-02-26 Thread enis
HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared 
cell (Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12a3d441
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12a3d441
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12a3d441

Branch: refs/heads/branch-1.1
Commit: 12a3d441ea0ca0825cc65c60e70bae6028bfcd5b
Parents: 41efb92
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Feb 26 15:05:59 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Feb 26 15:09:06 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 32 +++--
 .../hadoop/hbase/rest/RowResourceBase.java  | 39 +--
 .../hbase/rest/TestGetAndPutResource.java   | 69 
 3 files changed, 129 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12a3d441/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index dad5a32..39a4128 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -455,20 +455,40 @@ public class RowResource extends ResourceBase {
   byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
   if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
 CellModel valueToPutCell = null;
+
+// Copy all the cells to the Put request
+// and track if the check cell's latest value is also sent
 for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
-  if(Bytes.equals(cellModels.get(i).getColumn(),
-  valueToCheckCell.getColumn())) {
-valueToPutCell = cellModels.get(i);
-break;
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  byte [][] parts = KeyValue.parseColumn(col);
+
+  if (parts.length != 2) {
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+.build();
+  }
+  put.addImmutable(parts[0], parts[1], cell.getTimestamp(), 
cell.getValue());
+
+  if(Bytes.equals(col,
+  valueToCheckCell.getColumn())) {
+valueToPutCell = cell;
   }
 }
+
 if (valueToPutCell == null) {
   servlet.getMetrics().incrementFailedPutRequests(1);
   return 
Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
   .entity("Bad request: The column to put and check do not match." 
+ CRLF).build();
 } else {
-  put.addImmutable(valueToPutParts[0], valueToPutParts[1], 
valueToPutCell.getTimestamp(),
-valueToPutCell.getValue());
   retValue = table.checkAndPut(key, valueToPutParts[0], 
valueToPutParts[1],
 valueToCheckCell.getValue(), put);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12a3d441/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
--
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
index 0e74b46..c88bd4c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.util.*;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.bind.JAXBContext;
@@ -29,6 +30,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
+import org.apache.commons.collections.keyvalue.AbstractMapEntry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -228,13 +230,22 @@ public class RowResourceBase {
   }
 
   protected static Response checkA

[3/4] hbase git commit: HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared cell (Ajith)

2016-02-26 Thread enis
HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared 
cell (Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/61852848
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/61852848
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/61852848

Branch: refs/heads/branch-1.2
Commit: 61852848cdc3dd0172f730eeb6e31b2b0ba861ed
Parents: 31b12fd
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Feb 26 15:05:59 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Feb 26 15:07:33 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 32 +++--
 .../hadoop/hbase/rest/RowResourceBase.java  | 39 +--
 .../hbase/rest/TestGetAndPutResource.java   | 69 
 3 files changed, 129 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/61852848/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index dad5a32..39a4128 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -455,20 +455,40 @@ public class RowResource extends ResourceBase {
   byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
   if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
 CellModel valueToPutCell = null;
+
+// Copy all the cells to the Put request
+// and track if the check cell's latest value is also sent
 for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
-  if(Bytes.equals(cellModels.get(i).getColumn(),
-  valueToCheckCell.getColumn())) {
-valueToPutCell = cellModels.get(i);
-break;
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  byte [][] parts = KeyValue.parseColumn(col);
+
+  if (parts.length != 2) {
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+.build();
+  }
+  put.addImmutable(parts[0], parts[1], cell.getTimestamp(), 
cell.getValue());
+
+  if(Bytes.equals(col,
+  valueToCheckCell.getColumn())) {
+valueToPutCell = cell;
   }
 }
+
 if (valueToPutCell == null) {
   servlet.getMetrics().incrementFailedPutRequests(1);
   return 
Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
   .entity("Bad request: The column to put and check do not match." 
+ CRLF).build();
 } else {
-  put.addImmutable(valueToPutParts[0], valueToPutParts[1], 
valueToPutCell.getTimestamp(),
-valueToPutCell.getValue());
   retValue = table.checkAndPut(key, valueToPutParts[0], 
valueToPutParts[1],
 valueToCheckCell.getValue(), put);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/61852848/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
--
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
index 0e74b46..c88bd4c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.util.*;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.bind.JAXBContext;
@@ -29,6 +30,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
+import org.apache.commons.collections.keyvalue.AbstractMapEntry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -228,13 +230,22 @@ public class RowResourceBase {
   }
 
   protected static Response checkA

[1/4] hbase git commit: HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared cell (Ajith)

2016-02-26 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 46ffa8598 -> d233e09c1
  refs/heads/branch-1.1 41efb9233 -> 12a3d441e
  refs/heads/branch-1.2 31b12fda0 -> 61852848c
  refs/heads/master 8f6e29785 -> 793babf4a


HBASE-15290 Hbase Rest CheckAndAPI should save other cells along with compared 
cell (Ajith)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/793babf4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/793babf4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/793babf4

Branch: refs/heads/master
Commit: 793babf4a4c0156f9e712a2bbf9578e2a1d6e1e4
Parents: 8f6e297
Author: Enis Soztutar <e...@apache.org>
Authored: Fri Feb 26 15:05:59 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Fri Feb 26 15:05:59 2016 -0800

--
 .../apache/hadoop/hbase/rest/RowResource.java   | 32 +++--
 .../hadoop/hbase/rest/RowResourceBase.java  | 39 +--
 .../hbase/rest/TestGetAndPutResource.java   | 69 
 3 files changed, 129 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/793babf4/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index f922343..bac4edb 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -455,20 +455,40 @@ public class RowResource extends ResourceBase {
   byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
   if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
 CellModel valueToPutCell = null;
+
+// Copy all the cells to the Put request
+// and track if the check cell's latest value is also sent
 for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
-  if(Bytes.equals(cellModels.get(i).getColumn(),
-  valueToCheckCell.getColumn())) {
-valueToPutCell = cellModels.get(i);
-break;
+  CellModel cell = cellModels.get(i);
+  byte[] col = cell.getColumn();
+
+  if (col == null) {
+servlet.getMetrics().incrementFailedPutRequests(1);
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request: Column found to 
be null." + CRLF)
+.build();
+  }
+
+  byte [][] parts = KeyValue.parseColumn(col);
+
+  if (parts.length != 2) {
+return Response.status(Response.Status.BAD_REQUEST)
+.type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+.build();
+  }
+  put.addImmutable(parts[0], parts[1], cell.getTimestamp(), 
cell.getValue());
+
+  if(Bytes.equals(col,
+  valueToCheckCell.getColumn())) {
+valueToPutCell = cell;
   }
 }
+
 if (valueToPutCell == null) {
   servlet.getMetrics().incrementFailedPutRequests(1);
   return 
Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
   .entity("Bad request: The column to put and check do not match." 
+ CRLF).build();
 } else {
-  put.addImmutable(valueToPutParts[0], valueToPutParts[1], 
valueToPutCell.getTimestamp(),
-valueToPutCell.getValue());
   retValue = table.checkAndPut(key, valueToPutParts[0], 
valueToPutParts[1],
 valueToCheckCell.getValue(), put);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/793babf4/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
--
diff --git 
a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
index 48cebb2..b2fc0a6 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.StringWriter;
+import java.util.*;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.bind.JAXBContext;
@@ -29,6 +30,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.bind.Marshaller;
 import javax.xml.bind.Unmarshaller;
 
+import org.apache.commons.collections.keyvalue.Abs

[2/2] hbase git commit: HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not consistent between HBase versions (Youngjoon Kim)

2016-02-17 Thread enis
HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not 
consistent between HBase versions (Youngjoon Kim)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95b55fea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95b55fea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95b55fea

Branch: refs/heads/0.98
Commit: 95b55fea8735c712f8ece4cb019dbbb1bedfc4bc
Parents: c20d34c
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Feb 17 18:27:55 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Wed Feb 17 18:27:55 2016 -0800

--
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95b55fea/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 584d434..5c2b372 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -73,9 +73,8 @@ public class ClientSideRegionScanner extends 
AbstractClientScanner {
   @Override
   public Result next() throws IOException {
 values.clear();
-
-scanner.nextRaw(values, -1); // pass -1 as limit so that we see the whole 
row.
-if (values == null || values.isEmpty()) {
+scanner.nextRaw(values);
+if (values.isEmpty()) {
   //we are done
   return null;
 }



[1/2] hbase git commit: HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not consistent between HBase versions (Youngjoon Kim)

2016-02-17 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/0.98 c20d34c15 -> 95b55fea8
  refs/heads/branch-1.0 f280c4595 -> 2ce516b0f


HBASE-15274 ClientSideRegionScanner's reaction to Scan#setBatch is not 
consistent between HBase versions (Youngjoon Kim)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ce516b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ce516b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ce516b0

Branch: refs/heads/branch-1.0
Commit: 2ce516b0fff93a4ee84bb53f9623c495d8dc3d13
Parents: f280c45
Author: Enis Soztutar <e...@apache.org>
Authored: Wed Feb 17 17:41:13 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Wed Feb 17 17:41:13 2016 -0800

--
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ce516b0/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 9cb9494..f9dacdd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -71,8 +71,7 @@ public class ClientSideRegionScanner extends 
AbstractClientScanner {
   @Override
   public Result next() throws IOException {
 values.clear();
-
-scanner.nextRaw(values, -1); // pass -1 as limit so that we see the whole 
row.
+scanner.nextRaw(values);
 if (values.isEmpty()) {
   //we are done
   return null;



hbase git commit: HBASE-15177 Reduce garbage created under high load

2016-02-04 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/master d5d26f080 -> a69272efe


HBASE-15177 Reduce garbage created under high load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a69272ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a69272ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a69272ef

Branch: refs/heads/master
Commit: a69272efe12f7b780fbf2fa14c42d0c0b155205f
Parents: d5d26f0
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Feb 4 11:07:36 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Feb 4 13:27:00 2016 -0800

--
 .../hadoop/hbase/client/ScannerCallable.java|  14 ++-
 .../hadoop/hbase/ipc/AsyncRpcChannel.java   |   7 +-
 .../org/apache/hadoop/hbase/ipc/IPCUtil.java|  20 ++--
 .../hbase/ipc/PayloadCarryingRpcController.java |   7 +-
 .../apache/hadoop/hbase/ipc/RpcClientImpl.java  |   6 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  19 +++-
 .../hadoop/hbase/client/TestClientScanner.java  |   2 +-
 .../apache/hadoop/hbase/ipc/TestIPCUtil.java|   4 +-
 .../hadoop/hbase/io/ByteBufferInputStream.java  | 107 +++
 .../org/apache/hadoop/hbase/util/Threads.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  54 ++
 .../AnnotationReadingPriorityFunction.java  |   9 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   6 +-
 .../hbase/regionserver/RSRpcServices.java   |  15 ++-
 14 files changed, 210 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a69272ef/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index f6445a6..72d69ec 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -191,6 +191,13 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
 if (Thread.interrupted()) {
   throw new InterruptedIOException();
 }
+
+if (controller == null) {
+  controller = controllerFactory.newController();
+  controller.setPriority(getTableName());
+  controller.setCallTimeout(callTimeout);
+}
+
 if (closed) {
   if (scannerId != -1) {
 close();
@@ -209,9 +216,6 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
   RequestConverter.buildScanRequest(scannerId, caching, false, 
nextCallSeq,
 this.scanMetrics != null, renew);
   ScanResponse response = null;
-  controller = controllerFactory.newController();
-  controller.setPriority(getTableName());
-  controller.setCallTimeout(callTimeout);
   try {
 response = getStub().scan(controller, request);
 // Client and RS maintain a nextCallSeq number during the scan. 
Every next() call
@@ -371,7 +375,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
   ScanRequest request =
   RequestConverter.buildScanRequest(this.scannerId, 0, true, 
this.scanMetrics != null);
   try {
-getStub().scan(null, request);
+getStub().scan(controller, request);
   } catch (ServiceException se) {
 throw ProtobufUtil.getRemoteException(se);
   }
@@ -388,7 +392,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
 getLocation().getRegionInfo().getRegionName(),
 this.scan, 0, false);
 try {
-  ScanResponse response = getStub().scan(null, request);
+  ScanResponse response = getStub().scan(controller, request);
   long id = response.getScannerId();
   if (logScannerActivity) {
 LOG.info("Open scanner=" + id + " for scan=" + scan.toString()

http://git-wip-us.apache.org/repos/asf/hbase/blob/a69272ef/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
index 69978fc..787aa47 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
@@ -412,7 +412,7 @@ public class AsyncRpcChannel {
 requestHeaderBuilder.setCellBlockMeta(cellBlockBuilder.build());
   }
   // Only p

hbase git commit: HBASE-15177 Reduce garbage created under high load

2016-02-04 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1 908e5a662 -> 73d677882


HBASE-15177 Reduce garbage created under high load

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/73d67788
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/73d67788
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/73d67788

Branch: refs/heads/branch-1
Commit: 73d67788206c3f60773d861375f5e6934a284418
Parents: 908e5a6
Author: Enis Soztutar <e...@apache.org>
Authored: Thu Feb 4 11:07:36 2016 -0800
Committer: Enis Soztutar <e...@apache.org>
Committed: Thu Feb 4 13:26:22 2016 -0800

--
 .../hadoop/hbase/client/ScannerCallable.java| 14 +--
 .../hadoop/hbase/ipc/AsyncRpcChannel.java   |  7 +-
 .../org/apache/hadoop/hbase/ipc/IPCUtil.java| 20 ++---
 .../hbase/ipc/PayloadCarryingRpcController.java |  7 +-
 .../apache/hadoop/hbase/ipc/RpcClientImpl.java  |  6 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 19 +++-
 .../hadoop/hbase/client/TestClientScanner.java  |  2 +-
 .../apache/hadoop/hbase/ipc/TestIPCUtil.java|  4 +-
 .../hadoop/hbase/io/ByteBufferInputStream.java  | 14 ++-
 .../hadoop/hbase/util/ByteBufferUtils.java  | 25 ++
 .../org/apache/hadoop/hbase/util/Threads.java   |  2 +-
 .../apache/hadoop/hbase/util/UnsafeAccess.java  | 95 +++-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 55 +++-
 .../AnnotationReadingPriorityFunction.java  |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  6 +-
 .../hbase/regionserver/RSRpcServices.java   | 15 +++-
 16 files changed, 231 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/73d67788/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 5100314..8912e58 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -194,6 +194,13 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
 if (Thread.interrupted()) {
   throw new InterruptedIOException();
 }
+
+if (controller == null) {
+  controller = controllerFactory.newController();
+  controller.setPriority(getTableName());
+  controller.setCallTimeout(callTimeout);
+}
+
 if (closed) {
   if (scannerId != -1) {
 close();
@@ -212,9 +219,6 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
   RequestConverter.buildScanRequest(scannerId, caching, false, 
nextCallSeq,
 this.scanMetrics != null, renew);
   ScanResponse response = null;
-  controller = controllerFactory.newController();
-  controller.setPriority(getTableName());
-  controller.setCallTimeout(callTimeout);
   try {
 response = getStub().scan(controller, request);
 // Client and RS maintain a nextCallSeq number during the scan. 
Every next() call
@@ -374,7 +378,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
   ScanRequest request =
   RequestConverter.buildScanRequest(this.scannerId, 0, true, 
this.scanMetrics != null);
   try {
-getStub().scan(null, request);
+getStub().scan(controller, request);
   } catch (ServiceException se) {
 throw ProtobufUtil.getRemoteException(se);
   }
@@ -391,7 +395,7 @@ public class ScannerCallable extends 
RegionServerCallable<Result[]> {
 getLocation().getRegionInfo().getRegionName(),
 this.scan, 0, false);
 try {
-  ScanResponse response = getStub().scan(null, request);
+  ScanResponse response = getStub().scan(controller, request);
   long id = response.getScannerId();
   if (logScannerActivity) {
 LOG.info("Open scanner=" + id + " for scan=" + scan.toString()

http://git-wip-us.apache.org/repos/asf/hbase/blob/73d67788/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
index 44e8322..9fe2cf6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/ap

<    1   2   3   4   5   6   7   8   9   10   >