hbase git commit: HBASE-16655 hbase backup describe with incorrect backup id results in NPE - addendum allows progress command to retrieve on-going backup session

2016-09-19 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 d0d1485f2 -> 7dab75096


HBASE-16655 hbase backup describe with incorrect backup id results in NPE - 
addendum allows progress command to retrieve on-going backup session


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7dab7509
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7dab7509
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7dab7509

Branch: refs/heads/HBASE-7912
Commit: 7dab75096f8549b0d59338a07bffae7e594bd758
Parents: d0d1485
Author: tedyu 
Authored: Mon Sep 19 18:00:12 2016 -0700
Committer: tedyu 
Committed: Mon Sep 19 18:00:12 2016 -0700

--
 .../apache/hadoop/hbase/backup/impl/BackupCommands.java   |  9 +
 .../hadoop/hbase/backup/TestBackupCommandLineTool.java|  9 -
 .../apache/hadoop/hbase/backup/TestBackupDescribe.java| 10 ++
 3 files changed, 15 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7dab7509/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 3d40da2..6d93a7c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -78,6 +78,7 @@ public final class BackupCommands {
 
   public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup 
progress \n"
   + " backupIdbackup image id\n";
+  public static final String NO_INFO_FOUND = "No info was found for backup id: 
";
 
   public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup 
decsribe \n"
   + " backupIdbackup image id\n";
@@ -353,24 +354,24 @@ public final class BackupCommands {
   super.execute();
   
   if (cmdline == null || cmdline.getArgs() == null ||
-  cmdline.getArgs().length != 2) {
+  cmdline.getArgs().length == 1) {
 System.err.println("No backup id was specified, "
 + "will retrieve the most recent (ongoing) sessions");
   }
   String[] args = cmdline.getArgs();
-  if (args.length != 2) {
+  if (args.length > 2) {
 System.err.println("ERROR: wrong number of arguments: " + args.length);
 printUsage();
 throw new IOException(INCORRECT_USAGE);
   }
 
-  String backupId = args == null ? null : args[1];
+  String backupId = (args == null || args.length <= 1) ? null : args[1];
   Configuration conf = getConf() != null? getConf(): 
HBaseConfiguration.create();
   try(final Connection conn = ConnectionFactory.createConnection(conf); 
   final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){
 int progress = admin.getProgress(backupId);
 if(progress < 0){
-  System.out.println("No info was found for backup id: "+backupId);
+  System.err.println(NO_INFO_FOUND + backupId);
 } else{
   System.out.println(backupId+" progress=" + progress+"%");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7dab7509/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
index 31a859d..1e267d2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
@@ -169,15 +169,6 @@ public class TestBackupCommandLineTool {
 output = baos.toString();
 System.out.println(baos.toString());
 assertTrue(output.indexOf("Usage: hbase backup progress") >= 0);
-
-baos = new ByteArrayOutputStream();
-System.setErr(new PrintStream(baos));
-args = new String[]{"progress" };
-ToolRunner.run(conf, new BackupDriver(), args);
-
-output = baos.toString();
-System.out.println(baos.toString());
-assertTrue(output.indexOf("Usage: hbase backup progress") >= 0);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/7dab7509/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
--
diff --git 

[38/50] [abbrv] hbase git commit: HBASE-16349 TestClusterId may hang during cluster shutdown

2016-09-19 Thread syuanjiang
HBASE-16349 TestClusterId may hang during cluster shutdown


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2597217a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2597217a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2597217a

Branch: refs/heads/hbase-12439
Commit: 2597217ae5aa057e1931c772139ce8cc7a2b3efb
Parents: e19632a
Author: tedyu 
Authored: Fri Sep 16 02:32:03 2016 -0700
Committer: tedyu 
Committed: Fri Sep 16 02:32:03 2016 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/TestClusterId.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2597217a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
index 9c03201..33ca73d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
@@ -56,6 +56,7 @@ public class TestClusterId {
 
   @Before
   public void setUp() throws Exception {
+TEST_UTIL.getConfiguration().setBoolean(ShutdownHook.RUN_SHUTDOWN_HOOK, 
false);
   }
 
   @After



[32/50] [abbrv] hbase git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hbase

2016-09-19 Thread syuanjiang
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hbase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6f8f6db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6f8f6db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6f8f6db

Branch: refs/heads/hbase-12439
Commit: e6f8f6dbd0f7e801fd0fbafbe8cb35492bf08285
Parents: 156a8b2 1d6c90b
Author: anoopsamjohn 
Authored: Thu Sep 15 18:07:12 2016 +0530
Committer: anoopsamjohn 
Committed: Thu Sep 15 18:07:12 2016 +0530

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   5 +-
 .../hadoop/hbase/client/AsyncProcess.java   |   4 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   6 +-
 .../hadoop/hbase/ipc/AbstractRpcClient.java |  22 
 .../hbase/ipc/ServerTooBusyException.java   |  38 ++
 .../org/apache/hadoop/hbase/HConstants.java |  12 ++
 .../java/org/apache/hadoop/hbase/KeyValue.java  |  15 ++-
 .../src/main/resources/hbase-default.xml|  16 ++-
 .../AbstractStateMachineNamespaceProcedure.java |  70 +++
 .../AbstractStateMachineTableProcedure.java | 111 +
 .../procedure/AddColumnFamilyProcedure.java |  38 ++
 .../procedure/CloneSnapshotProcedure.java   |  30 +
 .../procedure/CreateNamespaceProcedure.java |  28 +
 .../master/procedure/CreateTableProcedure.java  |  43 ++-
 .../procedure/DeleteColumnFamilyProcedure.java  |  38 ++
 .../procedure/DeleteNamespaceProcedure.java |  34 +-
 .../master/procedure/DeleteTableProcedure.java  |  49 ++--
 .../master/procedure/DisableTableProcedure.java |  55 ++---
 .../DispatchMergingRegionsProcedure.java|  16 +--
 .../master/procedure/EnableTableProcedure.java  |  55 ++---
 .../procedure/MasterDDLOperationHelper.java |  16 ---
 .../procedure/ModifyColumnFamilyProcedure.java  |  38 ++
 .../procedure/ModifyNamespaceProcedure.java |  34 +-
 .../master/procedure/ModifyTableProcedure.java  |  44 ++-
 .../procedure/RestoreSnapshotProcedure.java |  27 +
 .../procedure/TruncateTableProcedure.java   |  36 ++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 119 ++-
 .../TestScannerHeartbeatMessages.java   | 105 +++-
 hbase-shell/src/main/ruby/hbase/security.rb |   9 +-
 hbase-shell/src/main/ruby/hbase/table.rb|  81 +
 .../src/main/ruby/shell/commands/deleteall.rb   |  17 ++-
 hbase-shell/src/test/ruby/hbase/table_test.rb   |  12 ++
 src/main/asciidoc/_chapters/developer.adoc  |  60 ++
 33 files changed, 677 insertions(+), 606 deletions(-)
--




[46/50] [abbrv] hbase git commit: HBASE-16165 Decrease RpcServer.callQueueSize before writeResponse causes OOM (Guanghao Zhang)

2016-09-19 Thread syuanjiang
HBASE-16165 Decrease RpcServer.callQueueSize before writeResponse causes OOM 
(Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4faa8ea9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4faa8ea9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4faa8ea9

Branch: refs/heads/hbase-12439
Commit: 4faa8ea934ba5d9cc8ea4eb0d1e64a38a5e6dc7d
Parents: da3abbc
Author: zhangduo 
Authored: Sun Sep 18 10:05:27 2016 +0800
Committer: zhangduo 
Committed: Sun Sep 18 10:05:27 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4faa8ea9/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index a678237..37b60c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -599,6 +599,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 }
 
 public synchronized void sendResponseIfReady() throws IOException {
+  // set param null to reduce memory pressure
+  this.param = null;
   this.responder.doRespond(this);
 }
 



[04/50] [abbrv] hbase git commit: HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not redundantly iterate over cells (Vincent Poon)

2016-09-19 Thread syuanjiang
HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not 
redundantly iterate over cells (Vincent Poon)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80d8b210
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80d8b210
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80d8b210

Branch: refs/heads/hbase-12439
Commit: 80d8b2100d9f4dc2a01ea6bdbded6ec52d7e4263
Parents: cc2a40a
Author: chenheng 
Authored: Sun Sep 11 09:55:08 2016 +0800
Committer: chenheng 
Committed: Sun Sep 11 09:55:08 2016 +0800

--
 .../hbase/replication/BulkLoadCellFilter.java   |  81 
 .../hbase/replication/ChainWALEntryFilter.java  |  38 +-
 .../hbase/replication/ScopeWALEntryFilter.java  |  94 --
 .../replication/TableCfWALEntryFilter.java  | 124 +++
 .../hadoop/hbase/replication/WALCellFilter.java |  41 ++
 .../TestReplicationWALEntryFilters.java |  12 +-
 6 files changed, 231 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80d8b210/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
new file mode 100644
index 000..3599d10
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+
+import com.google.common.base.Predicate;
+
+public class BulkLoadCellFilter {
+  private static final Log LOG = LogFactory.getLog(BulkLoadCellFilter.class);
+
+  /**
+   * Filters the bulk load cell using the supplied predicate.
+   * @param cell The WAL cell to filter.
+   * @param famPredicate Returns true of given family should be removed.
+   * @return The filtered cell.
+   */
+  public Cell filterCell(Cell cell, Predicate famPredicate) {
+byte[] fam;
+BulkLoadDescriptor bld = null;
+try {
+  bld = WALEdit.getBulkLoadDescriptor(cell);
+} catch (IOException e) {
+  LOG.warn("Failed to get bulk load events information from the WAL 
file.", e);
+  return cell;
+}
+List storesList = bld.getStoresList();
+// Copy the StoreDescriptor list and update it as storesList is a 
unmodifiableList
+List copiedStoresList = new 
ArrayList(storesList);
+Iterator copiedStoresListIterator = 
copiedStoresList.iterator();
+boolean anyStoreRemoved = false;
+while (copiedStoresListIterator.hasNext()) {
+  StoreDescriptor sd = copiedStoresListIterator.next();
+  fam = sd.getFamilyName().toByteArray();
+  if (famPredicate.apply(fam)) {
+copiedStoresListIterator.remove();
+anyStoreRemoved = true;
+  }
+}
+
+if (!anyStoreRemoved) {
+  return cell;
+} else if (copiedStoresList.isEmpty()) {
+  return null;
+}
+BulkLoadDescriptor.Builder newDesc =
+BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName())
+.setEncodedRegionName(bld.getEncodedRegionName())
+.setBulkloadSeqNum(bld.getBulkloadSeqNum());
+newDesc.addAllStores(copiedStoresList);
+BulkLoadDescriptor newBulkLoadDescriptor = newDesc.build();
+return 

[09/50] [abbrv] hbase git commit: HBASE-16614 Use daemon thread for netty event loop

2016-09-19 Thread syuanjiang
HBASE-16614 Use daemon thread for netty event loop


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0860bdb6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0860bdb6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0860bdb6

Branch: refs/heads/hbase-12439
Commit: 0860bdb675180a7ed9a1bbafe542b9e730d39e31
Parents: 105bfc7
Author: zhangduo 
Authored: Mon Sep 12 16:32:03 2016 +0800
Committer: zhangduo 
Committed: Mon Sep 12 22:05:03 2016 +0800

--
 .../apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java| 5 -
 .../main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java   | 4 +++-
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0860bdb6/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java
index c7c0f32..f710d54 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DefaultNettyEventLoopConfig.java
@@ -21,6 +21,7 @@ import io.netty.channel.Channel;
 import io.netty.channel.EventLoopGroup;
 import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.channel.socket.nio.NioSocketChannel;
+import io.netty.util.concurrent.DefaultThreadFactory;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Pair;
@@ -32,6 +33,8 @@ import org.apache.hadoop.hbase.util.Pair;
 class DefaultNettyEventLoopConfig {
 
   public static final Pair 
GROUP_AND_CHANNEL_CLASS = Pair
-  . newPair(new 
NioEventLoopGroup(),
+  . newPair(
+new NioEventLoopGroup(0,
+new DefaultThreadFactory("Default-IPC-NioEventLoopGroup", true, 
Thread.MAX_PRIORITY)),
 NioSocketChannel.class);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0860bdb6/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java
index 8c568af..cde453f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java
@@ -21,6 +21,7 @@ import io.netty.channel.Channel;
 import io.netty.channel.EventLoopGroup;
 import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.channel.socket.nio.NioSocketChannel;
+import io.netty.util.concurrent.DefaultThreadFactory;
 
 import java.io.IOException;
 import java.net.SocketAddress;
@@ -51,7 +52,8 @@ public class NettyRpcClient extends 
AbstractRpcClient {
 .getEventLoopConfig(conf);
 if (groupAndChannelClass == null) {
   // Use our own EventLoopGroup.
-  this.group = new NioEventLoopGroup();
+  this.group = new NioEventLoopGroup(0,
+  new DefaultThreadFactory("IPC-NioEventLoopGroup", true, 
Thread.MAX_PRIORITY));
   this.channelClass = NioSocketChannel.class;
   this.shutdownGroupWhenClose = true;
 } else {



[21/50] [abbrv] hbase git commit: HBASE-16229 Cleaning up size and heapSize calculation.

2016-09-19 Thread syuanjiang
HBASE-16229 Cleaning up size and heapSize calculation.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/77b32732
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/77b32732
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/77b32732

Branch: refs/heads/hbase-12439
Commit: 77b327320a72ca01b35f655c42f8c13f659dff31
Parents: 2566cfe
Author: anoopsamjohn 
Authored: Tue Sep 13 11:43:26 2016 +0530
Committer: Gary Helmling 
Committed: Mon Sep 12 23:23:38 2016 -0700

--
 .../org/apache/hadoop/hbase/util/ClassSize.java |  15 +++
 .../hbase/regionserver/AbstractMemStore.java|  49 +++
 .../hbase/regionserver/CompactingMemStore.java  |  92 ++---
 .../hbase/regionserver/CompactionPipeline.java  |  31 +++--
 .../hbase/regionserver/DefaultMemStore.java |  30 ++---
 .../hbase/regionserver/ImmutableSegment.java|  84 ++--
 .../hadoop/hbase/regionserver/MemStore.java |   6 +-
 .../hbase/regionserver/MemStoreCompactor.java   |  24 ++--
 .../hbase/regionserver/MemStoreSnapshot.java|   2 +-
 .../hbase/regionserver/MutableSegment.java  |  25 ++--
 .../hadoop/hbase/regionserver/Segment.java  | 103 ++-
 .../hbase/regionserver/SegmentFactory.java  |  43 +++
 .../apache/hadoop/hbase/io/TestHeapSize.java|  92 -
 .../regionserver/TestCompactingMemStore.java|  22 +---
 .../TestCompactingToCellArrayMapMemStore.java   |   4 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |  49 +++
 .../TestWalAndCompactingMemStoreFlush.java  | 129 +++
 17 files changed, 417 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/77b32732/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index ff9dbcb..85a6483 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -46,6 +46,12 @@ public class ClassSize {
   /** Overhead for ArrayList(0) */
   public static final int ARRAYLIST;
 
+  /** Overhead for LinkedList(0) */
+  public static final int LINKEDLIST;
+
+  /** Overhead for a single entry in LinkedList */
+  public static final int LINKEDLIST_ENTRY;
+
   /** Overhead for ByteBuffer */
   public static final int BYTE_BUFFER;
 
@@ -100,6 +106,9 @@ public class ClassSize {
   /** Overhead for AtomicBoolean */
   public static final int ATOMIC_BOOLEAN;
 
+  /** Overhead for AtomicReference */
+  public static final int ATOMIC_REFERENCE;
+
   /** Overhead for CopyOnWriteArraySet */
   public static final int COPYONWRITE_ARRAYSET;
 
@@ -240,6 +249,10 @@ public class ClassSize {
 
 ARRAYLIST = align(OBJECT + REFERENCE + (2 * Bytes.SIZEOF_INT)) + 
align(ARRAY);
 
+LINKEDLIST = align(OBJECT + (2 * Bytes.SIZEOF_INT) + (2 * REFERENCE));
+
+LINKEDLIST_ENTRY = align(OBJECT + (2 * REFERENCE));
+
 //noinspection PointlessArithmeticExpression
 BYTE_BUFFER = align(OBJECT + REFERENCE +
 (5 * Bytes.SIZEOF_INT) +
@@ -292,6 +305,8 @@ public class ClassSize {
 
 ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN);
 
+ATOMIC_REFERENCE = align(OBJECT + REFERENCE);
+
 COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE);
 
 COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY);

http://git-wip-us.apache.org/repos/asf/hbase/blob/77b32732/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 419b76a..5e9f632 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -52,34 +52,29 @@ public abstract class AbstractMemStore implements MemStore {
   private final CellComparator comparator;
 
   // active segment absorbs write operations
-  private volatile MutableSegment active;
+  protected volatile MutableSegment active;
   // Snapshot of memstore.  Made for flusher.
-  private volatile ImmutableSegment snapshot;
+  protected volatile ImmutableSegment snapshot;
   protected volatile long snapshotId;
   // Used to track when to flush
   private volatile long timeOfOldestEdit;
 
-  public final static long FIXED_OVERHEAD = ClassSize.align(
-  

[02/50] [abbrv] hbase git commit: HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom endpoints

2016-09-19 Thread syuanjiang
HBASE-16576 Shell add_peer doesn't allow setting cluster_key for custom 
endpoints

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1e06372
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1e06372
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1e06372

Branch: refs/heads/hbase-12439
Commit: e1e06372004bd9c6694d5489ede4d0529512f699
Parents: 6c8d1f0
Author: Geoffrey 
Authored: Wed Sep 7 14:48:33 2016 -0700
Committer: Andrew Purtell 
Committed: Fri Sep 9 14:54:20 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  2 --
 .../src/main/ruby/shell/commands/add_peer.rb|  6 -
 .../test/ruby/hbase/replication_admin_test.rb   | 23 ++--
 3 files changed, 16 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1e06372/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 7eae7af..4de3962 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -51,8 +51,6 @@ module Hbase
 # or neither are provided
 if endpoint_classname.nil? and cluster_key.nil?
   raise(ArgumentError, "Either ENDPOINT_CLASSNAME or CLUSTER_KEY must 
be specified.")
-elsif !endpoint_classname.nil? and !cluster_key.nil?
-  raise(ArgumentError, "ENDPOINT_CLASSNAME and CLUSTER_KEY cannot both 
be specified.")
 end
 
 # Cluster Key is required for ReplicationPeerConfig for a custom 
replication endpoint

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1e06372/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb 
b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index 558e86d..e9431cf 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -52,8 +52,12 @@ the key TABLE_CFS.
   hbase> add_peer '11', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
 DATA => { "key1" => 1 }, CONFIG => { "config1" => "value1", "config2" => 
"value2" },
 TABLE_CFS => { "table1" => [], "ns2:table2" => ["cf1"], "ns3:table3" => 
["cf1", "cf2"] }
+  hbase> add_peer '12', ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint',
+CLUSTER_KEY => "server2.cie.com:2181:/hbase"
+
+Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified. If 
ENDPOINT_CLASSNAME is specified, CLUSTER_KEY is
+optional and should only be specified if a particular custom endpoint requires 
it.
 
-Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified but not both.
 EOF
   end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1e06372/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index cf6eac2..1d27e67 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -55,14 +55,6 @@ module Hbase
   end
 end
 
-define_test "add_peer: fail when both CLUSTER_KEY and ENDPOINT_CLASSNAME 
are specified" do
-  assert_raise(ArgumentError) do
-args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod',
- ENDPOINT_CLASSNAME => 
'org.apache.hadoop.hbase.MyReplicationEndpoint' }
-command(:add_peer, @peer_id, args)
-  end
-end
-
 define_test "add_peer: args must be a hash" do
   assert_raise(ArgumentError) do
 command(:add_peer, @peer_id, 1)
@@ -132,7 +124,7 @@ module Hbase
 define_test "add_peer: multiple zk cluster key and table_cfs - peer 
config" do
   cluster_key = "zk4,zk5,zk6:11000:/hbase-test"
   table_cfs = { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", 
"cf2"] }
-  table_cfs_str = 
"default.table1;default.table3:cf1,cf2;default.table2:cf1"
+  #table_cfs_str = 
"default.table1;default.table3:cf1,cf2;default.table2:cf1"
 
   args = { CLUSTER_KEY => cluster_key, TABLE_CFS => table_cfs }
   command(:add_peer, @peer_id, args)
@@ -140,7 +132,11 @@ module Hbase
   assert_equal(1, command(:list_peers).length)
   assert(command(:list_peers).key?(@peer_id))
   assert_equal(cluster_key, 

[16/50] [abbrv] hbase git commit: HBASE-16592 Unify Delete request with AP

2016-09-19 Thread syuanjiang
HBASE-16592 Unify Delete request with AP


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/831fb3cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/831fb3cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/831fb3cc

Branch: refs/heads/hbase-12439
Commit: 831fb3ccb8a0ba449d249962379afd268e8fe032
Parents: 1cdc5ac
Author: chenheng 
Authored: Tue Sep 13 10:07:45 2016 +0800
Committer: chenheng 
Committed: Tue Sep 13 10:07:45 2016 +0800

--
 .../hadoop/hbase/client/AbstractResponse.java   | 38 
 .../hadoop/hbase/client/AsyncProcess.java   | 22 ---
 .../org/apache/hadoop/hbase/client/HTable.java  | 43 -
 .../hadoop/hbase/client/MultiResponse.java  |  7 ++-
 .../hadoop/hbase/client/SingleResponse.java | 65 
 .../hbase/protobuf/ResponseConverter.java   | 14 +
 .../hadoop/hbase/client/TestAsyncProcess.java   | 18 +++---
 .../hadoop/hbase/client/TestFromClientSide.java | 46 ++
 8 files changed, 222 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/831fb3cc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
new file mode 100644
index 000..7878d05
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
@@ -0,0 +1,38 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * This class is used to extend AP to process single action request, like 
delete, get etc.
+ */
+@InterfaceAudience.Private
+abstract class AbstractResponse {
+
+  public enum ResponseType {
+
+SINGLE(0),
+MULTI   (1);
+
+ResponseType(int value) {}
+  }
+
+  public abstract ResponseType type();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/831fb3cc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index c5745e9..1531201 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -756,14 +756,14 @@ class AsyncProcess {
 
   @Override
   public void run() {
-MultiResponse res;
+AbstractResponse res;
 CancellableRegionServerCallable callable = currentCallable;
 try {
   // setup the callable based on the actions, if we don't have one 
already from the request
   if (callable == null) {
 callable = createCallable(server, tableName, multiAction);
   }
-  RpcRetryingCaller caller = createCaller(callable);
+  RpcRetryingCaller caller = createCaller(callable);
   try {
 if (callsInProgress != null) {
   callsInProgress.add(callable);
@@ -785,9 +785,16 @@ class AsyncProcess {
 receiveGlobalFailure(multiAction, server, numAttempt, t);
 return;
   }
-
-  // Normal case: we received an answer from the server, and it's not 
an exception.
-  receiveMultiAction(multiAction, server, res, numAttempt);
+  if (res.type() == AbstractResponse.ResponseType.MULTI) {
+// Normal case: we received an answer from the server, and it's 
not an exception.
+receiveMultiAction(multiAction, server, (MultiResponse) res, 
numAttempt);
+  } else {
+if (results != null) {
+  

[15/50] [abbrv] hbase git commit: Adding checks in Scanner's setStartRow and setStopRow for invalid row key sizes.

2016-09-19 Thread syuanjiang
Adding checks in Scanner's setStartRow and setStopRow for invalid row key sizes.

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1cdc5acf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1cdc5acf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1cdc5acf

Branch: refs/heads/hbase-12439
Commit: 1cdc5acfd456688f106287c19ae9af62fd03a3af
Parents: 8855670
Author: Dustin Pho 
Authored: Mon Sep 12 13:25:02 2016 -0700
Committer: Gary Helmling 
Committed: Mon Sep 12 16:54:51 2016 -0700

--
 .../org/apache/hadoop/hbase/client/Scan.java| 16 +++
 .../apache/hadoop/hbase/client/TestScan.java| 28 
 2 files changed, 44 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1cdc5acf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index ee3ed43..22f611a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -373,8 +373,16 @@ public class Scan extends Query {
* next closest row after the specified row.
* @param startRow row to start scanner at or after
* @return this
+   * @throws IllegalArgumentException if startRow does not meet criteria
+   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
   public Scan setStartRow(byte [] startRow) {
+if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
+  throw new IllegalArgumentException(
+"startRow's length must be less than or equal to " +
+HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
+" for a row key.");
+}
 this.startRow = startRow;
 return this;
   }
@@ -389,8 +397,16 @@ public class Scan extends Query {
* use {@link #setRowPrefixFilter(byte[])}.
* The 'trailing 0' will not yield the desired result.
* @return this
+   * @throws IllegalArgumentException if stopRow does not meet criteria
+   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
   public Scan setStopRow(byte [] stopRow) {
+if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
+  throw new IllegalArgumentException(
+"stopRow's length must be less than or equal to " +
+HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
+" for a row key.");
+}
 this.stopRow = stopRow;
 return this;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1cdc5acf/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
index 129914f..16c74df 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
@@ -25,6 +25,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
@@ -132,5 +134,31 @@ public class TestScan {
   fail("should not throw exception");
 }
   }
+
+  @Test
+  public void testSetStartRowAndSetStopRow() {
+Scan scan = new Scan();
+scan.setStartRow(null);
+scan.setStartRow(new byte[1]);
+scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH]);
+try {
+  scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
+  fail("should've thrown exception");
+} catch (IllegalArgumentException iae) {
+} catch (Exception e) {
+  fail("expected IllegalArgumentException to be thrown");
+}
+
+scan.setStopRow(null);
+scan.setStopRow(new byte[1]);
+scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH]);
+try {
+  scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
+  fail("should've thrown exception");
+} catch (IllegalArgumentException iae) {
+} catch (Exception e) {
+  fail("expected IllegalArgumentException to be thrown");
+}
+  }
 }
 



[06/50] [abbrv] hbase git commit: HBASE-16606 Remove some duplicate code in HTable

2016-09-19 Thread syuanjiang
HBASE-16606 Remove some duplicate code in HTable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c3b0f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c3b0f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c3b0f2c

Branch: refs/heads/hbase-12439
Commit: 2c3b0f2c0b2d47dfd3a22e1f47f7eb1317d3514f
Parents: 7bda515
Author: chenheng 
Authored: Mon Sep 12 10:57:21 2016 +0800
Committer: chenheng 
Committed: Mon Sep 12 10:57:21 2016 +0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 44 +---
 1 file changed, 10 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c3b0f2c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 492714f..e98424c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -479,16 +479,18 @@ public class HTable implements Table {
   @Override
   public void batch(final List actions, final Object[] results)
   throws InterruptedException, IOException {
-AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, actions, null, 
results);
-ars.waitUntilDone();
-if (ars.hasError()) {
-  throw ars.getErrors();
-}
+batch(actions, results, -1);
   }
 
   public void batch(final List actions, final Object[] results, 
int timeout)
   throws InterruptedException, IOException {
-AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, actions, null, 
results, null, timeout);
+AsyncRequestFuture ars = null;
+if (timeout != -1) {
+  ars = multiAp.submitAll(pool, tableName, actions, null, results, null, 
timeout);
+} else {
+  // use default timeout in AP
+  ars = multiAp.submitAll(pool, tableName, actions, null, results);
+}
 ars.waitUntilDone();
 if (ars.hasError()) {
   throw ars.getErrors();
@@ -720,20 +722,7 @@ public class HTable implements Table {
   final byte [] family, final byte [] qualifier, final byte [] value,
   final Put put)
   throws IOException {
-RegionServerCallable callable =
-new RegionServerCallable(this.connection, 
this.rpcControllerFactory,
-getName(), row) {
-  @Override
-  protected Boolean rpcCall() throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
-  new BinaryComparator(value), CompareType.EQUAL, put);
-MutateResponse response = getStub().mutate(getRpcController(), 
request);
-return Boolean.valueOf(response.getProcessed());
-  }
-};
-return rpcCallerFactory. newCaller(this.writeRpcTimeout).
-callWithRetries(callable, this.operationTimeout);
+return checkAndPut(row, family, qualifier, CompareOp.EQUAL, value, put);
   }
 
   /**
@@ -768,20 +757,7 @@ public class HTable implements Table {
   public boolean checkAndDelete(final byte [] row, final byte [] family, final 
byte [] qualifier,
   final byte [] value, final Delete delete)
   throws IOException {
-RegionServerCallable callable =
-new RegionServerCallable(this.connection, 
this.rpcControllerFactory,
-getName(), row) {
-  @Override
-  protected Boolean rpcCall() throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
-  new BinaryComparator(value), CompareType.EQUAL, delete);
-MutateResponse response = getStub().mutate(getRpcController(), 
request);
-return Boolean.valueOf(response.getProcessed());
-  }
-};
-return rpcCallerFactory. newCaller(this.writeRpcTimeout).
-callWithRetries(callable, this.operationTimeout);
+return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, 
delete);
   }
 
   /**



[27/50] [abbrv] hbase git commit: HBASE-16612 Use array to cache Types for KeyValue.Type.codeToType (Phil Yang)

2016-09-19 Thread syuanjiang
HBASE-16612 Use array to cache Types for KeyValue.Type.codeToType (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/981200bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/981200bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/981200bf

Branch: refs/heads/hbase-12439
Commit: 981200bf1344e2c58559874cb7a66132f703efd6
Parents: a602aaf
Author: tedyu 
Authored: Tue Sep 13 09:54:27 2016 -0700
Committer: tedyu 
Committed: Tue Sep 13 09:54:27 2016 -0700

--
 .../main/java/org/apache/hadoop/hbase/KeyValue.java  | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/981200bf/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index c1734cc..0c33a96 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -252,6 +252,14 @@ public class KeyValue implements Cell, HeapSize, 
Cloneable, SettableSequenceId,
   return this.code;
 }
 
+private static Type[] codeArray = new Type[256];
+
+static {
+  for (Type t : Type.values()) {
+codeArray[t.code & 0xff] = t;
+  }
+}
+
 /**
  * Cannot rely on enum ordinals . They change if item is removed or moved.
  * Do our own codes.
@@ -259,10 +267,9 @@ public class KeyValue implements Cell, HeapSize, 
Cloneable, SettableSequenceId,
  * @return Type associated with passed code.
  */
 public static Type codeToType(final byte b) {
-  for (Type t : Type.values()) {
-if (t.getCode() == b) {
-  return t;
-}
+  Type t = codeArray[b & 0xff];
+  if (t != null) {
+return t;
   }
   throw new RuntimeException("Unknown code " + b);
 }



[14/50] [abbrv] hbase git commit: HBASE-16616 Rpc handlers stuck on ThreadLocalMap.expungeStaleEntry (Tomu Tsuruhara)

2016-09-19 Thread syuanjiang
HBASE-16616 Rpc handlers stuck on ThreadLocalMap.expungeStaleEntry (Tomu 
Tsuruhara)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8855670c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8855670c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8855670c

Branch: refs/heads/hbase-12439
Commit: 8855670cd701fdf9c2ab41907f9525d122608e6d
Parents: 552400e
Author: tedyu 
Authored: Mon Sep 12 15:49:23 2016 -0700
Committer: tedyu 
Committed: Mon Sep 12 15:49:23 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/util/Counter.java  | 4 
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 1 +
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8855670c/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
index 7b8a7e9..92c0a8f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
@@ -181,6 +181,10 @@ public class Counter {
 return sum;
   }
 
+  public void destroy() {
+indexHolderThreadLocal.remove();
+  }
+
   @Override
   public String toString() {
 Cell[] cells = containerRef.get().cells;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8855670c/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 0df5097..a678237 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1957,6 +1957,7 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
   LOG.trace("Ignored exception", ignored);
 }
   }
+  rpcCount.destroy();
 }
 
 private UserGroupInformation createUser(ConnectionHeader head) {



[20/50] [abbrv] hbase git commit: HBASE-16229 Cleaning up size and heapSize calculation.

2016-09-19 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/77b32732/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index a6c7912..74826b0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -19,10 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -31,19 +28,14 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -59,7 +51,6 @@ import static org.junit.Assert.assertTrue;
 @Category({ RegionServerTests.class, LargeTests.class })
 public class TestWalAndCompactingMemStoreFlush {
 
-  private static final Log LOG = 
LogFactory.getLog(TestWalAndCompactingMemStoreFlush.class);
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion");
   public static final TableName TABLENAME = 
TableName.valueOf("TestWalAndCompactingMemStoreFlush",
@@ -201,12 +192,13 @@ public class TestWalAndCompactingMemStoreFlush {
 // memstores of CF1, CF2 and CF3.
 String msg = "totalMemstoreSize="+totalMemstoreSize +
 " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
-" 
DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM
-+
+" CompactingMemStore.DEEP_OVERHEAD="+CompactingMemStore.DEEP_OVERHEAD +
 " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
 " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
 " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
-assertEquals(msg,totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD,
+assertEquals(msg,
+totalMemstoreSize + 2 * (CompactingMemStore.DEEP_OVERHEAD + 
MutableSegment.DEEP_OVERHEAD)
++ (DefaultMemStore.DEEP_OVERHEAD + MutableSegment.DEEP_OVERHEAD),
 cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI);
 
 // Flush!!
@@ -220,11 +212,6 @@ public class TestWalAndCompactingMemStoreFlush {
 ((CompactingMemStore) 
region.getStore(FAMILY3).getMemStore()).flushInMemory();
 region.flush(false);
 
-// CF3 should be compacted so wait here to be sure the compaction is done
-while (((CompactingMemStore) region.getStore(FAMILY3).getMemStore())
-.isMemStoreFlushingInMemory())
-  Threads.sleep(10);
-
 // Recalculate everything
 long cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize();
 long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
@@ -239,8 +226,6 @@ public class TestWalAndCompactingMemStoreFlush {
 
 s = s + "DefaultMemStore DEEP_OVERHEAD is:" + DefaultMemStore.DEEP_OVERHEAD
 + ", CompactingMemStore DEEP_OVERHEAD is:" + 
CompactingMemStore.DEEP_OVERHEAD
-+ ", CompactingMemStore DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM is:" 
+ CompactingMemStore
-.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM
 + "\nAfter first flush! CF1 should be flushed to memory, but not 
compacted.---\n"
 + "Size of CF1 is:" + cf1MemstoreSizePhaseII + ", size of CF2 is:" + 
cf2MemstoreSizePhaseII
 + ", size of CF3 is:" + cf3MemstoreSizePhaseII + "\n";
@@ -249,12 +234,13 @@ public class TestWalAndCompactingMemStoreFlush {
 assertTrue(cf1MemstoreSizePhaseII < cf1MemstoreSizePhaseI);
 
 // 

[43/50] [abbrv] hbase git commit: HBASE-16544 Remove or Clarify Using Amazon S3 Storage section in the reference guide -addendum (Yi Liang)

2016-09-19 Thread syuanjiang
HBASE-16544 Remove or Clarify Using Amazon S3 Storage section in the reference 
guide -addendum (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb3d9ccd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb3d9ccd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb3d9ccd

Branch: refs/heads/hbase-12439
Commit: bb3d9ccd489fb64e3cb2020583935a393382a678
Parents: 1a1003a
Author: Jerry He 
Authored: Fri Sep 16 18:34:23 2016 -0700
Committer: Jerry He 
Committed: Fri Sep 16 18:34:23 2016 -0700

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb3d9ccd/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 0d184a7..550d5f7 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -2056,9 +2056,7 @@ $ bin/hbase 
org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot
 [[snapshots_s3]]
 === Storing Snapshots in an Amazon S3 Bucket
 
-For general information and limitations of using Amazon S3 storage with HBase, 
see
-<>. You can also store and retrieve snapshots from 
Amazon
-S3, using the following procedure.
+You can store and retrieve snapshots from Amazon S3, using the following 
procedure.
 
 NOTE: You can also store snapshots in Microsoft Azure Blob Storage. See 
<>.
 



[03/50] [abbrv] hbase git commit: HBASE-16596 Reduce redundant interfaces in AsyncProcess

2016-09-19 Thread syuanjiang
HBASE-16596 Reduce redundant interfaces in AsyncProcess


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc2a40a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc2a40a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc2a40a7

Branch: refs/heads/hbase-12439
Commit: cc2a40a78f4e65ef38dad2cbc921613c4d15cbf7
Parents: e1e0637
Author: chenheng 
Authored: Sat Sep 10 11:13:28 2016 +0800
Committer: chenheng 
Committed: Sat Sep 10 11:13:28 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 29 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  2 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   | 59 +---
 .../hadoop/hbase/client/TestClientPushback.java |  2 +-
 4 files changed, 31 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 5bb0f58..c5745e9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -392,15 +392,7 @@ class AsyncProcess {
 }
 throw new RuntimeException("Neither AsyncProcess nor request have 
ExecutorService");
   }
-  /**
-   * See {@link #submit(ExecutorService, TableName, List, boolean, 
Batch.Callback, boolean)}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submit(TableName tableName, final List rows,
-  boolean atLeastOne, Batch.Callback callback, boolean 
needResults)
-  throws InterruptedIOException {
-return submit(null, tableName, rows, atLeastOne, callback, needResults);
-  }
+
   /**
* See {@link #submit(ExecutorService, TableName, RowAccess, boolean, 
Batch.Callback, boolean)}.
* Uses default ExecutorService for this AP (must have been created with 
one).
@@ -529,7 +521,7 @@ class AsyncProcess {
   List locationErrorRows, Map 
actionsByServer,
   ExecutorService pool) {
 AsyncRequestFutureImpl ars = createAsyncRequestFuture(
-  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults);
+  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults, null, timeout);
 // Add location errors if any
 if (locationErrors != null) {
   for (int i = 0; i < locationErrors.size(); ++i) {
@@ -564,14 +556,6 @@ class AsyncProcess {
 
 multiAction.add(regionName, action);
   }
-  /**
-   * See {@link #submitAll(ExecutorService, TableName, List, Batch.Callback, 
Object[])}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submitAll(TableName tableName,
-  List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(null, tableName, rows, callback, results, null, timeout);
-  }
 
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
@@ -1785,15 +1769,6 @@ class AsyncProcess {
 results, callback, callable, curTimeout);
   }
 
-  @VisibleForTesting
-  /** Create AsyncRequestFuture. Isolated to be easily overridden in the 
tests. */
-  protected  AsyncRequestFutureImpl createAsyncRequestFuture(
-  TableName tableName, List actions, long nonceGroup, 
ExecutorService pool,
-  Batch.Callback callback, Object[] results, boolean needResults) 
{
-return createAsyncRequestFuture(
-tableName, actions, nonceGroup, pool, callback, results, needResults, 
null, timeout);
-  }
-
   /**
* Create a callable. Isolated to be easily overridden in the tests.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index f8bbfc1..492714f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -1218,7 +1218,7 @@ public class HTable implements Table {
 RpcRetryingCallerFactory.instantiate(configuration, 
connection.getStatisticsTracker()),
 true, 

[25/50] [abbrv] hbase git commit: HBASE-16611 Flakey org.apache.hadoop.hbase.client.TestReplicasClient.testCancelOfMultiGet

2016-09-19 Thread syuanjiang
HBASE-16611 Flakey 
org.apache.hadoop.hbase.client.TestReplicasClient.testCancelOfMultiGet


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd9f4223
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd9f4223
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd9f4223

Branch: refs/heads/hbase-12439
Commit: cd9f42237344756a7db395bd8241f41b00e359a2
Parents: 422734e
Author: chenheng 
Authored: Tue Sep 13 14:52:50 2016 +0800
Committer: chenheng 
Committed: Tue Sep 13 14:52:50 2016 +0800

--
 .../main/java/org/apache/hadoop/hbase/client/AsyncProcess.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd9f4223/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 1531201..93b17bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -756,7 +756,7 @@ class AsyncProcess {
 
   @Override
   public void run() {
-AbstractResponse res;
+AbstractResponse res = null;
 CancellableRegionServerCallable callable = currentCallable;
 try {
   // setup the callable based on the actions, if we don't have one 
already from the request
@@ -802,7 +802,7 @@ class AsyncProcess {
   throw new RuntimeException(t);
 } finally {
   decTaskCounters(multiAction.getRegions(), server);
-  if (callsInProgress != null && callable != null) {
+  if (callsInProgress != null && callable != null && res != null) {
 callsInProgress.remove(callable);
   }
 }



[07/50] [abbrv] hbase git commit: HBASE-16607 Make NoncedRegionServerCallable extend CancellableRegionServerCallable

2016-09-19 Thread syuanjiang
HBASE-16607 Make NoncedRegionServerCallable extend 
CancellableRegionServerCallable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c19d2cab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c19d2cab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c19d2cab

Branch: refs/heads/hbase-12439
Commit: c19d2cabbd4c6e312e4926f72d348a5e554cd3dd
Parents: 2c3b0f2
Author: chenheng 
Authored: Mon Sep 12 11:03:29 2016 +0800
Committer: chenheng 
Committed: Mon Sep 12 11:03:29 2016 +0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 52 +++---
 .../client/NoncedRegionServerCallable.java  | 74 ++--
 2 files changed, 31 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c19d2cab/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index e98424c..0d1b156 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -630,17 +630,17 @@ public class HTable implements Table {
   public Result append(final Append append) throws IOException {
 checkHasFamilies(append);
 NoncedRegionServerCallable callable =
-new NoncedRegionServerCallable(this.connection,
-this.rpcControllerFactory, getName(), append.getRow()) {
-  @Override
-  protected Result call(HBaseRpcController controller) throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), append, 
getNonceGroup(), getNonce());
-MutateResponse response = getStub().mutate(controller, request);
-if (!response.hasResult()) return null;
-return ProtobufUtil.toResult(response.getResult(), 
controller.cellScanner());
-  }
-};
+new NoncedRegionServerCallable(this.connection, 
this.rpcControllerFactory,
+getName(), append.getRow()) {
+  @Override
+  protected Result rpcCall() throws Exception {
+MutateRequest request = RequestConverter.buildMutateRequest(
+  getLocation().getRegionInfo().getRegionName(), append, 
getNonceGroup(), getNonce());
+MutateResponse response = getStub().mutate(getRpcController(), 
request);
+if (!response.hasResult()) return null;
+return ProtobufUtil.toResult(response.getResult(), 
getRpcControllerCellScanner());
+  }
+};
 return rpcCallerFactory. newCaller(this.writeRpcTimeout).
 callWithRetries(callable, this.operationTimeout);
   }
@@ -652,16 +652,16 @@ public class HTable implements Table {
   public Result increment(final Increment increment) throws IOException {
 checkHasFamilies(increment);
 NoncedRegionServerCallable callable =
-new NoncedRegionServerCallable(this.connection,
-this.rpcControllerFactory, getName(), increment.getRow()) {
-  @Override
-  protected Result call(HBaseRpcController controller) throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), increment, 
getNonceGroup(), getNonce());
-MutateResponse response = getStub().mutate(controller, request);
-// Should this check for null like append does?
-return ProtobufUtil.toResult(response.getResult(), 
controller.cellScanner());
-  }
+  new NoncedRegionServerCallable(this.connection,
+  this.rpcControllerFactory, getName(), increment.getRow()) {
+@Override
+protected Result rpcCall() throws Exception {
+  MutateRequest request = RequestConverter.buildMutateRequest(
+getLocation().getRegionInfo().getRegionName(), increment, 
getNonceGroup(), getNonce());
+  MutateResponse response = getStub().mutate(getRpcController(), 
request);
+  // Should this check for null like append does?
+  return ProtobufUtil.toResult(response.getResult(), 
getRpcControllerCellScanner());
+}
 };
 return rpcCallerFactory. 
newCaller(writeRpcTimeout).callWithRetries(callable,
 this.operationTimeout);
@@ -701,12 +701,12 @@ public class HTable implements Table {
 new NoncedRegionServerCallable(this.connection, 
this.rpcControllerFactory, getName(),
 row) {
   @Override
-  protected Long call(HBaseRpcController controller) throws Exception {
+  protected Long rpcCall() throws Exception {
 MutateRequest 

[48/50] [abbrv] hbase git commit: HBASE-7612 [JDK8] Replace use of high-scale-lib counters with intrinsic facilities

2016-09-19 Thread syuanjiang
HBASE-7612 [JDK8] Replace use of high-scale-lib counters with intrinsic 
facilities


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6eb62254
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6eb62254
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6eb62254

Branch: refs/heads/hbase-12439
Commit: 6eb6225456f13cce3ab641007737b9ffb0193f24
Parents: 9c58d26
Author: zhangduo 
Authored: Sun Sep 18 15:34:37 2016 +0800
Committer: zhangduo 
Committed: Mon Sep 19 13:37:24 2016 +0800

--
 .../hadoop/hbase/trace/SpanReceiverHost.java|   4 +-
 .../org/apache/hadoop/hbase/util/Counter.java   |   2 +
 .../hadoop/hbase/util/FastLongHistogram.java|  48 +++---
 .../metrics2/lib/MetricsExecutorImpl.java   |   2 +-
 .../hadoop/metrics2/lib/MutableFastCounter.java |  10 +-
 .../hadoop/metrics2/lib/MutableHistogram.java   |   7 +-
 .../hadoop/hbase/io/hfile/CacheStats.java   | 156 +--
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  20 ++-
 .../hbase/io/hfile/bucket/BucketCacheStats.java |  20 +--
 .../ipc/MetricsHBaseServerWrapperImpl.java  |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  31 ++--
 .../hadoop/hbase/regionserver/HRegion.java  |  77 -
 .../hbase/regionserver/HRegionServer.java   |  32 ++--
 .../hbase/regionserver/MemStoreFlusher.java |  10 +-
 .../MetricsRegionServerWrapperImpl.java |  14 +-
 .../hbase/regionserver/RSRpcServices.java   |  26 ++--
 .../hbase/regionserver/StoreFileScanner.java|  12 +-
 .../TestFilterListOrOperatorWithBlkCnt.java |   2 +-
 .../io/hfile/TestForceCacheImportantBlocks.java |   6 +-
 .../hbase/regionserver/TestBlocksRead.java  |   2 +-
 20 files changed, 235 insertions(+), 248 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6eb62254/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index b90d191..f632ae0 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -44,8 +44,8 @@ public class SpanReceiverHost {
 
   private static enum SingletonHolder {
 INSTANCE;
-Object lock = new Object();
-SpanReceiverHost host = null;
+transient Object lock = new Object();
+transient SpanReceiverHost host = null;
   }
 
   public static SpanReceiverHost getInstance(Configuration conf) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6eb62254/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
index 92c0a8f..d4cfe26e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java
@@ -26,9 +26,11 @@ import 
org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * High scalable counter. Thread safe.
+ * @deprecated use {@link java.util.concurrent.atomic.LongAdder} instead.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
+@Deprecated
 public class Counter {
   private static final int MAX_CELLS_LENGTH = 1 << 20;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6eb62254/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
index 3c4eccc..310348e 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FastLongHistogram.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
+import java.util.stream.Stream;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -43,7 +45,7 @@ public class FastLongHistogram {
* Bins is a class containing a list of buckets(or bins) for estimation 
histogram of some data.
*/
   private static 

[31/50] [abbrv] hbase git commit: HBASE-16381 Shell deleteall command should support row key prefixes (Yi Liang)

2016-09-19 Thread syuanjiang
HBASE-16381 Shell deleteall command should support row key prefixes (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1d6c90b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1d6c90b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1d6c90b4

Branch: refs/heads/hbase-12439
Commit: 1d6c90b4969b8ec47699c69984be052050a9ee46
Parents: 8ef6c76
Author: chenheng 
Authored: Thu Sep 15 19:18:47 2016 +0800
Committer: chenheng 
Committed: Thu Sep 15 19:20:29 2016 +0800

--
 hbase-shell/src/main/ruby/hbase/table.rb| 81 +++-
 .../src/main/ruby/shell/commands/deleteall.rb   | 17 +++-
 hbase-shell/src/test/ruby/hbase/table_test.rb   | 12 +++
 3 files changed, 86 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1d6c90b4/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 5930c0d..22bbcfe 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -160,6 +160,62 @@ EOF
 end
 
 
#--
+# Create a Delete mutation
+def _createdelete_internal(row, column = nil,
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args 
= {})
+  temptimestamp = timestamp
+  if temptimestamp.kind_of?(Hash)
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP
+  end
+  d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, 
timestamp)
+  if temptimestamp.kind_of?(Hash)
+temptimestamp.each do |k, v|
+  if v.kind_of?(String)
+set_cell_visibility(d, v) if v
+  end
+end
+  end
+  if args.any?
+ visibility = args[VISIBILITY]
+ set_cell_visibility(d, visibility) if visibility
+  end
+  if column
+family, qualifier = parse_column_name(column)
+d.addColumns(family, qualifier, timestamp)
+  end
+  return d
+end
+
+
#--
+# Delete rows using prefix
+def _deleterows_internal(row, column = nil,
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, 
args={})
+  cache = row["CACHE"] ? row["CACHE"] : 100
+  prefix = row["ROWPREFIXFILTER"]
+
+  # create scan to get table names using prefix
+  scan = org.apache.hadoop.hbase.client.Scan.new
+  scan.setRowPrefixFilter(prefix.to_java_bytes)
+  # Run the scanner to get all rowkeys
+  scanner = @table.getScanner(scan)
+  # Create a list to store all deletes
+  list = java.util.ArrayList.new
+  # Iterate results
+  iter = scanner.iterator
+  while iter.hasNext
+row = iter.next
+key = org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow)
+d = _createdelete_internal(key, column, timestamp, args)
+list.add(d)
+if list.size >= cache
+  @table.delete(list)
+  list.clear
+end
+  end
+  @table.delete(list)
+end
+
+
#--
 # Delete a cell
 def _delete_internal(row, column,
timestamp = 
org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
@@ -175,27 +231,12 @@ EOF
   if is_meta_table?
 raise ArgumentError, "Row Not Found" if _get_internal(row).nil?
   end
-  temptimestamp = timestamp
-  if temptimestamp.kind_of?(Hash)
- timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP
-  end
-  d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, 
timestamp)
-  if temptimestamp.kind_of?(Hash)
-   temptimestamp.each do |k, v|
- if v.kind_of?(String)
-   set_cell_visibility(d, v) if v
- end
-end
-  end
-  if args.any?
- visibility = args[VISIBILITY]
- set_cell_visibility(d, visibility) if visibility
-  end
-  if column
-family, qualifier = parse_column_name(column)
-d.addColumns(family, qualifier, timestamp)
+  if row.kind_of?(Hash)
+_deleterows_internal(row, column, timestamp, args)
+  else
+d = _createdelete_internal(row, column, timestamp, args)
+@table.delete(d)
   end
-  @table.delete(d)
 end
 
 
#--


[24/50] [abbrv] hbase git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hbase

2016-09-19 Thread syuanjiang
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hbase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/156a8b2b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/156a8b2b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/156a8b2b

Branch: refs/heads/hbase-12439
Commit: 156a8b2bad2577ace5d0958acc79f56b2bc23e0c
Parents: 2ab3384 77b3273
Author: anoopsamjohn 
Authored: Tue Sep 13 12:08:36 2016 +0530
Committer: anoopsamjohn 
Committed: Tue Sep 13 12:08:36 2016 +0530

--

--




[37/50] [abbrv] hbase git commit: HBASE-16634 Speedup TestExportSnapshot

2016-09-19 Thread syuanjiang
HBASE-16634 Speedup TestExportSnapshot


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e19632a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e19632a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e19632a1

Branch: refs/heads/hbase-12439
Commit: e19632a19abc52a983d444811852bf0f11cd0d0b
Parents: 216e847
Author: Matteo Bertozzi 
Authored: Thu Sep 15 18:29:09 2016 -0700
Committer: Matteo Bertozzi 
Committed: Thu Sep 15 18:29:09 2016 -0700

--
 .../hbase/snapshot/MobSnapshotTestingUtils.java | 15 -
 .../hbase/snapshot/SnapshotTestingUtils.java| 34 
 .../hbase/snapshot/TestExportSnapshot.java  |  8 ++---
 .../snapshot/TestFlushSnapshotFromClient.java   |  8 ++---
 .../hbase/snapshot/TestMobExportSnapshot.java   |  8 ++---
 .../snapshot/TestMobSecureExportSnapshot.java   |  4 +--
 .../snapshot/TestSecureExportSnapshot.java  |  1 -
 7 files changed, 54 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e19632a1/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index 9026115..107f487 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@ -38,6 +38,20 @@ public class MobSnapshotTestingUtils {
   public static void createMobTable(final HBaseTestingUtility util,
   final TableName tableName, int regionReplication,
   final byte[]... families) throws IOException, InterruptedException {
+createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(),
+  regionReplication, families);
+  }
+
+  public static void createPreSplitMobTable(final HBaseTestingUtility util,
+  final TableName tableName, int nRegions, final byte[]... families)
+  throws IOException, InterruptedException {
+createMobTable(util, tableName, 
SnapshotTestingUtils.getSplitKeys(nRegions),
+  1, families);
+  }
+
+  private static void createMobTable(final HBaseTestingUtility util,
+  final TableName tableName, final byte[][] splitKeys, int 
regionReplication,
+  final byte[]... families) throws IOException, InterruptedException {
 HTableDescriptor htd = new HTableDescriptor(tableName);
 htd.setRegionReplication(regionReplication);
 for (byte[] family : families) {
@@ -46,7 +60,6 @@ public class MobSnapshotTestingUtils {
   hcd.setMobThreshold(0L);
   htd.addFamily(hcd);
 }
-byte[][] splitKeys = SnapshotTestingUtils.getSplitKeys();
 util.getHBaseAdmin().createTable(htd, splitKeys);
 SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
 assertEquals((splitKeys.length + 1) * regionReplication, util

http://git-wip-us.apache.org/repos/asf/hbase/blob/e19632a1/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 770bb00..dfd00b3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -82,9 +82,10 @@ import com.google.protobuf.ServiceException;
  */
 @InterfaceAudience.Private
 public final class SnapshotTestingUtils {
-
   private static final Log LOG = LogFactory.getLog(SnapshotTestingUtils.class);
-  private static byte[] KEYS = Bytes.toBytes("0123456789");
+
+  // default number of regions (and keys) given by getSplitKeys() and 
createTable()
+  private static byte[] KEYS = Bytes.toBytes("0123456");
 
   private SnapshotTestingUtils() {
 // private constructor for utility class
@@ -750,23 +751,32 @@ public final class SnapshotTestingUtils {
   }
 
   public static void createTable(final HBaseTestingUtility util, final 
TableName tableName,
-  int regionReplication, final byte[]... families) throws IOException, 
InterruptedException {
+  int regionReplication, int nRegions, final byte[]... families)
+  throws IOException, InterruptedException {
 HTableDescriptor htd = new HTableDescriptor(tableName);
 htd.setRegionReplication(regionReplication);
 for (byte[] family : families) {
   

[47/50] [abbrv] hbase git commit: HBASE-16507 Procedure v2 - Force DDL operation to always roll forward (addendum)

2016-09-19 Thread syuanjiang
HBASE-16507 Procedure v2 - Force DDL operation to always roll forward (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c58d26d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c58d26d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c58d26d

Branch: refs/heads/hbase-12439
Commit: 9c58d26d3bd5bd4e10be1084a24a62ed25722689
Parents: 4faa8ea
Author: Matteo Bertozzi 
Authored: Sun Sep 18 19:37:46 2016 -0700
Committer: Matteo Bertozzi 
Committed: Sun Sep 18 19:37:46 2016 -0700

--
 .../org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c58d26d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
index 7eb6465..10467fe 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -105,8 +105,9 @@ public abstract class StateMachineProcedure<TEnvironment, TState>
   protected void setNextState(final TState state) {
 if (aborted.get() && isRollbackSupported(getCurrentState())) {
   setAbortFailure(getClass().getSimpleName(), "abort requested");
+} else {
+  setNextState(getStateId(state));
 }
-setNextState(getStateId(state));
   }
 
   /**



[35/50] [abbrv] hbase git commit: HBASE-16640 TimeoutBlockingQueue#remove() should return whether the entry is removed

2016-09-19 Thread syuanjiang
HBASE-16640 TimeoutBlockingQueue#remove() should return whether the entry is 
removed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e782d0bb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e782d0bb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e782d0bb

Branch: refs/heads/hbase-12439
Commit: e782d0bbdf265fddfcce5754cf2e2dbcc4ea
Parents: 8c4b09d
Author: tedyu 
Authored: Thu Sep 15 17:34:23 2016 -0700
Committer: tedyu 
Committed: Thu Sep 15 17:34:23 2016 -0700

--
 .../hadoop/hbase/procedure2/ProcedureExecutor.java |  5 +++--
 .../hbase/procedure2/util/TimeoutBlockingQueue.java|  8 +---
 .../procedure2/util/TestTimeoutBlockingQueue.java  | 13 -
 3 files changed, 16 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e782d0bb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 5042329..1a9010d 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -615,9 +615,10 @@ public class ProcedureExecutor<TEnvironment> {
   /**
* Remove a chore procedure from the executor
* @param chore the chore to remove
+   * @return whether the chore is removed
*/
-  public void removeChore(final ProcedureInMemoryChore chore) {
-waitingTimeout.remove(chore);
+  public boolean removeChore(final ProcedureInMemoryChore chore) {
+return waitingTimeout.remove(chore);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e782d0bb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
index fceabb1..2292e63 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java
@@ -92,15 +92,17 @@ public class TimeoutBlockingQueue {
 }
   }
 
-  public void remove(E e) {
+  public boolean remove(E e) {
+if (e == null) return false;
 lock.lock();
 try {
   for (int i = 0; i < objects.length; ++i) {
-if (objects[i] == e) {
+if (e.equals(objects[i])) {
   objects[i] = null;
-  return;
+  return true;
 }
   }
+  return false;
 } finally {
   lock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e782d0bb/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
index 209d1c5..1f901b5 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java
@@ -137,18 +137,21 @@ public class TestTimeoutBlockingQueue {
TimeoutBlockingQueue<TestObject> queue =
  new TimeoutBlockingQueue<TestObject>(2, new 
TestObjectTimeoutRetriever());
 
-TestObject[] objs = new TestObject[5];
-for (int i = 0; i < objs.length; ++i) {
+final int effectiveLen = 5;
+TestObject[] objs = new TestObject[6];
+for (int i = 0; i < effectiveLen; ++i) {
   objs[i] = new TestObject(0, i * 10);
   queue.add(objs[i]);
 }
+objs[effectiveLen] = new TestObject(0, effectiveLen * 10);
 queue.dump();
 
-for (int i = 0; i < objs.length; i += 2) {
-  queue.remove(objs[i]);
+for (int i = 0; i < effectiveLen; i += 2) {
+  assertTrue(queue.remove(objs[i]));
 }
+assertTrue(!queue.remove(objs[effectiveLen]));
 
-for (int i = 0; i < objs.length; ++i) {
+for (int i = 0; i < effectiveLen; ++i) {
   TestObject x = queue.poll();
   assertEquals((i % 2) == 0 ? null : objs[i], x);
 }



[17/50] [abbrv] hbase git commit: HBASE-16229 Cleaning up size and heapSize calculation.

2016-09-19 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/2ab33846/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index a6c7912..74826b0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -19,10 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -31,19 +28,14 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -59,7 +51,6 @@ import static org.junit.Assert.assertTrue;
 @Category({ RegionServerTests.class, LargeTests.class })
 public class TestWalAndCompactingMemStoreFlush {
 
-  private static final Log LOG = 
LogFactory.getLog(TestWalAndCompactingMemStoreFlush.class);
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion");
   public static final TableName TABLENAME = 
TableName.valueOf("TestWalAndCompactingMemStoreFlush",
@@ -201,12 +192,13 @@ public class TestWalAndCompactingMemStoreFlush {
 // memstores of CF1, CF2 and CF3.
 String msg = "totalMemstoreSize="+totalMemstoreSize +
 " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
-" 
DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM
-+
+" CompactingMemStore.DEEP_OVERHEAD="+CompactingMemStore.DEEP_OVERHEAD +
 " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
 " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
 " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
-assertEquals(msg,totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD,
+assertEquals(msg,
+totalMemstoreSize + 2 * (CompactingMemStore.DEEP_OVERHEAD + 
MutableSegment.DEEP_OVERHEAD)
++ (DefaultMemStore.DEEP_OVERHEAD + MutableSegment.DEEP_OVERHEAD),
 cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI);
 
 // Flush!!
@@ -220,11 +212,6 @@ public class TestWalAndCompactingMemStoreFlush {
 ((CompactingMemStore) 
region.getStore(FAMILY3).getMemStore()).flushInMemory();
 region.flush(false);
 
-// CF3 should be compacted so wait here to be sure the compaction is done
-while (((CompactingMemStore) region.getStore(FAMILY3).getMemStore())
-.isMemStoreFlushingInMemory())
-  Threads.sleep(10);
-
 // Recalculate everything
 long cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize();
 long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
@@ -239,8 +226,6 @@ public class TestWalAndCompactingMemStoreFlush {
 
 s = s + "DefaultMemStore DEEP_OVERHEAD is:" + DefaultMemStore.DEEP_OVERHEAD
 + ", CompactingMemStore DEEP_OVERHEAD is:" + 
CompactingMemStore.DEEP_OVERHEAD
-+ ", CompactingMemStore DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM is:" 
+ CompactingMemStore
-.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM
 + "\nAfter first flush! CF1 should be flushed to memory, but not 
compacted.---\n"
 + "Size of CF1 is:" + cf1MemstoreSizePhaseII + ", size of CF2 is:" + 
cf2MemstoreSizePhaseII
 + ", size of CF3 is:" + cf3MemstoreSizePhaseII + "\n";
@@ -249,12 +234,13 @@ public class TestWalAndCompactingMemStoreFlush {
 assertTrue(cf1MemstoreSizePhaseII < cf1MemstoreSizePhaseI);
 
 // 

[33/50] [abbrv] hbase git commit: HBASE-16626 User customized RegionScanner from 1.X is incompatible with 2.0.0's off-heap part. (Charlie Qiangeng Xu)

2016-09-19 Thread syuanjiang
HBASE-16626 User customized RegionScanner from 1.X is incompatible with 2.0.0's 
off-heap part. (Charlie Qiangeng Xu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56be3ac7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56be3ac7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56be3ac7

Branch: refs/heads/hbase-12439
Commit: 56be3ac7c8d203710f844cd799f2fa3496f0515a
Parents: e6f8f6d
Author: anoopsamjohn 
Authored: Thu Sep 15 18:07:43 2016 +0530
Committer: anoopsamjohn 
Committed: Thu Sep 15 18:07:43 2016 +0530

--
 .../org/apache/hadoop/hbase/regionserver/RegionScanner.java | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56be3ac7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 5b33db4..4f9732d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -115,4 +115,13 @@ public interface RegionScanner extends InternalScanner, 
Shipper {
*/
  boolean nextRaw(List<Cell> result, ScannerContext scannerContext)
   throws IOException;
+
+  /**
+   * Empty implementation to provide compatibility for user migrating from 1.X
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-16626">HBASE-16626</a>
+   */
+  @Override
+  default void shipped() throws IOException {
+// do nothing
+  }
 }



[10/50] [abbrv] hbase git commit: HBASE-16609 Fake cells EmptyByteBufferedCell created in read path not implementing SettableSequenceId (Yu Sun)

2016-09-19 Thread syuanjiang
HBASE-16609 Fake cells EmptyByteBufferedCell  created in read path not 
implementing SettableSequenceId (Yu Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8290b2c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8290b2c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8290b2c8

Branch: refs/heads/hbase-12439
Commit: 8290b2c8fa7c534e02e37b0762ebf455a77016ca
Parents: 0860bdb
Author: tedyu 
Authored: Mon Sep 12 08:53:22 2016 -0700
Committer: tedyu 
Committed: Mon Sep 12 08:53:22 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/CellUtil.java| 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8290b2c8/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index ad13e9e..94c7189 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -1974,7 +1974,11 @@ public final class CellUtil {
* These cells are used in reseeks/seeks to improve the read performance.
* They are not real cells that are returned back to the clients
*/
-  private static abstract class EmptyByteBufferedCell extends ByteBufferedCell 
{
+  private static abstract class EmptyByteBufferedCell extends ByteBufferedCell 
implements SettableSequenceId {
+@Override
+public void setSequenceId(long seqId) {
+  // Fake cells don't need seqId, so leaving it as a noop.
+}
 
 @Override
 public byte[] getRowArray() {



[41/50] [abbrv] hbase git commit: HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess

2016-09-19 Thread syuanjiang
HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cf8907d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cf8907d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cf8907d

Branch: refs/heads/hbase-12439
Commit: 2cf8907db53b84a0118acc1edd1dfb9b37abe8b7
Parents: b6b7236
Author: chenheng 
Authored: Sat Sep 17 00:35:23 2016 +0800
Committer: chenheng 
Committed: Sat Sep 17 00:35:23 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 1375 +-
 .../hadoop/hbase/client/AsyncRequestFuture.java |   40 +
 .../hbase/client/AsyncRequestFutureImpl.java| 1290 
 .../apache/hadoop/hbase/client/BatchErrors.java |   69 +
 .../org/apache/hadoop/hbase/client/HTable.java  |1 -
 .../hadoop/hbase/client/HTableMultiplexer.java  |1 -
 .../hadoop/hbase/client/TestAsyncProcess.java   |   48 +-
 .../hadoop/hbase/client/TestReplicasClient.java |2 -
 8 files changed, 1481 insertions(+), 1345 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8907d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 93b17bc..2ffb2e3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -20,52 +20,41 @@
 package org.apache.hadoop.hbase.client;
 
 import com.google.common.annotations.VisibleForTesting;
-import java.io.InterruptedIOException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
-import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * This class  allows a continuous flow of requests. It's written to be 
compatible with a
@@ -124,7 

[26/50] [abbrv] hbase git commit: HBASE-16615 Fix flaky TestScannerHeartbeatMessages

2016-09-19 Thread syuanjiang
HBASE-16615 Fix flaky TestScannerHeartbeatMessages


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a602aaf9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a602aaf9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a602aaf9

Branch: refs/heads/hbase-12439
Commit: a602aaf9baae779ac654fcb0fcedfdc9f8acc6ce
Parents: cd9f422
Author: zhangduo 
Authored: Tue Sep 13 11:44:04 2016 +0800
Committer: zhangduo 
Committed: Tue Sep 13 17:41:01 2016 +0800

--
 .../TestScannerHeartbeatMessages.java   | 105 ---
 1 file changed, 41 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a602aaf9/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
index 2d9ba6e..b031413 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
@@ -21,6 +21,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -55,6 +58,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.log4j.Level;
 import org.junit.After;
@@ -64,9 +68,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
 /**
  * Here we test to make sure that scans return the expected Results when the 
server is sending the
  * Client heartbeat messages. Heartbeat messages are essentially keep-alive 
messages (they prevent
@@ -113,12 +114,10 @@ public class TestScannerHeartbeatMessages {
 
   // In this test, we sleep after reading each row. So we should make sure 
after we get some number
   // of rows and sleep same times we must reach time limit, and do not timeout 
after next sleeping.
-  // So set this to 200, we will get 3 rows and reach time limit at the start 
of 4th row, then sleep
-  // for the 4th time. Total time is 800 ms so we will not timeout.
-  private static int DEFAULT_ROW_SLEEP_TIME = 200;
+  private static int DEFAULT_ROW_SLEEP_TIME = 300;
 
   // Similar with row sleep time.
-  private static int DEFAULT_CF_SLEEP_TIME = 200;
+  private static int DEFAULT_CF_SLEEP_TIME = 300;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -178,7 +177,6 @@ public class TestScannerHeartbeatMessages {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-TEST_UTIL.deleteTable(TABLE_NAME);
 TEST_UTIL.shutdownMiniCluster();
   }
 
@@ -193,26 +191,13 @@ public class TestScannerHeartbeatMessages {
   }
 
   /**
-   * Test a variety of scan configurations to ensure that they return the 
expected Results when
-   * heartbeat messages are necessary. These tests are accumulated under one 
test case to ensure
-   * that they don't run in parallel. If the tests ran in parallel, they may 
conflict with each
-   * other due to changing static variables
-   */
-  @Test
-  public void testScannerHeartbeatMessages() throws Exception {
-testImportanceOfHeartbeats(testHeartbeatBetweenRows());
-testImportanceOfHeartbeats(testHeartbeatBetweenColumnFamilies());
-testImportanceOfHeartbeats(testHeartbeatWithSparseFilter());
-  }
-
-  /**
* Run the test callable when heartbeats are enabled/disabled. We expect all 
tests to only pass
* when heartbeat messages are enabled (otherwise the test is pointless). 
When heartbeats are
* disabled, the test should throw an exception.
* @param testCallable
* @throws InterruptedException
*/
-  public void testImportanceOfHeartbeats(Callable testCallable) throws 
InterruptedException {
+  private void testImportanceOfHeartbeats(Callable testCallable) throws 
InterruptedException {
 HeartbeatRPCServices.heartbeatsEnabled = true;
 
 try {
@@ -239,8 +224,9 @@ public 

[01/50] [abbrv] hbase git commit: HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky

2016-09-19 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 d5080e82f -> b2eac0da3


HBASE-16309 TestDefaultCompactSelection.testCompactionRatio is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c8d1f0a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c8d1f0a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c8d1f0a

Branch: refs/heads/hbase-12439
Commit: 6c8d1f0ae82b736322a37bafabf795c8b3a0fdd4
Parents: e11aafa
Author: zhangduo 
Authored: Fri Sep 9 11:09:17 2016 +0800
Committer: zhangduo 
Committed: Fri Sep 9 14:20:39 2016 +0800

--
 .../compactions/RatioBasedCompactionPolicy.java  |  3 ++-
 .../regionserver/TestDefaultCompactSelection.java| 15 +--
 2 files changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c8d1f0a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 3386bfd..a3e10f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * The default algorithm for selecting files for compaction.
@@ -61,7 +62,7 @@ public class RatioBasedCompactionPolicy extends 
SortedCompactionPolicy {
 }
 // TODO: Use better method for determining stamp of last major (HBASE-2990)
 long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 if (lowTimestamp > 0L && lowTimestamp < (now - mcTime)) {
   // Major compaction time has elapsed.
   long cfTTL = this.storeConfigInfo.getStoreFileTtl();

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c8d1f0a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index dbd6f11..1513cd0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import 
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -34,6 +36,8 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 
   @Test
   public void testCompactionRatio() throws IOException {
+TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
+EnvironmentEdgeManager.injectEdge(edge);
 /**
  * NOTE: these tests are specific to describe the implementation of the
  * current compaction algorithm.  Developed to ensure that refactoring
@@ -90,10 +94,17 @@ public class TestDefaultCompactSelection extends 
TestCompactionPolicy {
 conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
 store.storeEngine.getCompactionPolicy().setConf(conf);
 try {
+  // The modTime of the mocked store file is currentTimeMillis, so we need 
to increase the
+  // timestamp a bit to make sure that now - lowestModTime is greater than 
major compaction
+  // period(1ms).
   // trigger an aged major compaction
-  compactEquals(sfCreate(50,25,12,12), 50, 25, 12, 12);
+  List candidates = sfCreate(50, 25, 12, 12);
+  edge.increment(2);
+  compactEquals(candidates, 50, 25, 12, 12);
   // major sure exceeding 

[28/50] [abbrv] hbase git commit: HBASE-16618 Procedure v2 - Add base class for table and ns procedures

2016-09-19 Thread syuanjiang
HBASE-16618 Procedure v2 - Add base class for table and ns procedures


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c6a98bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c6a98bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c6a98bd

Branch: refs/heads/hbase-12439
Commit: 4c6a98bd767f3bec4d2e6107016267a09d58dd12
Parents: 981200b
Author: Matteo Bertozzi 
Authored: Tue Sep 13 11:37:52 2016 -0700
Committer: Matteo Bertozzi 
Committed: Tue Sep 13 11:37:52 2016 -0700

--
 .../AbstractStateMachineNamespaceProcedure.java |  70 
 .../AbstractStateMachineTableProcedure.java | 111 +++
 .../procedure/AddColumnFamilyProcedure.java |  38 ++-
 .../procedure/CloneSnapshotProcedure.java   |  30 +
 .../procedure/CreateNamespaceProcedure.java |  28 +
 .../master/procedure/CreateTableProcedure.java  |  43 ++-
 .../procedure/DeleteColumnFamilyProcedure.java  |  38 ++-
 .../procedure/DeleteNamespaceProcedure.java |  34 +-
 .../master/procedure/DeleteTableProcedure.java  |  49 ++--
 .../master/procedure/DisableTableProcedure.java |  55 ++---
 .../DispatchMergingRegionsProcedure.java|  16 +--
 .../master/procedure/EnableTableProcedure.java  |  55 ++---
 .../procedure/MasterDDLOperationHelper.java |  16 ---
 .../procedure/ModifyColumnFamilyProcedure.java  |  38 ++-
 .../procedure/ModifyNamespaceProcedure.java |  34 +-
 .../master/procedure/ModifyTableProcedure.java  |  44 ++--
 .../procedure/RestoreSnapshotProcedure.java |  27 +
 .../procedure/TruncateTableProcedure.java   |  36 ++
 18 files changed, 287 insertions(+), 475 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c6a98bd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
new file mode 100644
index 000..0ba7556
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+
+/**
+ * Base class for all the Namespace procedures that want to use a 
StateMachineProcedure.
+ * It provide some basic helpers like basic locking and basic 
toStringClassDetails().
+ */
+@InterfaceAudience.Private
+public abstract class AbstractStateMachineNamespaceProcedure
+extends StateMachineProcedure
+implements TableProcedureInterface {
+
+  protected AbstractStateMachineNamespaceProcedure() {
+// Required by the Procedure framework to create the procedure on replay
+  }
+
+  protected AbstractStateMachineNamespaceProcedure(final MasterProcedureEnv 
env) {
+this.setOwner(env.getRequestUser().getShortName());
+  }
+
+  protected abstract String getNamespaceName();
+
+  @Override
+  public TableName getTableName() {
+return TableName.NAMESPACE_TABLE_NAME;
+  }
+
+  @Override
+  public abstract TableOperationType getTableOperationType();
+
+  @Override
+  public void toStringClassDetails(final StringBuilder sb) {
+sb.append(getClass().getSimpleName());
+sb.append(" (namespace=");
+sb.append(getNamespaceName());
+sb.append(")");
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+if (env.waitInitialized(this)) return false;
+return 

[42/50] [abbrv] hbase git commit: HBASE-16447 Replication by namespaces config in peer (Guanghao Zhang)

2016-09-19 Thread syuanjiang
HBASE-16447 Replication by namespaces config in peer (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1a1003a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1a1003a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1a1003a4

Branch: refs/heads/hbase-12439
Commit: 1a1003a482d9bfb725fbe1097c794fdb043dcd81
Parents: 2cf8907
Author: Enis Soztutar 
Authored: Fri Sep 16 11:47:42 2016 -0700
Committer: Enis Soztutar 
Committed: Fri Sep 16 11:47:42 2016 -0700

--
 .../client/replication/ReplicationAdmin.java|  38 ++-
 .../replication/ReplicationSerDeHelper.java |  30 ++-
 .../hbase/replication/ReplicationPeer.java  |   7 +
 .../replication/ReplicationPeerConfig.java  |  15 +-
 .../replication/ReplicationPeerZKImpl.java  |  10 +
 .../replication/ReplicationPeersZKImpl.java |   4 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   2 +-
 .../ipc/protobuf/generated/TestProtos.java  |  10 +-
 .../protobuf/generated/ZooKeeperProtos.java | 186 --
 .../src/main/protobuf/ZooKeeper.proto   |   1 +
 .../apache/hadoop/hbase/ZKNamespaceManager.java |   2 +-
 .../replication/BaseReplicationEndpoint.java|   6 +-
 .../NamespaceTableCfWALEntryFilter.java | 126 ++
 .../replication/TableCfWALEntryFilter.java  | 101 
 .../replication/TestReplicationAdmin.java   |  84 +++
 .../replication/TestNamespaceReplication.java   | 248 +++
 .../TestReplicationWALEntryFilters.java |  73 +-
 ...egionReplicaReplicationEndpointNoMaster.java |   3 +
 .../src/main/ruby/hbase/replication_admin.rb|  36 +++
 hbase-shell/src/main/ruby/hbase_constants.rb|   1 +
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/add_peer.rb|  16 +-
 .../src/main/ruby/shell/commands/list_peers.rb  |   7 +-
 .../ruby/shell/commands/set_peer_namespaces.rb  |  51 
 .../ruby/shell/commands/set_peer_tableCFs.rb|  10 +-
 .../test/ruby/hbase/replication_admin_test.rb   |  69 +-
 26 files changed, 993 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1a1003a4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index de6cb7f..dc1a7ad 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -189,6 +189,8 @@ public class ReplicationAdmin implements Closeable {
* @param peerConfig configuration for the replication slave cluster
*/
   public void addPeer(String id, ReplicationPeerConfig peerConfig) throws 
ReplicationException {
+checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+  peerConfig.getTableCFsMap());
 this.replicationPeers.registerPeer(id, peerConfig);
   }
 
@@ -202,8 +204,11 @@ public class ReplicationAdmin implements Closeable {
 
   public void updatePeerConfig(String id, ReplicationPeerConfig peerConfig)
   throws ReplicationException {
+checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+  peerConfig.getTableCFsMap());
 this.replicationPeers.updatePeerConfig(id, peerConfig);
   }
+
   /**
* Removes a peer cluster and stops the replication to it.
* @param id a short name that identifies the cluster
@@ -360,7 +365,6 @@ public class ReplicationAdmin implements Closeable {
 }
   } else {
 throw new ReplicationException("No table: " + table + " in table-cfs 
config of peer: " + id);
-
   }
 }
 setPeerTableCFs(id, preTableCfs);
@@ -376,6 +380,8 @@ public class ReplicationAdmin implements Closeable {
*/
   public void setPeerTableCFs(String id, Map tableCfs)
   throws ReplicationException {
+checkNamespacesAndTableCfsConfigConflict(
+  this.replicationPeers.getReplicationPeerConfig(id).getNamespaces(), 
tableCfs);
 this.replicationPeers.setPeerTableCFsConfig(id, tableCfs);
   }
 
@@ -627,4 +633,34 @@ public class ReplicationAdmin implements Closeable {
 }
 return true;
   }
+
+  /**
+   * Set a namespace in the peer config means that all tables in this namespace
+   * will be replicated to the peer cluster.
+   *
+   * 1. If you already have set a namespace in the peer config, then you can't 
set any table
+   *of this namespace to the peer config.
+   * 

[30/50] [abbrv] hbase git commit: HBASE-16388 Prevent client threads being blocked by only one slow region server

2016-09-19 Thread syuanjiang
HBASE-16388 Prevent client threads being blocked by only one slow region server

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8ef6c763
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8ef6c763
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8ef6c763

Branch: refs/heads/hbase-12439
Commit: 8ef6c76344127f2f4d2f9536d87fa6fc7b5c7132
Parents: 8540171
Author: Phil Yang 
Authored: Wed Sep 14 13:21:01 2016 +0800
Committer: stack 
Committed: Wed Sep 14 09:08:20 2016 -0700

--
 .../hadoop/hbase/ipc/AbstractRpcClient.java |  22 
 .../hbase/ipc/ServerTooBusyException.java   |  38 ++
 .../org/apache/hadoop/hbase/HConstants.java |  12 ++
 .../src/main/resources/hbase-default.xml|  16 ++-
 .../org/apache/hadoop/hbase/client/TestHCM.java | 119 ++-
 5 files changed, 201 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8ef6c763/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 098ad3c..401a240 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -23,6 +23,9 @@ import static 
org.apache.hadoop.hbase.ipc.IPCUtil.wrapException;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Descriptors.MethodDescriptor;
@@ -137,6 +140,16 @@ public abstract class AbstractRpcClient implements RpcC
 
   private final ScheduledFuture cleanupIdleConnectionTask;
 
+  private int maxConcurrentCallsPerServer;
+
+  private static final LoadingCache 
concurrentCounterCache =
+  CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS).
+  build(new CacheLoader() {
+@Override public AtomicInteger load(InetSocketAddress key) throws 
Exception {
+  return new AtomicInteger(0);
+}
+  });
+
   /**
* Construct an IPC client for the cluster clusterId
* @param conf configuration
@@ -167,6 +180,9 @@ public abstract class AbstractRpcClient implements RpcC
 this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, 
DEFAULT_SOCKET_TIMEOUT_READ);
 this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, 
DEFAULT_SOCKET_TIMEOUT_WRITE);
 this.metrics = metrics;
+this.maxConcurrentCallsPerServer = conf.getInt(
+HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD,
+HConstants.DEFAULT_HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD);
 
 this.connections = new PoolMap<>(getPoolType(conf), getPoolSize(conf));
 
@@ -382,16 +398,22 @@ public abstract class AbstractRpcClient implements RpcC
   final RpcCallback callback) {
 final MetricsConnection.CallStats cs = MetricsConnection.newCallStats();
 cs.setStartTime(EnvironmentEdgeManager.currentTime());
+final AtomicInteger counter = concurrentCounterCache.getUnchecked(addr);
 Call call = new Call(nextCallId(), md, param, hrc.cellScanner(), 
returnType,
 hrc.getCallTimeout(), hrc.getPriority(), new RpcCallback() {
 
   @Override
   public void run(Call call) {
+counter.decrementAndGet();
 onCallFinished(call, hrc, addr, callback);
   }
 }, cs);
 ConnectionId remoteId = new ConnectionId(ticket, 
md.getService().getName(), addr);
+int count = counter.incrementAndGet();
 try {
+  if (count > maxConcurrentCallsPerServer) {
+throw new ServerTooBusyException(addr, count);
+  }
   T connection = getConnection(remoteId);
   connection.sendRequest(call, hrc);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8ef6c763/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java
new file mode 100644
index 000..c6ba030
--- /dev/null
+++ 

[45/50] [abbrv] hbase git commit: HBASE-16534 Procedure v2 - Perf Tool for Scheduler. Tool to test performance of locks and queues in procedure scheduler independently from other framework components.

2016-09-19 Thread syuanjiang
HBASE-16534 Procedure v2 - Perf Tool for Scheduler.
Tool to test performance of locks and queues in procedure scheduler 
independently from other framework components.
Inserts table and region operations in the scheduler, then polls them and 
exercises their locks. Number of tables, regions and operations can be set 
using cli args.

Change-Id: I0fb27e67d3fcab70dd5d0b5197396b117b11eac6


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da3abbcb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da3abbcb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da3abbcb

Branch: refs/heads/hbase-12439
Commit: da3abbcb3528f42292855851df6de3bfdfc2c106
Parents: edc0ef3
Author: Apekshit Sharma 
Authored: Wed Sep 14 16:09:01 2016 -0700
Committer: Apekshit Sharma 
Committed: Sat Sep 17 17:38:51 2016 -0700

--
 .../procedure2/ProcedureTestingUtility.java |  11 +
 ...ProcedureSchedulerPerformanceEvaluation.java | 284 +++
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/da3abbcb/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 7365de9..034109d 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -299,6 +299,17 @@ public class ProcedureTestingUtility {
 data = null;
   }
 }
+
+// Mark acquire/release lock functions public for test uses.
+@Override
+public boolean acquireLock(Void env) {
+  return true;
+}
+
+@Override
+public void releaseLock(Void env) {
+  // no-op
+}
   }
 
   public static class LoadCounter implements ProcedureStore.ProcedureLoader {

http://git-wip-us.apache.org/repos/asf/hbase/blob/da3abbcb/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
new file mode 100644
index 000..6e4f3cd
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.Random;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Option;
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Tool to test performance of locks and queues in procedure scheduler 
independently from other
+ * framework components.
+ * Inserts table and region operations in the scheduler, then polls them and 
exercises their locks
+ * Number of tables, regions and operations can be set using cli args.
+ */
+public class MasterProcedureSchedulerPerformanceEvaluation extends 
AbstractHBaseTool {
+  protected static final 

[40/50] [abbrv] hbase git commit: HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess

2016-09-19 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8907d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
new file mode 100644
index 000..c6b2a53
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -0,0 +1,1290 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.RetryImmediatelyException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.htrace.Trace;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * The context, and return value, for a single submit/submitAll call.
+ * Note on how this class (one AP submit) works. Initially, all requests are 
split into groups
+ * by server; request is sent to each server in parallel; the RPC calls are 
not async so a
+ * thread per server is used. Every time some actions fail, regions/locations 
might have
+ * changed, so we re-group them by server and region again and send these 
groups in parallel
+ * too. The result, in case of retries, is a "tree" of threads, with parent 
exiting after
+ * scheduling children. This is why lots of code doesn't require any 
synchronization.
+ */
+@InterfaceAudience.Private
+class AsyncRequestFutureImpl implements AsyncRequestFuture {
+
+  private static final Log LOG = 
LogFactory.getLog(AsyncRequestFutureImpl.class);
+
+  /**
+   * Runnable (that can be submitted to thread pool) that waits for when it's 
time
+   * to issue replica calls, finds region replicas, groups the requests by 
replica and
+   * issues the calls (on separate threads, via sendMultiAction).
+   * This is done on a separate thread because we don't want to wait on user 
thread for
+   * our asynchronous call, and usually we have to wait before making replica 
calls.
+   */
+  private final class ReplicaCallIssuingRunnable implements Runnable {
+private final long startTime;
+private final List initialActions;
+
+public ReplicaCallIssuingRunnable(List initialActions, long 
startTime) {
+  this.initialActions = initialActions;
+  this.startTime = startTime;
+}
+
+@Override
+public void run() {
+  boolean done = false;
+  if (asyncProcess.primaryCallTimeoutMicroseconds > 0) {
+try {
+  done = waitUntilDone(startTime * 1000L + 
asyncProcess.primaryCallTimeoutMicroseconds);
+} catch (InterruptedException ex) {
+  LOG.error("Replica thread was interrupted - no replica calls: " + 
ex.getMessage());
+  return;
+}
+  

[50/50] [abbrv] hbase git commit: HBASE-16554 Rebuild WAL tracker if trailer is corrupted.

2016-09-19 Thread syuanjiang
HBASE-16554 Rebuild WAL tracker if trailer is corrupted.

Change-Id: Iecc3347de3de9fc57f57ab5f498aad404d02ec52


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2eac0da
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2eac0da
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2eac0da

Branch: refs/heads/hbase-12439
Commit: b2eac0da33c4161aa8188213171afb03b72048a4
Parents: c5b8aab
Author: Apekshit Sharma 
Authored: Sat Sep 17 17:38:40 2016 -0700
Committer: Apekshit Sharma 
Committed: Mon Sep 19 12:23:48 2016 -0700

--
 .../procedure2/store/ProcedureStoreTracker.java | 15 +++-
 .../procedure2/store/wal/ProcedureWALFile.java  |  2 +
 .../store/wal/ProcedureWALFormat.java   | 14 +++-
 .../store/wal/ProcedureWALFormatReader.java | 59 +++---
 .../procedure2/store/wal/WALProcedureStore.java | 50 ++--
 .../store/wal/TestWALProcedureStore.java| 82 
 6 files changed, 178 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2eac0da/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 78d6a44..a60ba3f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -93,6 +93,7 @@ public class ProcedureStoreTracker {
 private long[] updated;
 /**
  * Keeps track of procedure ids which belong to this bitmap's range and 
have been deleted.
+ * This represents global state since it's not reset on WAL rolls.
  */
 private long[] deleted;
 /**
@@ -449,8 +450,7 @@ public class ProcedureStoreTracker {
 }
   }
 
-  public void resetToProto(ProcedureProtos.ProcedureStoreTracker 
trackerProtoBuf)
-  throws IOException {
+  public void resetToProto(final ProcedureProtos.ProcedureStoreTracker 
trackerProtoBuf) {
 reset();
 for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: 
trackerProtoBuf.getNodeList()) {
   final BitSetNode node = new BitSetNode(protoNode);
@@ -536,6 +536,7 @@ public class ProcedureStoreTracker {
 BitSetNode node = getOrCreateNode(procId);
 assert node.contains(procId) : "expected procId=" + procId + " in the 
node=" + node;
 node.updateState(procId, isDeleted);
+trackProcIds(procId);
   }
 
   public void reset() {
@@ -545,6 +546,11 @@ public class ProcedureStoreTracker {
 resetUpdates();
   }
 
+  public boolean isUpdated(long procId) {
+final Map.Entry entry = map.floorEntry(procId);
+return entry != null && entry.getValue().contains(procId) && 
entry.getValue().isUpdated(procId);
+  }
+
   /**
* If {@link #partial} is false, returns state from the bitmap. If no state 
is found for
* {@code procId}, returns YES.
@@ -583,6 +589,10 @@ public class ProcedureStoreTracker {
 }
   }
 
+  public boolean isPartial() {
+return partial;
+  }
+
   public void setPartialFlag(boolean isPartial) {
 if (this.partial && !isPartial) {
   for (Map.Entry entry : map.entrySet()) {
@@ -720,6 +730,7 @@ public class ProcedureStoreTracker {
   entry.getValue().dump();
 }
   }
+
   /**
* Iterates over
* {@link BitSetNode}s in this.map and subtracts with corresponding ones 
from {@code other}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2eac0da/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 99e7a7e..b9726a8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -62,6 +62,7 @@ public class ProcedureWALFile implements 
Comparable {
 this.logFile = logStatus.getPath();
 this.logSize = logStatus.getLen();
 this.timestamp = logStatus.getModificationTime();
+tracker.setPartialFlag(true);
   }
 
   public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader 
header,
@@ -72,6 +73,7 @@ public class ProcedureWALFile implements 

[44/50] [abbrv] hbase git commit: HBASE-16598 Enable zookeeper useMulti always and clean up in HBase code

2016-09-19 Thread syuanjiang
HBASE-16598 Enable zookeeper useMulti always and clean up in HBase code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/edc0ef3f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/edc0ef3f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/edc0ef3f

Branch: refs/heads/hbase-12439
Commit: edc0ef3fe4b818da29ed0e581139dd4faf1cd591
Parents: bb3d9cc
Author: Jerry He 
Authored: Sat Sep 17 16:51:26 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 17 16:51:26 2016 -0700

--
 .../replication/ReplicationPeersZKImpl.java |   1 -
 .../replication/ReplicationQueuesZKImpl.java| 160 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  77 +--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/resources/hbase-default.xml|  10 -
 .../protobuf/generated/ZooKeeperProtos.java | 559 +--
 .../src/main/protobuf/ZooKeeper.proto   |   7 -
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   3 -
 .../org/apache/hadoop/hbase/master/HMaster.java |  15 +-
 .../cleaner/ReplicationZKLockCleanerChore.java  | 112 
 .../replication/TestMultiSlaveReplication.java  |  38 --
 .../TestReplicationSourceManager.java   |   1 -
 .../TestReplicationSourceManagerZkImpl.java |   2 -
 .../hadoop/hbase/zookeeper/TestZKMulti.java |  47 --
 .../hbase/client/rsgroup/TestShellRSGroups.java |   3 -
 src/main/asciidoc/_chapters/configuration.adoc  |   5 +-
 src/main/asciidoc/_chapters/zookeeper.adoc  |   4 +-
 17 files changed, 47 insertions(+), 1000 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/edc0ef3f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 90b1347..d4b93c0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -131,7 +131,6 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
   List listOfOps = new ArrayList();
   ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
 ReplicationSerDeHelper.toByteArray(peerConfig));
-  // There is a race (if hbase.zookeeper.useMulti is false)
   // b/w PeerWatcher and ReplicationZookeeper#add method to create the
   // peer-state znode. This happens while adding a peer
   // The peer state data is set as "ENABLED" by default.

http://git-wip-us.apache.org/repos/asf/hbase/blob/edc0ef3f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 1c579ab..40c9140 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -31,8 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -67,8 +64,6 @@ public class ReplicationQueuesZKImpl extends 
ReplicationStateZKBase implements R
 
   /** Znode containing all replication queues for this region server. */
   private String myQueuesZnode;
-  /** Name of znode we use to lock during failover */
-  private final static String RS_LOCK_ZNODE = "lock";
 
   private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
 
@@ -189,42 +184,13 @@ public class ReplicationQueuesZKImpl extends 
ReplicationStateZKBase implements R
 } catch (KeeperException e) {
   

[11/50] [abbrv] hbase git commit: HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing @InterfaceAudience annotation

2016-09-19 Thread syuanjiang
HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing 
@InterfaceAudience annotation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f072809
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f072809
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f072809

Branch: refs/heads/hbase-12439
Commit: 6f072809eee22a04be35a013ede41986484adc04
Parents: 8290b2c
Author: tedyu 
Authored: Mon Sep 12 12:14:56 2016 -0700
Committer: tedyu 
Committed: Mon Sep 12 12:14:56 2016 -0700

--
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 2 ++
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java   | 2 ++
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java| 2 ++
 .../main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java| 2 ++
 .../apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java   | 2 ++
 5 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f072809/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 49b02be..1fe8d09 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
@@ -80,6 +81,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
 
 
+@InterfaceAudience.Private
 public class RSGroupAdminEndpoint extends RSGroupAdminService
 implements CoprocessorService, Coprocessor, MasterObserver {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f072809/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 434c85f..309985e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -29,12 +29,14 @@ import java.util.Set;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Interface used to manage RSGroupInfo storage. An implementation
  * has the option to support offline mode.
  * See {@link RSGroupBasedLoadBalancer}
  */
+@InterfaceAudience.Private
 public interface RSGroupInfoManager {
   //Assigned before user tables
   public static final TableName RSGROUP_TABLE_NAME =

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f072809/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 52cd339..e9f322e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -90,6 +91,7 @@ import org.apache.zookeeper.KeeperException;
  * It also makes use of zookeeper to store group information needed
  * for 

[22/50] [abbrv] hbase git commit: HBASE-16592 Unify Delete request with AP

2016-09-19 Thread syuanjiang
HBASE-16592 Unify Delete request with AP


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2566cfeb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2566cfeb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2566cfeb

Branch: refs/heads/hbase-12439
Commit: 2566cfeb60de644f287ac192d360f3fc15376c8f
Parents: c57acf2
Author: chenheng 
Authored: Tue Sep 13 10:07:45 2016 +0800
Committer: Gary Helmling 
Committed: Mon Sep 12 23:23:38 2016 -0700

--
 .../hadoop/hbase/client/AbstractResponse.java   | 38 
 .../hadoop/hbase/client/AsyncProcess.java   | 22 ---
 .../org/apache/hadoop/hbase/client/HTable.java  | 43 -
 .../hadoop/hbase/client/MultiResponse.java  |  7 ++-
 .../hadoop/hbase/client/SingleResponse.java | 65 
 .../hbase/protobuf/ResponseConverter.java   | 14 +
 .../hadoop/hbase/client/TestAsyncProcess.java   | 18 +++---
 .../hadoop/hbase/client/TestFromClientSide.java | 46 ++
 8 files changed, 222 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2566cfeb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
new file mode 100644
index 000..7878d05
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
@@ -0,0 +1,38 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * This class is used to extend AP to process single action request, like 
delete, get etc.
+ */
+@InterfaceAudience.Private
+abstract class AbstractResponse {
+
+  public enum ResponseType {
+
+SINGLE(0),
+MULTI   (1);
+
+ResponseType(int value) {}
+  }
+
+  public abstract ResponseType type();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/2566cfeb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index c5745e9..1531201 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -756,14 +756,14 @@ class AsyncProcess {
 
   @Override
   public void run() {
-MultiResponse res;
+AbstractResponse res;
 CancellableRegionServerCallable callable = currentCallable;
 try {
   // setup the callable based on the actions, if we don't have one 
already from the request
   if (callable == null) {
 callable = createCallable(server, tableName, multiAction);
   }
-  RpcRetryingCaller caller = createCaller(callable);
+  RpcRetryingCaller caller = createCaller(callable);
   try {
 if (callsInProgress != null) {
   callsInProgress.add(callable);
@@ -785,9 +785,16 @@ class AsyncProcess {
 receiveGlobalFailure(multiAction, server, numAttempt, t);
 return;
   }
-
-  // Normal case: we received an answer from the server, and it's not 
an exception.
-  receiveMultiAction(multiAction, server, res, numAttempt);
+  if (res.type() == AbstractResponse.ResponseType.MULTI) {
+// Normal case: we received an answer from the server, and it's 
not an exception.
+receiveMultiAction(multiAction, server, (MultiResponse) res, 
numAttempt);
+  } else {
+if (results != null) {
+  

[12/50] [abbrv] hbase git commit: HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing @InterfaceAudience annotation - Revert due to missing credit

2016-09-19 Thread syuanjiang
HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing 
@InterfaceAudience annotation - Revert due to missing credit


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3642287b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3642287b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3642287b

Branch: refs/heads/hbase-12439
Commit: 3642287b2f86a7c88c140bc9d9e35a9bff7253c4
Parents: 6f07280
Author: tedyu 
Authored: Mon Sep 12 12:15:50 2016 -0700
Committer: tedyu 
Committed: Mon Sep 12 12:15:50 2016 -0700

--
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 2 --
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java   | 2 --
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java| 2 --
 .../main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java| 2 --
 .../apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java   | 2 --
 5 files changed, 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3642287b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 1fe8d09..49b02be 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
@@ -81,7 +80,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
 
 
-@InterfaceAudience.Private
 public class RSGroupAdminEndpoint extends RSGroupAdminService
 implements CoprocessorService, Coprocessor, MasterObserver {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3642287b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 309985e..434c85f 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -29,14 +29,12 @@ import java.util.Set;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Interface used to manage RSGroupInfo storage. An implementation
  * has the option to support offline mode.
  * See {@link RSGroupBasedLoadBalancer}
  */
-@InterfaceAudience.Private
 public interface RSGroupInfoManager {
   //Assigned before user tables
   public static final TableName RSGROUP_TABLE_NAME =

http://git-wip-us.apache.org/repos/asf/hbase/blob/3642287b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index e9f322e..52cd339 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -57,7 +57,6 @@ import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -91,7 +90,6 @@ import org.apache.zookeeper.KeeperException;
  * It also makes use of zookeeper to store 

[19/50] [abbrv] hbase git commit: HBASE-16540 Adding checks in Scanner's setStartRow and setStopRow for invalid row key sizes.

2016-09-19 Thread syuanjiang
HBASE-16540 Adding checks in Scanner's setStartRow and setStopRow for invalid 
row key sizes.

Signed-off-by: Gary Helmling 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c57acf28
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c57acf28
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c57acf28

Branch: refs/heads/hbase-12439
Commit: c57acf28e7cabcfcbce8ae0006080088cdc47f50
Parents: 8855670
Author: Dustin Pho 
Authored: Mon Sep 12 13:25:02 2016 -0700
Committer: Gary Helmling 
Committed: Mon Sep 12 23:23:27 2016 -0700

--
 .../org/apache/hadoop/hbase/client/Scan.java| 16 +++
 .../apache/hadoop/hbase/client/TestScan.java| 28 
 2 files changed, 44 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c57acf28/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index ee3ed43..22f611a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -373,8 +373,16 @@ public class Scan extends Query {
* next closest row after the specified row.
* @param startRow row to start scanner at or after
* @return this
+   * @throws IllegalArgumentException if startRow does not meet criteria
+   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
   public Scan setStartRow(byte [] startRow) {
+if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
+  throw new IllegalArgumentException(
+"startRow's length must be less than or equal to " +
+HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
+" for a row key.");
+}
 this.startRow = startRow;
 return this;
   }
@@ -389,8 +397,16 @@ public class Scan extends Query {
* use {@link #setRowPrefixFilter(byte[])}.
* The 'trailing 0' will not yield the desired result.
* @return this
+   * @throws IllegalArgumentException if stopRow does not meet criteria
+   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
   public Scan setStopRow(byte [] stopRow) {
+if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
+  throw new IllegalArgumentException(
+"stopRow's length must be less than or equal to " +
+HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
+" for a row key.");
+}
 this.stopRow = stopRow;
 return this;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c57acf28/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
index 129914f..16c74df 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
@@ -25,6 +25,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
@@ -132,5 +134,31 @@ public class TestScan {
   fail("should not throw exception");
 }
   }
+
+  @Test
+  public void testSetStartRowAndSetStopRow() {
+Scan scan = new Scan();
+scan.setStartRow(null);
+scan.setStartRow(new byte[1]);
+scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH]);
+try {
+  scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
+  fail("should've thrown exception");
+} catch (IllegalArgumentException iae) {
+} catch (Exception e) {
+  fail("expected IllegalArgumentException to be thrown");
+}
+
+scan.setStopRow(null);
+scan.setStopRow(new byte[1]);
+scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH]);
+try {
+  scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]);
+  fail("should've thrown exception");
+} catch (IllegalArgumentException iae) {
+} catch (Exception e) {
+  fail("expected IllegalArgumentException to be thrown");
+}
+  }
 }
 



[34/50] [abbrv] hbase git commit: HBASE-16624 Fix MVCC DeSerialization bug in the HFileScannerImpl

2016-09-19 Thread syuanjiang
HBASE-16624 Fix MVCC DeSerialization bug in the HFileScannerImpl

Change-Id: Ia970619ac7369d24ed432e827319dfdca16143c2

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c4b09df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c4b09df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c4b09df

Branch: refs/heads/hbase-12439
Commit: 8c4b09dfbaf53fd770fe3963df6095fc690f2ef5
Parents: 56be3ac
Author: Nitin Aggarwal 
Authored: Mon Sep 12 22:50:07 2016 -0700
Committer: stack 
Committed: Thu Sep 15 11:01:51 2016 -0700

--
 .../java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8c4b09df/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index fc1c04e..c9e6aea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -662,7 +662,8 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
 long i = 0;
 offsetFromPos++;
 if (remaining >= Bytes.SIZEOF_INT) {
-  i = blockBuffer.getIntAfterPosition(offsetFromPos);
+  // The int read has to be converted to unsigned long so the & op
+  i = (blockBuffer.getIntAfterPosition(offsetFromPos) & 
0xL);
   remaining -= Bytes.SIZEOF_INT;
   offsetFromPos += Bytes.SIZEOF_INT;
 }



[18/50] [abbrv] hbase git commit: HBASE-16229 Cleaning up size and heapSize calculation.

2016-09-19 Thread syuanjiang
HBASE-16229 Cleaning up size and heapSize calculation.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ab33846
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ab33846
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ab33846

Branch: refs/heads/hbase-12439
Commit: 2ab3384669f1df514de250918aeaa51e9ba6b71c
Parents: 831fb3c
Author: anoopsamjohn 
Authored: Tue Sep 13 11:43:26 2016 +0530
Committer: anoopsamjohn 
Committed: Tue Sep 13 11:43:26 2016 +0530

--
 .../org/apache/hadoop/hbase/util/ClassSize.java |  15 +++
 .../hbase/regionserver/AbstractMemStore.java|  49 +++
 .../hbase/regionserver/CompactingMemStore.java  |  92 ++---
 .../hbase/regionserver/CompactionPipeline.java  |  31 +++--
 .../hbase/regionserver/DefaultMemStore.java |  30 ++---
 .../hbase/regionserver/ImmutableSegment.java|  84 ++--
 .../hadoop/hbase/regionserver/MemStore.java |   6 +-
 .../hbase/regionserver/MemStoreCompactor.java   |  24 ++--
 .../hbase/regionserver/MemStoreSnapshot.java|   2 +-
 .../hbase/regionserver/MutableSegment.java  |  25 ++--
 .../hadoop/hbase/regionserver/Segment.java  | 103 ++-
 .../hbase/regionserver/SegmentFactory.java  |  43 +++
 .../apache/hadoop/hbase/io/TestHeapSize.java|  92 -
 .../regionserver/TestCompactingMemStore.java|  22 +---
 .../TestCompactingToCellArrayMapMemStore.java   |   4 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |  49 +++
 .../TestWalAndCompactingMemStoreFlush.java  | 129 +++
 17 files changed, 417 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ab33846/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index ff9dbcb..85a6483 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -46,6 +46,12 @@ public class ClassSize {
   /** Overhead for ArrayList(0) */
   public static final int ARRAYLIST;
 
+  /** Overhead for LinkedList(0) */
+  public static final int LINKEDLIST;
+
+  /** Overhead for a single entry in LinkedList */
+  public static final int LINKEDLIST_ENTRY;
+
   /** Overhead for ByteBuffer */
   public static final int BYTE_BUFFER;
 
@@ -100,6 +106,9 @@ public class ClassSize {
   /** Overhead for AtomicBoolean */
   public static final int ATOMIC_BOOLEAN;
 
+  /** Overhead for AtomicReference */
+  public static final int ATOMIC_REFERENCE;
+
   /** Overhead for CopyOnWriteArraySet */
   public static final int COPYONWRITE_ARRAYSET;
 
@@ -240,6 +249,10 @@ public class ClassSize {
 
 ARRAYLIST = align(OBJECT + REFERENCE + (2 * Bytes.SIZEOF_INT)) + 
align(ARRAY);
 
+LINKEDLIST = align(OBJECT + (2 * Bytes.SIZEOF_INT) + (2 * REFERENCE));
+
+LINKEDLIST_ENTRY = align(OBJECT + (2 * REFERENCE));
+
 //noinspection PointlessArithmeticExpression
 BYTE_BUFFER = align(OBJECT + REFERENCE +
 (5 * Bytes.SIZEOF_INT) +
@@ -292,6 +305,8 @@ public class ClassSize {
 
 ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN);
 
+ATOMIC_REFERENCE = align(OBJECT + REFERENCE);
+
 COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE);
 
 COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ab33846/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 419b76a..5e9f632 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -52,34 +52,29 @@ public abstract class AbstractMemStore implements MemStore {
   private final CellComparator comparator;
 
   // active segment absorbs write operations
-  private volatile MutableSegment active;
+  protected volatile MutableSegment active;
   // Snapshot of memstore.  Made for flusher.
-  private volatile ImmutableSegment snapshot;
+  protected volatile ImmutableSegment snapshot;
   protected volatile long snapshotId;
   // Used to track when to flush
   private volatile long timeOfOldestEdit;
 
-  public final static long FIXED_OVERHEAD = ClassSize.align(
-  

[08/50] [abbrv] hbase git commit: HBASE-15624 Move master branch/hbase-2.0.0 to jdk-8 only

2016-09-19 Thread syuanjiang
HBASE-15624 Move master branch/hbase-2.0.0 to jdk-8 only


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/105bfc7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/105bfc7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/105bfc7d

Branch: refs/heads/hbase-12439
Commit: 105bfc7d7cd242fea384985a6a4e6ab7eced255f
Parents: c19d2ca
Author: zhangduo 
Authored: Mon Sep 12 15:44:31 2016 +0800
Committer: zhangduo 
Committed: Mon Sep 12 21:54:52 2016 +0800

--
 pom.xml| 2 +-
 src/main/asciidoc/_chapters/configuration.adoc | 5 +
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/105bfc7d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index fb3ead0..c148b19 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1163,7 +1163,7 @@
   -MM-dd'T'HH:mm
 
 ${maven.build.timestamp}
-1.7
+1.8
 
 3.0.4
 ${compileSource}

http://git-wip-us.apache.org/repos/asf/hbase/blob/105bfc7d/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 89820ca..e9f1abc 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -100,6 +100,11 @@ This section lists required services and some required 
system configuration.
 |JDK 7
 |JDK 8
 
+|2.0
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|link:http://search-hadoop.com/m/YGbbsPxZ723m3as[Not Supported]
+|yes
+
 |1.3
 |link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes



[23/50] [abbrv] hbase git commit: HBASE-15297 Correct handling of namespace existence checks in shell.

2016-09-19 Thread syuanjiang
HBASE-15297 Correct handling of namespace existence checks in shell.

Changes namespace_exists? method in SecurityAdmin ruby code to catch 
NamespaceNotFoundException
and modified Admin.java file to document the exception.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/422734e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/422734e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/422734e7

Branch: refs/heads/hbase-12439
Commit: 422734e73d8846e4a357178cf665220d689e2e6e
Parents: 77b3273
Author: Umesh Agashe 
Authored: Mon Sep 12 16:24:44 2016 -0700
Committer: Sean Busbey 
Committed: Mon Sep 12 23:29:42 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/client/Admin.java | 5 -
 .../java/org/apache/hadoop/hbase/client/HBaseAdmin.java | 6 --
 hbase-shell/src/main/ruby/hbase/security.rb | 9 +++--
 3 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/422734e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 321ea55..0f66834 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -1085,9 +1086,11 @@ public interface Admin extends Abortable, Closeable {
*
* @param name name of namespace descriptor
* @return A descriptor
+   * @throws org.apache.hadoop.hbase.NamespaceNotFoundException
+   * @throws IOException if a remote or network exception occurs
*/
   NamespaceDescriptor getNamespaceDescriptor(final String name)
-  throws IOException;
+  throws NamespaceNotFoundException, IOException;
 
   /**
* List available namespace descriptors

http://git-wip-us.apache.org/repos/asf/hbase/blob/422734e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index bdd23ab..3b41755 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ProcedureUtil;
@@ -1981,7 +1982,8 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public NamespaceDescriptor getNamespaceDescriptor(final String name) throws 
IOException {
+  public NamespaceDescriptor getNamespaceDescriptor(final String name)
+  throws NamespaceNotFoundException, IOException {
 return executeCallable(new 
MasterCallable(getConnection(),
 getRpcControllerFactory()) {
   @Override
@@ -3652,4 +3654,4 @@ public class HBaseAdmin implements Admin {
   private RpcControllerFactory getRpcControllerFactory() {
 return this.rpcControllerFactory;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/422734e7/hbase-shell/src/main/ruby/hbase/security.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/security.rb 
b/hbase-shell/src/main/ruby/hbase/security.rb
index 55519ed..4c884d6 100644
--- a/hbase-shell/src/main/ruby/hbase/security.rb
+++ b/hbase-shell/src/main/ruby/hbase/security.rb
@@ -173,12 +173,9 @@ module Hbase
 
  # Does Namespace exist
 def namespace_exists?(namespace_name)
-  namespaceDesc = @admin.getNamespaceDescriptor(namespace_name)
-  if(namespaceDesc == nil)
-return false
-  else
-return true
-  end
+  return 

[39/50] [abbrv] hbase git commit: HBASE-16586 Procedure v2 - Cleanup sched wait/lock semantic

2016-09-19 Thread syuanjiang
HBASE-16586 Procedure v2 - Cleanup sched wait/lock semantic


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6b72361
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6b72361
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6b72361

Branch: refs/heads/hbase-12439
Commit: b6b72361b68634f15f8cf83738d89147633ac378
Parents: 2597217
Author: Matteo Bertozzi 
Authored: Fri Sep 16 08:50:17 2016 -0700
Committer: Matteo Bertozzi 
Committed: Fri Sep 16 08:50:17 2016 -0700

--
 .../DispatchMergingRegionsProcedure.java|   2 +-
 .../master/procedure/MasterProcedureEnv.java|   8 +-
 .../procedure/MasterProcedureScheduler.java | 215 ++-
 .../procedure/TestMasterProcedureScheduler.java | 102 +++--
 .../security/access/TestAccessController.java   |   2 +-
 5 files changed, 250 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6b72361/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
index 9d0f0d8..0c5292b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
@@ -253,7 +253,7 @@ public class DispatchMergingRegionsProcedure
 
   @Override
   protected boolean acquireLock(final MasterProcedureEnv env) {
-return env.getProcedureQueue().waitRegions(
+return !env.getProcedureQueue().waitRegions(
   this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6b72361/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
index 52e3d60..213f80c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -137,18 +137,18 @@ public class MasterProcedureEnv {
   }
 
   public void wake(ProcedureEvent event) {
-procSched.wake(event);
+procSched.wakeEvent(event);
   }
 
   public void suspend(ProcedureEvent event) {
-procSched.suspend(event);
+procSched.suspendEvent(event);
   }
 
   public void setEventReady(ProcedureEvent event, boolean isReady) {
 if (isReady) {
-  procSched.wake(event);
+  procSched.wakeEvent(event);
 } else {
-  procSched.suspend(event);
+  procSched.suspendEvent(event);
 }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6b72361/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 84ecf22..3a215d5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -133,7 +133,7 @@ public class MasterProcedureScheduler implements 
ProcedureRunnableSet {
 // a group for all the non-table/non-server procedures or try to find 
a key for your
 // non-table/non-server procedures and implement something similar to 
the TableRunQueue.
 throw new UnsupportedOperationException(
-  "RQs for non-table/non-server procedures are not implemented yet");
+  "RQs for non-table/non-server procedures are not implemented yet: " 
+ proc);
   }
   if (notify) {
 schedWaitCond.signal();
@@ -148,7 +148,6 @@ public class MasterProcedureScheduler implements 
ProcedureRunnableSet {
 if (proc.isSuspended()) return;
 
 queue.add(proc, addFront);
-
 if (!(queue.isSuspended() || queue.hasExclusiveLock())) {
   // the queue is not suspended or removed from the fairq (run-queue)
   // because someone has an xlock on it.
@@ 

[49/50] [abbrv] hbase git commit: HBASE-16335 RpcClient under heavy load leaks some netty bytebuf (Ram)

2016-09-19 Thread syuanjiang
HBASE-16335 RpcClient under heavy load leaks some netty bytebuf (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5b8aaba
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5b8aaba
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5b8aaba

Branch: refs/heads/hbase-12439
Commit: c5b8aababe18f65f5db979128a62d8a0686b9dc5
Parents: 6eb6225
Author: Ramkrishna 
Authored: Mon Sep 19 16:12:15 2016 +0530
Committer: Ramkrishna 
Committed: Mon Sep 19 16:12:15 2016 +0530

--
 .../hadoop/hbase/ipc/AbstractRpcClient.java |  4 ++
 .../hadoop/hbase/ipc/BlockingRpcConnection.java |  5 +++
 .../hadoop/hbase/ipc/NettyRpcConnection.java| 11 +
 .../apache/hadoop/hbase/ipc/RpcConnection.java  |  5 +++
 .../hadoop/hbase/security/SaslWrapHandler.java  | 43 +---
 5 files changed, 53 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 401a240..990ffe0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -215,6 +215,7 @@ public abstract class AbstractRpcClient implements RpcC
 if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) {
   LOG.info("Cleanup idle connection to " + conn.remoteId().address);
   connections.removeValue(conn.remoteId(), conn);
+  conn.cleanupConnection();
 }
   }
 }
@@ -472,6 +473,9 @@ public abstract class AbstractRpcClient implements RpcC
   conn.shutdown();
 }
 closeInternal();
+for (T conn : connToClose) {
+  conn.cleanupConnection();
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
index c8b366d..528b726 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
@@ -685,6 +685,11 @@ class BlockingRpcConnection extends RpcConnection 
implements Runnable {
   }
 
   @Override
+  public void cleanupConnection() {
+// do nothing
+  }
+
+  @Override
   public synchronized void sendRequest(final Call call, HBaseRpcController 
pcrc)
   throws IOException {
 pcrc.notifyOnCancel(new RpcCallback() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 5f22dfd..559b7f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -36,6 +36,7 @@ import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelPipeline;
 import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
 import io.netty.handler.timeout.IdleStateHandler;
+import io.netty.util.ReferenceCountUtil;
 import io.netty.util.concurrent.Future;
 import io.netty.util.concurrent.FutureListener;
 import io.netty.util.concurrent.Promise;
@@ -119,6 +120,16 @@ class NettyRpcConnection extends RpcConnection {
 shutdown0();
   }
 
+  @Override
+  public synchronized void cleanupConnection() {
+if (connectionHeaderPreamble != null) {
+  ReferenceCountUtil.safeRelease(connectionHeaderPreamble);
+}
+if (connectionHeaderWithLength != null) {
+  ReferenceCountUtil.safeRelease(connectionHeaderWithLength);
+}
+  }
+
   private void established(Channel ch) {
 ch.write(connectionHeaderWithLength.retainedDuplicate());
 ChannelPipeline p = ch.pipeline();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java
--
diff --git 

[36/50] [abbrv] hbase git commit: HBASE-16639 TestProcedureInMemoryChore#testChoreAddAndRemove occasionally fails

2016-09-19 Thread syuanjiang
HBASE-16639 TestProcedureInMemoryChore#testChoreAddAndRemove occasionally fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/216e8473
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/216e8473
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/216e8473

Branch: refs/heads/hbase-12439
Commit: 216e8473668c1a2f5cfb5c5bf05284d4274c8548
Parents: e782d0b
Author: Matteo Bertozzi 
Authored: Thu Sep 15 18:25:11 2016 -0700
Committer: Matteo Bertozzi 
Committed: Thu Sep 15 18:25:11 2016 -0700

--
 .../apache/hadoop/hbase/procedure2/Procedure.java |  7 +++
 .../hbase/procedure2/ProcedureExecutor.java   | 18 +++---
 .../procedure2/TestProcedureInMemoryChore.java|  5 -
 3 files changed, 22 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/216e8473/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index b401871..b9145e7 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -333,6 +333,13 @@ public abstract class Procedure implements 
Comparable {
   }
 
   /**
+   * @return true if the procedure is in a RUNNABLE state.
+   */
+  protected synchronized boolean isRunnable() {
+return state == ProcedureState.RUNNABLE;
+  }
+
+  /**
* @return true if the procedure has failed.
* true may mean failed but not yet rolledback or failed and 
rolledback.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/216e8473/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 1a9010d..5066fb4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -609,15 +609,17 @@ public class ProcedureExecutor {
* @param chore the chore to add
*/
   public void addChore(final ProcedureInMemoryChore chore) {
+chore.setState(ProcedureState.RUNNABLE);
 waitingTimeout.add(chore);
   }
 
   /**
* Remove a chore procedure from the executor
* @param chore the chore to remove
-   * @return whether the chore is removed
+   * @return whether the chore is removed, or it will be removed later
*/
   public boolean removeChore(final ProcedureInMemoryChore chore) {
+chore.setState(ProcedureState.FINISHED);
 return waitingTimeout.remove(chore);
   }
 
@@ -907,13 +909,15 @@ public class ProcedureExecutor {
   // instead of bringing the Chore class in, we reuse this timeout thread 
for
   // this special case.
   if (proc instanceof ProcedureInMemoryChore) {
-try {
-  ((ProcedureInMemoryChore)proc).periodicExecute(getEnvironment());
-} catch (Throwable e) {
-  LOG.error("Ignoring CompletedProcedureCleaner exception: " + 
e.getMessage(), e);
+if (proc.isRunnable()) {
+  try {
+((ProcedureInMemoryChore)proc).periodicExecute(getEnvironment());
+  } catch (Throwable e) {
+LOG.error("Ignoring CompletedProcedureCleaner exception: " + 
e.getMessage(), e);
+  }
+  proc.setStartTime(EnvironmentEdgeManager.currentTime());
+  if (proc.isRunnable()) waitingTimeout.add(proc);
 }
-proc.setStartTime(EnvironmentEdgeManager.currentTime());
-waitingTimeout.add(proc);
 continue;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/216e8473/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java
index 32e3e8c..8bc8fa8 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java
@@ -76,15 +76,18 @@ public class 

[05/50] [abbrv] hbase git commit: HBASE-16591 Add a docker file only contains java 8 for running pre commit on master

2016-09-19 Thread syuanjiang
HBASE-16591 Add a docker file only contains java 8 for running pre commit on 
master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bda5151
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bda5151
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bda5151

Branch: refs/heads/hbase-12439
Commit: 7bda5151eee2febc03a8e0434705e0aa2d6a8c34
Parents: 80d8b21
Author: zhangduo 
Authored: Sat Sep 10 07:52:54 2016 +0800
Committer: zhangduo 
Committed: Sun Sep 11 13:02:36 2016 +0800

--
 dev-support/docker/Dockerfile | 139 +
 1 file changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bda5151/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
new file mode 100644
index 000..44e2a58
--- /dev/null
+++ b/dev-support/docker/Dockerfile
@@ -0,0 +1,139 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Dockerfile for installing the necessary dependencies for building Hadoop.
+# See BUILDING.txt.
+
+
+FROM ubuntu:trusty
+
+WORKDIR /root
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV DEBCONF_TERSE true
+
+##
+# Install common dependencies from packages
+#
+# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
+# Ubuntu Java.  See Java section below!
+##
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+build-essential \
+bzip2 \
+cmake \
+curl \
+doxygen \
+fuse \
+g++ \
+gcc \
+git \
+gnupg-agent \
+make \
+libbz2-dev \
+libcurl4-openssl-dev \
+libfuse-dev \
+libprotobuf-dev \
+libprotoc-dev \
+libsnappy-dev \
+libssl-dev \
+libtool \
+pinentry-curses \
+pkg-config \
+protobuf-compiler \
+protobuf-c-compiler \
+python \
+python2.7 \
+python-pip \
+rsync \
+snappy \
+zlib1g-dev
+
+###
+# Oracle Java
+###
+
+RUN echo "dot_style = mega" > "/root/.wgetrc"
+RUN echo "quiet = on" >> "/root/.wgetrc"
+
+RUN apt-get -q install --no-install-recommends -y software-properties-common
+RUN add-apt-repository -y ppa:webupd8team/java
+RUN apt-get -q update
+
+# Auto-accept the Oracle JDK license
+RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select 
true | sudo /usr/bin/debconf-set-selections
+RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
+
+
+# Apps that require Java
+###
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+ant \
+maven
+
+# Fixing the Apache commons / Maven dependency problem under Ubuntu:
+# See http://wiki.apache.org/commons/VfsProblems
+RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
+
+##
+# Install findbugs
+##
+RUN mkdir -p /opt/findbugs && \
+curl -L -s -S \
+ 
https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download
 \
+ -o /opt/findbugs.tar.gz && \
+tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs
+ENV FINDBUGS_HOME /opt/findbugs
+
+
+# Install shellcheck
+
+RUN apt-get -q install -y cabal-install
+RUN mkdir /root/.cabal
+RUN echo "remote-repo: hackage.fpcomplete.com:http://hackage.fpcomplete.com/" >> /root/.cabal/config
+#RUN echo "remote-repo: hackage.haskell.org:http://hackage.haskell.org/" > /root/.cabal/config
+RUN echo "remote-repo-cache: /root/.cabal/packages" >> /root/.cabal/config
+RUN cabal update
+RUN cabal install shellcheck --global
+
+
+# Install bats
+
+RUN add-apt-repository -y ppa:duggan/bats
+RUN apt-get -q update
+RUN apt-get -q install --no-install-recommends -y bats
+
+
+# Install pylint
+
+RUN pip install pylint
+
+
+# Install dateutil.parser
+
+RUN pip install python-dateutil
+
+###
+# Avoid out of memory errors in builds
+###
+ENV MAVEN_OPTS -Xms256m -Xmx512m
+
+###
+# Everything past this 

[13/50] [abbrv] hbase git commit: HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing @InterfaceAudience annotation (Umesh Agashe)

2016-09-19 Thread syuanjiang
HBASE-16491 A few org.apache.hadoop.hbase.rsgroup classes missing 
@InterfaceAudience annotation (Umesh Agashe)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/552400e5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/552400e5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/552400e5

Branch: refs/heads/hbase-12439
Commit: 552400e53641991d959da4e27042b2157172e373
Parents: 3642287
Author: tedyu 
Authored: Mon Sep 12 12:16:26 2016 -0700
Committer: tedyu 
Committed: Mon Sep 12 12:16:26 2016 -0700

--
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 2 ++
 .../java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java   | 2 ++
 .../org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java| 2 ++
 .../main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java| 2 ++
 .../apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java   | 2 ++
 5 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/552400e5/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 49b02be..1fe8d09 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
@@ -80,6 +81,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
 
 
+@InterfaceAudience.Private
 public class RSGroupAdminEndpoint extends RSGroupAdminService
 implements CoprocessorService, Coprocessor, MasterObserver {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/552400e5/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 434c85f..309985e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -29,12 +29,14 @@ import java.util.Set;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Interface used to manage RSGroupInfo storage. An implementation
  * has the option to support offline mode.
  * See {@link RSGroupBasedLoadBalancer}
  */
+@InterfaceAudience.Private
 public interface RSGroupInfoManager {
   //Assigned before user tables
   public static final TableName RSGROUP_TABLE_NAME =

http://git-wip-us.apache.org/repos/asf/hbase/blob/552400e5/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 52cd339..e9f322e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -90,6 +91,7 @@ import org.apache.zookeeper.KeeperException;
  * It also makes use of zookeeper to store group information 

[29/50] [abbrv] hbase git commit: Tune up the release candidate making section

2016-09-19 Thread syuanjiang
Tune up the release candidate making section


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8540171a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8540171a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8540171a

Branch: refs/heads/hbase-12439
Commit: 8540171a45ec544aa484ca8b23af002db1196a56
Parents: 4c6a98b
Author: stack 
Authored: Mon Aug 29 16:57:42 2016 -0700
Committer: stack 
Committed: Wed Sep 14 08:02:01 2016 -0700

--
 src/main/asciidoc/_chapters/developer.adoc | 60 -
 1 file changed, 38 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8540171a/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index 0a29864..ad9f3f4 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
For the build to sign them for you, you need a properly configured _settings.xml_ in
 === Making a Release Candidate
 === Making a Release Candidate
 
 NOTE: These instructions are for building HBase 1.0.x.
-For building earlier versions, the process is different.
+For building earlier versions, e.g. 0.98.x, the process is different.
 See this section under the respective release documentation folders.
 
 .Point Releases
@@ -521,20 +521,23 @@ You should also have tried recent branch tips out on a 
cluster under load, perha
 [NOTE]
 
 At this point you should tag the previous release branch (ex: 0.96.1) with the 
new point release tag (e.g.
-0.96.1.1 tag). Any commits with changes for the point release should be 
applied to the new tag.
+0.96.1.1 tag). Any commits with changes for the point release should go 
against the new tag.
 
 
 The Hadoop link:http://wiki.apache.org/hadoop/HowToRelease[How To
-Release] wiki page is used as a model for most of the 
instructions below, and may have more detail on particular sections, so it is 
worth review.
+Release] wiki page is used as a model for most of the 
instructions below.
+Although it is now stale, it may have more detail on 
particular sections, so
+it is worth reviewing, especially if you get stuck.
 
 .Specifying the Heap Space for Maven on OSX
 [NOTE]
 
-On OSX, you may need to specify the heap space for Maven commands, by setting 
the `MAVEN_OPTS` variable to `-Xmx3g`.
+On OSX, you may run into OutOfMemoryErrors building, particularly building the 
site and
+documentation. Up the heap and permgen space for Maven by setting the 
`MAVEN_OPTS` variable.
 You can prefix the variable to the Maven command, as in the following example:
 
 
-MAVEN_OPTS="-Xmx2g" mvn package
+MAVEN_OPTS="-Xmx4g -XX:MaxPermSize=256m" mvn package
 
 
 You could also set this in an environment variable or alias in your shell.
@@ -552,7 +555,8 @@ The script handles everything else, and comes in handy.
 Update _CHANGES.txt_ with the changes since the last release.
 Make sure the URL to the JIRA points to the proper location which lists fixes 
for this release.
 Adjust the version in all the POM files appropriately.
-If you are making a release candidate, you must remove the `-SNAPSHOT` label 
from all versions.
+If you are making a release candidate, you must remove the `-SNAPSHOT` label 
from all versions
+in all pom.xml files.
 If you are running this recipe to publish a snapshot, you must keep the 
`-SNAPSHOT` suffix on the hbase version.
 The link:http://mojo.codehaus.org/versions-maven-plugin/[Versions
 Maven Plugin] can be of use here.
@@ -564,7 +568,7 @@ To set a version in all the many poms of the hbase 
multi-module project, use a c
 $ mvn clean org.codehaus.mojo:versions-maven-plugin:1.3.1:set 
-DnewVersion=0.96.0
 
 +
-Checkin the _CHANGES.txt_ and any version changes.
+Make sure all versions in poms are changed! Checkin the _CHANGES.txt_ and any 
version changes.
 
 . Update the documentation.
 +
@@ -590,7 +594,7 @@ Extract the tarball and make sure it looks good.
 A good test for the src tarball being 'complete' is to see if you can build 
new tarballs from this source bundle.
 If the source tarball is good, save it off to a _version directory_, a 
directory somewhere where you are collecting all of the tarballs you will 
publish as part of the release candidate.
 For example if you were building an hbase-0.96.0 release candidate, you might 
call the directory _hbase-0.96.0RC0_.
-Later you will publish this directory as our release candidate up on 
pass:[http://people.apache.org/~YOU].
+Later you will publish this directory as our release candidate.
 
 . 

hbase git commit: HBASE-16655 hbase backup describe with incorrect backup id results in NPE (Vladimir and Ted)

2016-09-19 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 6d1e7079f -> d0d1485f2


HBASE-16655 hbase backup describe with incorrect backup id results in NPE 
(Vladimir and Ted)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0d1485f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0d1485f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0d1485f

Branch: refs/heads/HBASE-7912
Commit: d0d1485f2b5f664489ed6a5b5b73ef8a47fe6715
Parents: 6d1e707
Author: tedyu 
Authored: Mon Sep 19 15:00:12 2016 -0700
Committer: tedyu 
Committed: Mon Sep 19 15:00:12 2016 -0700

--
 .../hbase/backup/impl/BackupCommands.java   | 14 --
 .../hbase/backup/TestBackupCommandLineTool.java | 51 +++-
 .../hadoop/hbase/backup/TestBackupDescribe.java | 21 +++-
 3 files changed, 66 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d0d1485f/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 1884788..3d40da2 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -326,13 +326,18 @@ public final class BackupCommands {
   try (final Connection conn = ConnectionFactory.createConnection(conf);
   final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) {
 BackupInfo info = admin.getBackupInfo(backupId);
+if (info == null) {
+  System.err.println("ERROR: " + backupId + " does not exist");
+  printUsage();
+  throw new IOException(INCORRECT_USAGE);
+}
 System.out.println(info.getShortDescription());
   }
 }
 
 @Override
 protected void printUsage() {
-  System.err.println(DESCRIBE_CMD_USAGE);  
+  System.err.println(DESCRIBE_CMD_USAGE);
 }
   }
 
@@ -349,17 +354,16 @@ public final class BackupCommands {
   
   if (cmdline == null || cmdline.getArgs() == null ||
   cmdline.getArgs().length != 2) {
-System.out.println("No backup id was specified, "
+System.err.println("No backup id was specified, "
 + "will retrieve the most recent (ongoing) sessions");
   }
   String[] args = cmdline.getArgs();
-  if (args.length > 2) {
+  if (args.length != 2) {
 System.err.println("ERROR: wrong number of arguments: " + args.length);
 printUsage();
 throw new IOException(INCORRECT_USAGE);
   }
-  
-  
+
   String backupId = args == null ? null : args[1];
   Configuration conf = getConf() != null? getConf(): 
HBaseConfiguration.create();
   try(final Connection conn = ConnectionFactory.createConnection(conf); 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d0d1485f/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
index 8330ecb..31a859d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupCommandLineTool.java
@@ -56,7 +56,16 @@ public class TestBackupCommandLineTool {
 
 output = baos.toString();
 System.out.println(baos.toString());
-assertTrue(output.indexOf("Usage: hbase backup decsribe ") >= 
0);
+assertTrue(output.indexOf("Usage: hbase backup decsribe ") >= 
0); 
+
+baos = new ByteArrayOutputStream();
+System.setErr(new PrintStream(baos));
+args = new String[]{"describe" }; 
+ToolRunner.run(conf, new BackupDriver(), args);
+
+output = baos.toString();
+System.out.println(baos.toString());
+assertTrue(output.indexOf("Usage: hbase backup decsribe ") >= 0);
   }
 
   @Test
@@ -78,6 +87,15 @@ public class TestBackupCommandLineTool {
 output = baos.toString();
 System.out.println(baos.toString());
 assertTrue(output.indexOf("Usage: hbase backup create") >= 0);
+
+baos = new ByteArrayOutputStream();
+System.setErr(new PrintStream(baos));
+args = new String[]{"create"}; 
+ToolRunner.run(conf, new BackupDriver(), args);
+
+output = baos.toString();
+

hbase git commit: HBASE-15448 HBase Backup Phase 3: Restore optimization 2 (Vladimir Rodionov)

2016-09-19 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-7912 fef921860 -> 6d1e7079f


HBASE-15448 HBase Backup Phase 3: Restore optimization 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d1e7079
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d1e7079
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d1e7079

Branch: refs/heads/HBASE-7912
Commit: 6d1e7079f7f5eccf426dc0dd5136681bbc8e4d52
Parents: fef9218
Author: tedyu 
Authored: Mon Sep 19 13:31:19 2016 -0700
Committer: tedyu 
Committed: Mon Sep 19 13:31:19 2016 -0700

--
 .../backup/BackupRestoreServerFactory.java  |  12 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  28 ++
 .../hbase/backup/IncrementalRestoreService.java |  42 --
 .../hadoop/hbase/backup/RestoreService.java |  50 +++
 .../backup/impl/RestoreTablesProcedure.java | 402 ---
 .../hbase/backup/mapreduce/HFileSplitter.java   | 190 +
 .../mapreduce/MapReduceRestoreService.java  | 108 ++---
 .../backup/master/FullTableBackupProcedure.java |   1 -
 .../backup/master/RestoreTablesProcedure.java   | 387 ++
 .../hbase/backup/util/RestoreServerUtil.java| 149 ---
 .../hbase/mapreduce/HFileInputFormat2.java  | 174 
 .../org/apache/hadoop/hbase/master/HMaster.java |   2 +-
 .../hadoop/hbase/backup/TestBackupBase.java |   2 +-
 .../hbase/backup/TestIncrementalBackup.java |  34 +-
 14 files changed, 1016 insertions(+), 565 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1e7079/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
index 25ec9d9..7644a4d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
@@ -36,15 +36,15 @@ public final class BackupRestoreServerFactory {
   }
   
   /**
-   * Gets incremental restore service
+   * Gets backup restore service
* @param conf - configuration
-   * @return incremental backup service instance
+   * @return backup restore service instance
*/
-  public static IncrementalRestoreService 
getIncrementalRestoreService(Configuration conf) {
-Class cls =
+  public static RestoreService getRestoreService(Configuration conf) {
+Class cls =
 conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, 
MapReduceRestoreService.class,
-  IncrementalRestoreService.class);
-IncrementalRestoreService service =  ReflectionUtils.newInstance(cls, 
conf);
+  RestoreService.class);
+RestoreService service =  ReflectionUtils.newInstance(cls, conf);
 service.setConf(conf);
 return service;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d1e7079/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
index 1fc0a92..a130a9b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -20,18 +20,25 @@
 package org.apache.hadoop.hbase.backup;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 
 /**
  * View to an on-disk Backup Image FileSystem
@@ -77,6 +84,27 @@ public class HBackupFileSystem {
 return new Path(getTableBackupDir(backupRootPath.toString(), backupId, 
tableName));
   }
 
+  
+  public static List 

hbase git commit: HBASE-16554 Rebuild WAL tracker if trailer is corrupted.

2016-09-19 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master c5b8aabab -> b2eac0da3


HBASE-16554 Rebuild WAL tracker if trailer is corrupted.

Change-Id: Iecc3347de3de9fc57f57ab5f498aad404d02ec52


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2eac0da
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2eac0da
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2eac0da

Branch: refs/heads/master
Commit: b2eac0da33c4161aa8188213171afb03b72048a4
Parents: c5b8aab
Author: Apekshit Sharma 
Authored: Sat Sep 17 17:38:40 2016 -0700
Committer: Apekshit Sharma 
Committed: Mon Sep 19 12:23:48 2016 -0700

--
 .../procedure2/store/ProcedureStoreTracker.java | 15 +++-
 .../procedure2/store/wal/ProcedureWALFile.java  |  2 +
 .../store/wal/ProcedureWALFormat.java   | 14 +++-
 .../store/wal/ProcedureWALFormatReader.java | 59 +++---
 .../procedure2/store/wal/WALProcedureStore.java | 50 ++--
 .../store/wal/TestWALProcedureStore.java| 82 
 6 files changed, 178 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2eac0da/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 78d6a44..a60ba3f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -93,6 +93,7 @@ public class ProcedureStoreTracker {
 private long[] updated;
 /**
  * Keeps track of procedure ids which belong to this bitmap's range and 
have been deleted.
+ * This represents global state since it's not reset on WAL rolls.
  */
 private long[] deleted;
 /**
@@ -449,8 +450,7 @@ public class ProcedureStoreTracker {
 }
   }
 
-  public void resetToProto(ProcedureProtos.ProcedureStoreTracker 
trackerProtoBuf)
-  throws IOException {
+  public void resetToProto(final ProcedureProtos.ProcedureStoreTracker 
trackerProtoBuf) {
 reset();
 for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: 
trackerProtoBuf.getNodeList()) {
   final BitSetNode node = new BitSetNode(protoNode);
@@ -536,6 +536,7 @@ public class ProcedureStoreTracker {
 BitSetNode node = getOrCreateNode(procId);
 assert node.contains(procId) : "expected procId=" + procId + " in the 
node=" + node;
 node.updateState(procId, isDeleted);
+trackProcIds(procId);
   }
 
   public void reset() {
@@ -545,6 +546,11 @@ public class ProcedureStoreTracker {
 resetUpdates();
   }
 
+  public boolean isUpdated(long procId) {
+final Map.Entry entry = map.floorEntry(procId);
+return entry != null && entry.getValue().contains(procId) && 
entry.getValue().isUpdated(procId);
+  }
+
   /**
* If {@link #partial} is false, returns state from the bitmap. If no state 
is found for
* {@code procId}, returns YES.
@@ -583,6 +589,10 @@ public class ProcedureStoreTracker {
 }
   }
 
+  public boolean isPartial() {
+return partial;
+  }
+
   public void setPartialFlag(boolean isPartial) {
 if (this.partial && !isPartial) {
   for (Map.Entry entry : map.entrySet()) {
@@ -720,6 +730,7 @@ public class ProcedureStoreTracker {
   entry.getValue().dump();
 }
   }
+
   /**
* Iterates over
* {@link BitSetNode}s in this.map and subtracts with corresponding ones 
from {@code other}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2eac0da/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 99e7a7e..b9726a8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -62,6 +62,7 @@ public class ProcedureWALFile implements 
Comparable {
 this.logFile = logStatus.getPath();
 this.logSize = logStatus.getLen();
 this.timestamp = logStatus.getModificationTime();
+tracker.setPartialFlag(true);
   }
 
   public ProcedureWALFile(FileSystem fs, Path logFile, 

hbase git commit: HBASE-16335 RpcClient under heavy load leaks some netty bytebuf (Ram)

2016-09-19 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master 6eb622545 -> c5b8aabab


HBASE-16335 RpcClient under heavy load leaks some netty bytebuf (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5b8aaba
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5b8aaba
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5b8aaba

Branch: refs/heads/master
Commit: c5b8aababe18f65f5db979128a62d8a0686b9dc5
Parents: 6eb6225
Author: Ramkrishna 
Authored: Mon Sep 19 16:12:15 2016 +0530
Committer: Ramkrishna 
Committed: Mon Sep 19 16:12:15 2016 +0530

--
 .../hadoop/hbase/ipc/AbstractRpcClient.java |  4 ++
 .../hadoop/hbase/ipc/BlockingRpcConnection.java |  5 +++
 .../hadoop/hbase/ipc/NettyRpcConnection.java| 11 +
 .../apache/hadoop/hbase/ipc/RpcConnection.java  |  5 +++
 .../hadoop/hbase/security/SaslWrapHandler.java  | 43 +---
 5 files changed, 53 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index 401a240..990ffe0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -215,6 +215,7 @@ public abstract class AbstractRpcClient implements RpcC
 if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) {
   LOG.info("Cleanup idle connection to " + conn.remoteId().address);
   connections.removeValue(conn.remoteId(), conn);
+  conn.cleanupConnection();
 }
   }
 }
@@ -472,6 +473,9 @@ public abstract class AbstractRpcClient implements RpcC
   conn.shutdown();
 }
 closeInternal();
+for (T conn : connToClose) {
+  conn.cleanupConnection();
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
index c8b366d..528b726 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
@@ -685,6 +685,11 @@ class BlockingRpcConnection extends RpcConnection 
implements Runnable {
   }
 
   @Override
+  public void cleanupConnection() {
+// do nothing
+  }
+
+  @Override
   public synchronized void sendRequest(final Call call, HBaseRpcController 
pcrc)
   throws IOException {
 pcrc.notifyOnCancel(new RpcCallback() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index 5f22dfd..559b7f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -36,6 +36,7 @@ import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelPipeline;
 import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
 import io.netty.handler.timeout.IdleStateHandler;
+import io.netty.util.ReferenceCountUtil;
 import io.netty.util.concurrent.Future;
 import io.netty.util.concurrent.FutureListener;
 import io.netty.util.concurrent.Promise;
@@ -119,6 +120,16 @@ class NettyRpcConnection extends RpcConnection {
 shutdown0();
   }
 
+  @Override
+  public synchronized void cleanupConnection() {
+if (connectionHeaderPreamble != null) {
+  ReferenceCountUtil.safeRelease(connectionHeaderPreamble);
+}
+if (connectionHeaderWithLength != null) {
+  ReferenceCountUtil.safeRelease(connectionHeaderWithLength);
+}
+  }
+
   private void established(Channel ch) {
 ch.write(connectionHeaderWithLength.retainedDuplicate());
 ChannelPipeline p = ch.pipeline();

http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b8aaba/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java