hbase git commit: HBASE-15146 Don't block on Reader threads queueing to a scheduler queue

2016-01-28 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 b71c3db1f -> 51998b9eb


HBASE-15146 Don't block on Reader threads queueing to a scheduler queue


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/51998b9e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/51998b9e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/51998b9e

Branch: refs/heads/branch-1.2
Commit: 51998b9eb5c97265c93a83047d897eb17c7a58ca
Parents: b71c3db
Author: Elliott Clark 
Authored: Wed Jan 20 17:43:22 2016 -0800
Committer: Elliott Clark 
Committed: Thu Jan 28 08:11:27 2016 -0500

--
 .../hadoop/hbase/CallQueueTooBigException.java  | 33 +++
 .../hadoop/hbase/client/AsyncProcess.java   |  3 +-
 .../hadoop/hbase/client/ConnectionManager.java  | 48 +-
 .../hbase/exceptions/ClientExceptionsUtil.java  | 95 
 .../hadoop/hbase/client/TestAsyncProcess.java   | 39 ++--
 .../exceptions/TestClientExceptionsUtil.java| 37 
 .../hbase/ipc/BalancedQueueRpcExecutor.java |  4 +-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 12 ++-
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java|  4 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.java|  2 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |  2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 21 +++--
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java|  8 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  3 +-
 14 files changed, 240 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/51998b9e/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
new file mode 100644
index 000..d07c657
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallQueueTooBigException extends IOException {
+  public CallQueueTooBigException() {
+    super();
+  }
+}
\ No newline at end of file
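For context, a minimal sketch of how a client might end up seeing this new exception; the table name, column, and reaction below are hypothetical, and in practice the client's retrying machinery may wrap the exception before it surfaces:

import java.io.IOException;

import org.apache.hadoop.hbase.CallQueueTooBigException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CallQueueTooBigExample {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("t1"))) { // hypothetical table
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      try {
        table.put(put);
      } catch (CallQueueTooBigException e) {
        // The server rejected the call rather than letting a Reader thread
        // block on a full scheduler queue; back off and retry later.
        System.err.println("Call queue full on server, will retry later: " + e.getMessage());
      }
    }
  }
}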

http://git-wip-us.apache.org/repos/asf/hbase/blob/51998b9e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index e895a13..4069e49e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -1293,7 +1294,7 @@ class AsyncProcess {
       // Failure: retry if it's make sense else update the errors lists
       if (result == null || result instanceof Throwable) {
         Row row = sentAction.getAction();
-        throwable = ConnectionManager.findException(result);
+        throwable = ClientExceptionsUtil.findException(result);
         // Register corresponding failures once per server/once per region.
         if

hbase git commit: HBASE-15146 Don't block on Reader threads queueing to a scheduler queue

2016-01-28 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/branch-1 630ad95c9 -> 421fe24e9


HBASE-15146 Don't block on Reader threads queueing to a scheduler queue


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/421fe24e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/421fe24e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/421fe24e

Branch: refs/heads/branch-1
Commit: 421fe24e9bb925e6199cc02118a5314458caeb38
Parents: 630ad95
Author: Elliott Clark 
Authored: Wed Jan 20 17:43:22 2016 -0800
Committer: Elliott Clark 
Committed: Thu Jan 28 08:11:11 2016 -0500

--
 .../hadoop/hbase/CallQueueTooBigException.java  | 33 +++
 .../hadoop/hbase/client/AsyncProcess.java   |  3 +-
 .../hadoop/hbase/client/ConnectionManager.java  | 48 +-
 .../hbase/exceptions/ClientExceptionsUtil.java  | 95 
 .../hadoop/hbase/client/TestAsyncProcess.java   | 39 ++--
 .../exceptions/TestClientExceptionsUtil.java| 37 
 .../hbase/ipc/BalancedQueueRpcExecutor.java |  4 +-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 12 ++-
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java|  4 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.java|  2 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |  2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 21 +++--
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java|  8 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  3 +-
 14 files changed, 240 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/421fe24e/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
new file mode 100644
index 000..d07c657
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallQueueTooBigException extends IOException {
+  public CallQueueTooBigException() {
+    super();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/421fe24e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index e895a13..4069e49e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -1293,7 +1294,7 @@ class AsyncProcess {
       // Failure: retry if it's make sense else update the errors lists
       if (result == null || result instanceof Throwable) {
         Row row = sentAction.getAction();
-        throwable = ConnectionManager.findException(result);
+        throwable = ClientExceptionsUtil.findException(result);
         // Register corresponding failures once per server/once per region.
         if (!re

hbase git commit: HBASE-15146 Don't block on Reader threads queueing to a scheduler queue

2016-01-28 Thread eclark
Repository: hbase
Updated Branches:
  refs/heads/master 47c414794 -> 138b75467


HBASE-15146 Don't block on Reader threads queueing to a scheduler queue


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/138b7546
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/138b7546
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/138b7546

Branch: refs/heads/master
Commit: 138b754671d51d3f494adc250ab0cb9e085c858a
Parents: 47c4147
Author: Elliott Clark 
Authored: Wed Jan 20 17:43:22 2016 -0800
Committer: Elliott Clark 
Committed: Thu Jan 28 07:58:43 2016 -0500

--
 .../hadoop/hbase/CallQueueTooBigException.java  | 33 +++
 .../hadoop/hbase/client/AsyncProcess.java   |  9 +-
 .../hbase/client/ConnectionImplementation.java  | 52 +--
 .../hbase/exceptions/ClientExceptionsUtil.java  | 95 
 .../hadoop/hbase/client/TestAsyncProcess.java   | 39 ++--
 .../exceptions/TestClientExceptionsUtil.java| 37 
 .../hbase/ipc/BalancedQueueRpcExecutor.java |  4 +-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 12 ++-
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java|  4 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.java|  2 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |  2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 21 +++--
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java|  8 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |  3 +-
 14 files changed, 243 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/138b7546/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
new file mode 100644
index 000..d07c657
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallQueueTooBigException extends IOException {
+  public CallQueueTooBigException() {
+    super();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/138b7546/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 5102ec5..4ceb89a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -1174,9 +1175,9 @@
         byte[] row = e.getValue().iterator().next().getAction().getRow();
         // Do not use the exception for updating cache because it might be coming from
         // any of the regions in the MultiAction.
-        // TODO: depending on type of exception we might not want to update cache at all?
         if (tableName != null) {
-          connection.updateCachedLocations(tableName, regionName, row, null, server);

hbase git commit: HBASE-15171 Addendum removes extra loop (Yu Li)

2016-01-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 138b75467 -> 37ed0f6d0


HBASE-15171 Addendum removes extra loop (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37ed0f6d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37ed0f6d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37ed0f6d

Branch: refs/heads/master
Commit: 37ed0f6d0815389e0b368bc98b3a01dd02f193ac
Parents: 138b754
Author: tedyu 
Authored: Thu Jan 28 07:06:10 2016 -0800
Committer: tedyu 
Committed: Thu Jan 28 07:06:10 2016 -0800

--
 .../org/apache/hadoop/hbase/mapreduce/PutSortReducer.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37ed0f6d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index d18ea34..b302d58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -67,12 +67,11 @@ public class PutSortReducer extends
     for (List<Cell> cells : p.getFamilyCellMap().values()) {
       for (Cell cell : cells) {
         KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-        map.add(kv);
+        if (map.add(kv)) { // don't count duplicated kv into size
+          curSize += kv.heapSize();
+        }
       }
     }
-    for(KeyValue kv: map){
-      curSize +=kv.heapSize();
-    }
   }
   context.setStatus("Read " + map.size() + " entries of " + map.getClass()
       + "(" + StringUtils.humanReadableInt(curSize) + ")");



hbase git commit: HBASE-15171 Addendum removes extra loop (Yu Li)

2016-01-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 421fe24e9 -> dfa948413


HBASE-15171 Addendum removes extra loop (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfa94841
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfa94841
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfa94841

Branch: refs/heads/branch-1
Commit: dfa94841374f78422d4e44a5623cc8b601966b1d
Parents: 421fe24
Author: tedyu 
Authored: Thu Jan 28 07:08:03 2016 -0800
Committer: tedyu 
Committed: Thu Jan 28 07:08:03 2016 -0800

--
 .../org/apache/hadoop/hbase/mapreduce/PutSortReducer.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfa94841/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index 5533bad..a71b66a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -66,12 +66,11 @@ public class PutSortReducer extends
     for (List<Cell> cells : p.getFamilyCellMap().values()) {
      for (Cell cell : cells) {
        KeyValue kv = KeyValueUtil.ensureKeyValueTypeForMR(cell);
-        map.add(kv);
+        if (map.add(kv)) { // don't count duplicated kv into size
+          curSize += kv.heapSize();
+        }
      }
    }
-    for(KeyValue kv: map){
-      curSize +=kv.heapSize();
-    }
  }
  context.setStatus("Read " + map.size() + " entries of " + map.getClass()
      + "(" + StringUtils.humanReadableInt(curSize) + ")");



[06/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.ManyServersManyRegionsConnection.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.ManyServersManyRegionsConnection.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.ManyServersManyRegionsConnection.html
index 8921949..4ce68a2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.ManyServersManyRegionsConnection.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.ManyServersManyRegionsConnection.html
@@ -293,286 +293,282 @@
[generated-javadoc diff omitted: the method-summary table drops the "static Throwable findException(Object)" row (the method moved to ClientExceptionsUtil) and the remaining rows are renumbered]

[08/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
index 4e2d5b2..77ae142 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
@@ -108,7 +108,7 @@
[generated-javadoc diff omitted: the class header of "static class TestAsyncProcess.MyConnectionImpl2 extends TestAsyncProcess.MyConnectionImpl" is regenerated, the "static Throwable findException(Object)" row is removed from its method summary, and the remaining rows are renumbered]

[03/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/serialized-form.html
--
diff --git a/testdevapidocs/serialized-form.html b/testdevapidocs/serialized-form.html
index a857885..58b8382 100644
--- a/testdevapidocs/serialized-form.html
+++ b/testdevapidocs/serialized-form.html
[generated-HTML diff condensed; the recoverable changes are:]

Added under "Package org.apache.hadoop.hbase":
  Class CallQueueTooBigException extends IOException implements Serializable

Added section "Package org.apache.hadoop.hbase.exceptions":
  ConnectionClosingException extends IOException (serialVersionUID: -8980028569652624236L)
  DeserializationException extends HBaseException
  FailedSanityCheckException extends DoNotRetryIOException (serialVersionUID: 1788783640409186240L)
  HBaseException extends Exception
  IllegalArgumentIOException extends IOException
  LockTimeoutException extends DoNotRetryIOException (serialVersionUID: -1770764924258999825L)
  MergeRegionException extends DoNotRetryRegionException (serialVersionUID: 4970899110066124122L)
  OperationConflictException extends DoNotRetryIOException (serialVersionUID: -8930333627489862872L)
  OutOfOrderScannerNextException extends DoNotRetryIOException (serialVersionUID: 4595751007554273567L)
  PreemptiveFastFailException extends ConnectException (serialVersionUID: 7129103682617007177L;
    serialized fields: failureCount, timeOfFirstFailureMilliSec, timeOfLatestAttemptMilliSec)
  RegionInRecoveryException extends NotServingRegionException (serialVersionUID: 327302071153799L)
  RegionMovedException extends NotServingRegionException (serialVersionUID: -7232903522310558396L;
    serialized fields: hostname, port, startCode, locationSeqNum)
  RegionOpeningException extends NotServingRegionException (serialVersionUID: -7232903522310558395L)
  TimeoutIOException extends IOException
  UnknownProtocolException extends DoNotRetryIOException (serialized field: protocol)

Removed:
  Class org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException extends IOException

[01/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6d411951d -> 39cf5e9b6


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
index 02eb12c..104ec8d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
@@ -53,1051 +53,1080 @@
[generated source-listing diff omitted: the rendered TestAsyncProcess source is re-rendered to match the updated test]

[02/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
index 02eb12c..104ec8d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
@@ -53,1051 +53,1080 @@
[generated source-listing diff omitted: the rendered TestAsyncProcess source is re-rendered to match the updated test]

[44/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index b584f3a..6ee723a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -196,7 +196,7 @@ extends ConnectionImplementation
[generated-javadoc diff omitted: findException is dropped from the list of methods inherited from ConnectionImplementation; the rest of the inherited-method list is unchanged]

[15/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Responder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Responder.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Responder.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Responder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Responder.html
@@ -75,2529 +75,2534 @@
[generated source-listing diff condensed: the rendered RpcServer source is re-rendered with the HBASE-15146 changes; the recoverable class javadoc reads: "An RPC server that hosts protobuf described Services. An RpcServer instance has a Listener that hosts the socket. Listener has fixed number of Readers in an ExecutorPool, 10 by default."]
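The recovered javadoc above is the backdrop for HBASE-15146: with a small, fixed pool of Reader threads, one reader blocked on a full scheduler queue stalls every connection it serves. A hedged sketch of sizing that pool; the configuration key is our reading of the RpcServer code and worth double-checking:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReaderPoolSizing {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Sizes the Listener's reader pool (assumed key; 10 is the default
    // mentioned in the javadoc above).
    conf.setInt("hbase.ipc.server.read.threadpool.size", 10);
    System.out.println(conf.getInt("hbase.ipc.server.read.threadpool.size", 10));
  }
}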

[35/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.html
index d0040f6..3180076 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.html
@@ -61,1780 +61,1781 @@
[generated source-listing diff condensed: the rendered AsyncProcess source is re-rendered with the HBASE-15146 changes; the recoverable class javadoc and AsyncRequestFuture interface read:]

This class allows a continuous flow of requests. It's written to be compatible with a
synchronous caller such as HTable.

The caller sends a buffer of operations by calling submit. This class extracts from that list
the operations it can send, i.e. the operations that are on regions that are not considered
busy. The process is asynchronous, i.e. it returns immediately once it has finished iterating
over the list. If, and only if, the maximum number of current tasks is reached, the call to
submit will block. Alternatively, the caller can call submitAll, in which case all the
operations will be sent. Each call to submit returns a future-like object that can be used
to track operation progress.

The class manages the retries internally.

The class can be constructed in regular mode, or "global error" mode. In global error mode,
AP tracks errors across all calls (each "future" also has a global view of all errors). That
mode is necessary for backward compatibility with HTable behavior, where multiple submissions
are made and the errors can propagate using any put/flush call, from previous calls.
In "regular" mode, the errors are tracked inside the Future object that is returned.
The results are always tracked inside the Future object and can be retrieved when the call
has finished. Partial results can also be retrieved if some part of a multi-request failed.

This class is thread safe in regular mode; in global error mode, submitting operations and
retrieving errors from different threads may not be thread safe.
Internally, the class is thread safe enough to manage simultaneously new submissions and
results arising from older operations.

Internally, this class works with Row, which means it could theoretically be used for gets
as well.

The AsyncRequestFuture interface, the context used to wait for results from one submit call
(if AsyncProcess is set to track errors globally rather than per call, errors and failed
operations in this object reflect global errors; if submit is called with needResults false,
results are not saved):

  public boolean hasError();
  public RetriesExhaustedWithDetailsException getErrors();
  public List<? extends Row> getFailedOperations();
  public Object[] getResults() throws InterruptedIOException;
  /** Wait until all tasks are executed, successfully or not. */
  public void waitUntilDone() throws InterruptedIOException;
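AsyncProcess is @InterfaceAudience.Private, so applications drive this submit-then-wait flow through the public Table.batch API instead; a minimal sketch under that assumption (the table and rows are hypothetical):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchFlowExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("t1"))) { // hypothetical table
      List<Row> actions = new ArrayList<Row>();
      for (int i = 0; i < 3; i++) {
        Put put = new Put(Bytes.toBytes("row" + i));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
        actions.add(put);
      }
      Object[] results = new Object[actions.size()];
      try {
        table.batch(actions, results); // internally submits via AsyncProcess and waits until done
      } catch (RetriesExhaustedWithDetailsException e) {
        // Corresponds to AsyncRequestFuture.getErrors()/getFailedOperations().
        System.err.println("operations failed: " + e.getNumExceptions());
      }
    }
  }
}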

[13/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.CallPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.CallPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.CallPriorityComparator.html
index dae3d27..856be7b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.CallPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.CallPriorityComparator.html
@@ -198,15 +198,15 @@
 190  }
 191
 192  @Override
-193  public void dispatch(CallRunner callTask) throws InterruptedException {
+193  public boolean dispatch(CallRunner callTask) throws InterruptedException {
 194    RpcServer.Call call = callTask.getCall();
 195    int level = priority.getPriority(call.getHeader(), call.param, call.getRequestUser());
 196    if (priorityExecutor != null && level > highPriorityLevel) {
-197      priorityExecutor.dispatch(callTask);
+197      return priorityExecutor.dispatch(callTask);
 198    } else if (replicationExecutor != null && level == HConstants.REPLICATION_QOS) {
-199      replicationExecutor.dispatch(callTask);
+199      return replicationExecutor.dispatch(callTask);
 200    } else {
-201      callExecutor.dispatch(callTask);
+201      return callExecutor.dispatch(callTask);
 202    }
 203  }
 204
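The hunk above is the crux of HBASE-15146 as it reaches the scheduler: dispatch() now reports whether the call was queued instead of blocking the Reader thread when a bounded queue is full. A self-contained toy sketch of that contract change (not the HBase classes; all names here are illustrative):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Toy dispatcher contrasting the old and new dispatch contracts.
    public class ToyDispatcher {
      private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(100);

      // Old shape: put() parks the calling (Reader) thread until space frees up.
      public void dispatchBlocking(Runnable task) throws InterruptedException {
        queue.put(task);
      }

      // New shape: offer() fails fast, so the caller can answer the client with
      // something like CallQueueTooBigException instead of stalling.
      public boolean dispatch(Runnable task) {
        return queue.offer(task);
      }
    }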

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.html
index dae3d27..856be7b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.html
@@ -198,15 +198,15 @@
 190  }
 191
 192  @Override
-193  public void dispatch(CallRunner callTask) throws InterruptedException {
+193  public boolean dispatch(CallRunner callTask) throws InterruptedException {
 194    RpcServer.Call call = callTask.getCall();
 195    int level = priority.getPriority(call.getHeader(), call.param, call.getRequestUser());
 196    if (priorityExecutor != null && level > highPriorityLevel) {
-197      priorityExecutor.dispatch(callTask);
+197      return priorityExecutor.dispatch(callTask);
 198    } else if (replicationExecutor != null && level == HConstants.REPLICATION_QOS) {
-199      replicationExecutor.dispatch(callTask);
+199      return replicationExecutor.dispatch(callTask);
 200    } else {
-201      callExecutor.dispatch(callTask);
+201      return callExecutor.dispatch(callTask);
 202    }
 203  }
 204

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
index 305bf89..7aeda8d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
@@ -30,12 +30,12 @@
 022import java.util.List;
 023import java.util.TreeSet;
 024
-025import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-026import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-027import org.apache.hadoop.hbase.Cell;
-028import 
org.apache.hadoop.hbase.CellComparator;
-029import 
org.apache.hadoop.hbase.KeyValue;
-030import 
org.apache.hadoop.hbase.KeyValueUtil;
+025import org.apache.hadoop.hbase.Cell;
+026import 
org.apache.hadoop.hbase.CellComparator;
+027import 
org.apache.hadoop.hbase.KeyValue;
+028import 
org.apache.hadoop.hbase.KeyValueUtil;
+029import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+030import 
org.apache.hadoop.hbase.classification.InterfaceStability;
 031import 
org.apache.hadoop.hbase.client.Put;
 032import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 033import 
org.apache.hadoop.mapreduce.Reducer;
@@ -76,27 +76,29 @@
 068          for (Cell cell: cells) {
 069            KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
 070            map.add(kv);
-071            curSize += kv.heapSize();
-072          }
-073        }
-074      }
-075      context.setStatus("Read " + map.size() + " entries of " + map.getClass()
-076          + "(" + StringUtils.humanReadableInt(curSize) + ")");
-077      int index = 0;
-078      for (KeyValue kv : map) {
-079        context.write(row, kv);
-080        if (++index % 100 == 0)
-081          context.setStatus("Wrote " + index);

[39/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index d1af835..5ac4364 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -379,58 +379,58 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private HMaster m_master
 
 
-
+
 
 
 
 
-m_deadServers
-private http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
+m_assignmentManager
+private AssignmentManager m_assignmentManager
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-private boolean m_deadServers__IsNotDefault
+m_assignmentManager__IsNotDefault
+private boolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_filter
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_filter
+m_catalogJanitorEnabled
+private boolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_filter__IsNotDefault
-private boolean m_filter__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+private boolean m_catalogJanitorEnabled__IsNotDefault
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled
-private boolean m_catalogJanitorEnabled
+m_frags
+private http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled__IsNotDefault
-private boolean m_catalogJanitorEnabled__IsNotDefault
+m_frags__IsNotDefault
+private boolean m_frags__IsNotDefault
 
 
 
@@ -451,94 +451,94 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private boolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_format
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
+m_deadServers
+private http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-private boolean m_format__IsNotDefault
+m_deadServers__IsNotDefault
+private boolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_metaLocation
-private ServerName m_metaLocation
+m_servers
+private http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
 
 
-
+
 
 
 
 
-m_metaLocation__IsNotDefault
-private boolean m_metaLocation__IsNotDefault
+m_servers__IsNotDefault
+private boolean m_servers__IsNotDefault
 
 
-
+
 
 
 
 
-m_assignmentManager
-private AssignmentManager m_assignmentManager
+m_metaLocation
+private ServerName m_metaLocation
 
 
-
+
 
 
 
 
-m_assignmentManager__IsNotDefault
-private boolean m_assignmentManager__IsNotDefault
+m_metaLocation__IsNotDefault
+private boolean m_metaLocation__IsNotDefault
 
 
-
+
 
 
 
 
-m_servers
-private http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
+m_filter
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_servers__IsNotDefault
-private boolean m_servers__IsNotDefault
+m_filter__IsNotDefault
+private boolean m_filter__IsNotDefault
 
 
-
+
 
 
 
 
-m_frags
-private http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
+m_format
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-private boolean m_frags__IsNotDefault
+m_format__IsNotDefault
+private boolean m_format__IsNotDefault
 
 
 
@@ -584,85 +584,85 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 public HMaster getMaster()
 
 
-
+
 
 
 
 
-setDeadServers
-public void setDeadServers(http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="

[32/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.ReplicaResultState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.ReplicaResultState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.ReplicaResultState.html
index d0040f6..3180076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.ReplicaResultState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.ReplicaResultState.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class  allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 *
-067 * The caller sends a buffer of operation, by calling submit. This class extract from this list
-068 * the operations it can send, i.e. the operations that are on region that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
-070 * iterate on the list. If, and only if, the maximum number of current task is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 *
-075 *
-076 * The class manages internally the retries.
-077 *
-078 *
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 *
-087 *
-088 * This class is thread safe in regular mode; in global error code, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 *
-093 *
-094 * Internally, this class works with {@link Row}, this mean it could be theoretically used for
-095 * gets as well.
-096 *
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If submit call is made with needResults false, results will not be saved.
-120   * */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture NO_REQS_RESULT = n
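For orientation, this is roughly how a caller consumes the AsyncRequestFuture described in the javadoc above. The sketch is not taken from the HBase sources, and since AsyncProcess is @InterfaceAudience.Private it only makes sense inside the client package; the submit call that produces the future is elided:

    // Hedged sketch: wait out one submit call and surface its aggregated errors.
    static Object[] drain(AsyncProcess.AsyncRequestFuture future)
        throws InterruptedIOException, RetriesExhaustedWithDetailsException {
      future.waitUntilDone();        // blocks until every task finished, OK or not
      if (future.hasError()) {
        throw future.getErrors();    // carries the failed operations as well
      }
      return future.getResults();    // per-operation results
    }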

[10/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/supportingprojects.html
--
diff --git a/supportingprojects.html b/supportingprojects.html
index 749a263..2361dfe 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Supporting Projects
 
@@ -519,7 +519,7 @@ under the License. -->
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/team-list.html
--
diff --git a/team-list.html b/team-list.html
index 00efa7c..a8cf85f 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Team list
 
@@ -785,7 +785,7 @@ window.onLoad = init();
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testapidocs/index-all.html
--
diff --git a/testapidocs/index-all.html b/testapidocs/index-all.html
index 6b72c64..f363614 100644
--- a/testapidocs/index-all.html
+++ b/testapidocs/index-all.html
@@ -846,6 +846,8 @@
  
 org.apache.hadoop.hbase.errorhandling
 - package org.apache.hadoop.hbase.errorhandling
  
+org.apache.hadoop.hbase.exceptions
 - package org.apache.hadoop.hbase.exceptions
+ 
 org.apache.hadoop.hbase.filter
 - package org.apache.hadoop.hbase.filter
  
 org.apache.hadoop.hbase.http
 - package org.apache.hadoop.hbase.http

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-summary.html
--
diff --git 
a/testapidocs/org/apache/hadoop/hbase/errorhandling/package-summary.html 
b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-summary.html
index 8ebad1c..6115a84 100644
--- a/testapidocs/org/apache/hadoop/hbase/errorhandling/package-summary.html
+++ b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-summary.html
@@ -36,7 +36,7 @@
 
 
 Prev
 Package
-Next 
Package
+Next
 Package
 
 
 Frames
@@ -84,7 +84,7 @@
 
 
 Prev
 Package
-Next 
Package
+Next
 Package
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-tree.html
--
diff --git 
a/testapidocs/org/apache/hadoop/hbase/errorhandling/package-tree.html 
b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-tree.html
index 8c9809d..91d01ff 100644
--- a/testapidocs/org/apache/hadoop/hbase/errorhandling/package-tree.html
+++ b/testapidocs/org/apache/hadoop/hbase/errorhandling/package-tree.html
@@ -36,7 +36,7 @@
 
 
 Prev
-Next
+Next
 
 
 Frames
@@ -88,7 +88,7 @@
 
 
 Prev
-Next
+Next
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testapidocs/org/apache/hadoop/hbase/exceptions/package-frame.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/exceptions/package-frame.html 
b/testapidocs/org/apache/hadoop/hbase/exceptions/package-frame.html
new file mode 100644
index 000..17ca55f
--- /dev/null
+++ b/testapidocs/org/apache/hadoop/hbase/exceptions/package-frame.html
@@ -0,0 +1,12 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+org.apache.hadoop.hbase.exceptions (Apache HBase 2.0.0-SNAPSHOT Test 
API)
+
+
+
+org.apache.hadoop.hbase.exceptions
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testapidocs/org/apache/hadoop/hbase/exceptions/package-summary.html
--
diff --git 
a/testapidocs/org/apache/hadoop/hbase/exceptions/package-summary.html 
b/testapidocs/org/apache/hadoop/hbase/exceptions/package-summary.html
new file mode 100644
index 000..6263f45
--- /dev/null
+++ b/testapidocs/org/apache/hadoop/hbase/exceptions/package-summary.html
@@ -0,0 +1,114 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+org.apache.hadoop.hbase.exceptions (Apache HBase 2.0.0-SNAPSHOT Test 
API)
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+
+
+
+Overview
+Package
+Class
+

[34/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
index d0040f6..3180076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class  allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 *
-067 * The caller sends a buffer of operation, by calling submit. This class extract from this list
-068 * the operations it can send, i.e. the operations that are on region that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
-070 * iterate on the list. If, and only if, the maximum number of current task is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 *
-075 *
-076 * The class manages internally the retries.
-077 *
-078 *
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 *
-087 *
-088 * This class is thread safe in regular mode; in global error code, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 *
-093 *
-094 * Internally, this class works with {@link Row}, this mean it could be theoretically used for
-095 * gets as well.
-096 *
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If submit call is made with needResults false, results will not be saved.
-120   * */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture

[12/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 9da7c42..cd1e448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
-063@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-064@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+062@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+063@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+064@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
 065@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-066@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-067@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-068@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-069@org.jamon.annotations.Argument(name 
= "servers", type = "List"),
-070@org.jamon.annotations.Argument(name 
= "frags", type = "Map")})
+066@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
+067@org.jamon.annotations.Argument(name 
= "servers", type = "List"),
+068@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+069@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+070@org.jamon.annotations.Argument(name 
= "format", type = "String")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,57 +116,57 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 24, 1
-112public void 
setDeadServers(Set deadServers)
+111// 29, 1
+112public void 
setAssignmentManager(AssignmentManager assignmentManager)
 113{
-114  // 24, 1
-115  m_deadServers = deadServers;
-116  m_deadServers__IsNotDefault = 
true;
+114  // 29, 1
+115  m_assignmentManager = 
assignmentManager;
+116  m_assignmentManager__IsNotDefault = 
true;
 117}
-118public Set 
getDeadServers()
+118public AssignmentManager 
getAssignmentManager()
 119{
-120  return m_deadServers;
+120  return m_assignmentManager;
 121}
-122private Set 
m_deadServers;
-123public boolean 
getDeadServers__IsNotDefault()
+122private AssignmentManager 
m_assignmentManager;
+123public boolean 
getAssignmentManager__IsNotDefault()
 124{
-125  return 
m_deadServers__IsNotDefault;
+125  return 
m_assignmentManager__IsNotDefault;
 126}
-127private boolean 
m_deadServers__IsNotDefault;
-128// 26, 1
-129public void setFilter(String 
filter)
+127private boolean 
m_assignmentManager__IsNotDefault;
+128// 25, 1
+129public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 130{
-131  // 26, 1
-132  m_filter = filter;
-133  m_filter__IsNotDefault = true;
+131  // 25, 1
+132  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+133  
m_catalogJanitorEnabled__IsNotDefault = true;
 134}
-135public String getFilter()
+135public boolean 
getCatalogJanitorEnabled()
 136{
-137  return m_filter;
+137  return m_catalogJanitorEnabled;
 138}
-139private String m_filter;
-140public boolean 
getFilter__IsNotDefault()
+139private boolean 
m_catalogJanitorEnabled;
+140public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 141{
-142  return m_filter__IsNotDefault;
+142  return 
m_catalogJanitorEnabled__IsNotDefault;
 143}
-144private boolean 
m_filter__IsNotDefault;
-145// 25, 1
-146public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
+144private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+145// 21, 1
+146public void 
setFrags(Map frags)
 147{
-148  // 25, 1
-149  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
-150  
m_catalogJanitorEnabled__IsNotDefault = true;
+148  // 21, 1
+149  m_frags = frags;
+150  m_frags__IsNotDefault = true;
 151}
-152public boolean 
getCatalogJanitorEnabled()
+152public Map 
getFrags()
 153{
-
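The generated code above follows one mechanical Jamon pattern per optional template argument: a backing field, a setter that also flips an __IsNotDefault flag, and getters for both. Reduced to a single hypothetical property, the shape is:

    // Shape of a Jamon-generated optional argument (illustrative, not from HBase).
    public class ExampleImplData {
      private String m_filter;
      private boolean m_filter__IsNotDefault;

      public void setFilter(String filter) {
        m_filter = filter;
        m_filter__IsNotDefault = true;   // remember the caller set it explicitly
      }
      public String getFilter() { return m_filter; }
      public boolean getFilter__IsNotDefault() { return m_filter__IsNotDefault; }
    }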

[04/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.html
 
b/testdevapidocs/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.html
new file mode 100644
index 000..9302351
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.html
@@ -0,0 +1,259 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+TestClientExceptionsUtil (Apache HBase 2.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.exceptions
+Class 
TestClientExceptionsUtil
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.exceptions.TestClientExceptionsUtil
+
+
+
+
+
+
+
+
+public class TestClientExceptionsUtil
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+TestClientExceptionsUtil() 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+Methods 
+
+Modifier and Type
+Method and Description
+
+
+void
+testFindException() 
+
+
+
+
+
+
+Methods inherited from class java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone()"
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals(java.lang.Object)"
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize()"
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass()"
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode()"
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify()"
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll()" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString()"
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait()"
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait(long)"
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait(long,%20int)"
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+
+TestClientExceptionsUtil
+public TestClientExceptionsUtil()
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+testFindException
+public void testFindException()
+   throws http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
+Throws:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+Copyright © 2007–2016 http://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

ht
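TestClientExceptionsUtil exercises the new ClientExceptionsUtil.findException helper listed in the commit's file summary. As a rough illustration only (the real helper is more involved and recognizes specific HBase exception types), a findException-style method walks cause chains and unwraps Hadoop RemoteExceptions until it reaches the underlying failure:

    import org.apache.hadoop.ipc.RemoteException;

    // Illustrative only; not the actual ClientExceptionsUtil implementation.
    public class FindExceptionSketch {
      static Throwable findException(Throwable t) {
        Throwable cur = t;
        while (cur != null) {
          if (cur instanceof RemoteException) {
            Throwable unwrapped = ((RemoteException) cur).unwrapRemoteException();
            if (unwrapped == cur) {
              return cur;              // wrapped class could not be resolved
            }
            cur = unwrapped;
          } else if (cur.getCause() != null) {
            cur = cur.getCause();      // keep digging toward the root cause
          } else {
            return cur;
          }
        }
        return null;
      }
    }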

[07/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
index 2591d73..7ec9317 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
@@ -95,7 +95,7 @@
 
 
 
-public class TestAsyncProcess
+public class TestAsyncProcess
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -377,108 +377,112 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 void
-testErrorsServers() 
+testCallQueueTooLarge() 
 
 
 void
-testFail() 
+testErrorsServers() 
 
 
 void
-testFailAndSuccess() 
+testFail() 
 
 
 void
-testFlush() 
+testFailAndSuccess() 
 
 
 void
-testGlobalErrors() 
+testFlush() 
 
 
 void
-testHTableFailedPutAndNewPut() 
+testGlobalErrors() 
 
 
 void
-testHTableFailedPutWithBuffer() 
+testHTableFailedPutAndNewPut() 
 
 
 void
-testHTableFailedPutWithoutBuffer() 
+testHTableFailedPutWithBuffer() 
 
 
 void
-testHTablePutSuccess() 
+testHTableFailedPutWithoutBuffer() 
 
 
 void
-testMaxTask() 
+testHTablePutSuccess() 
 
 
 void
-testReplicaAllCallsFailForOneRegion() 
+testMaxTask() 
 
 
 void
-testReplicaMainFailsBeforeReplicaCalls() 
+testReplicaAllCallsFailForOneRegion() 
 
 
 void
-testReplicaParallelCallsSucceed() 
+testReplicaMainFailsBeforeReplicaCalls() 
 
 
 void
-testReplicaPartialReplicaCall() 
+testReplicaParallelCallsSucceed() 
 
 
 void
-testReplicaPrimarySuccessWoReplicaCalls() 
+testReplicaPartialReplicaCall() 
 
 
 void
-testReplicaReplicaSuccess() 
+testReplicaPrimarySuccessWoReplicaCalls() 
 
 
 void
-testReplicaReplicaSuccessWithParallelFailures() 
+testReplicaReplicaSuccess() 
 
 
 void
-testSubmit() 
+testReplicaReplicaSuccessWithParallelFailures() 
 
 
 void
-testSubmitBusyRegion() 
+testSubmit() 
 
 
 void
-testSubmitBusyRegionServer() 
+testSubmitBusyRegion() 
 
 
 void
-testSubmitTrue() 
+testSubmitBusyRegionServer() 
 
 
 void
-testSubmitWithCB() 
+testSubmitTrue() 
 
 
 void
+testSubmitWithCB() 
+
+
+void
 testThreadCreation()
 This test simulates multiple regions on 2 servers.
 
 
-
+
 void
 testUncheckedException() 
 
-
+
 private void
 verifyReplicaResult(org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture ars,
   TestAsyncProcess.RR... expecteds) 
 
-
+
 private void
 verifyResult(org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture ars,
 boolean... expected) 
@@ -511,7 +515,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 timeout
-public final org.junit.rules.TestRule timeout
+public final org.junit.rules.TestRule timeout
 
 
 
@@ -520,7 +524,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -529,7 +533,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 DUMMY_TABLE
-private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
+private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
 
 
 
@@ -538,7 +542,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_1
-private static final byte[] DUMMY_BYTES_1
+private static final byte[] DUMMY_BYTES_1
 
 
 
@@ -547,7 +551,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_2
-private static final byte[] DUMMY_BYTES_2
+private static final byte[] DUMMY_BYTES_2
 
 
 
@@ -556,7 +560,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_3
-private static final byte[] DUMMY_BYTES_3
+private static final byte[] DUMMY_BYTES_3
 
 
 
@@ -565,7 +569,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 FAILS
-private static final byte[] FAILS
+private static final byte[] FAILS
 
 
 
@@ -574,7 +578,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 conf
-private static final org.apache.hadoop.conf.Configuration conf
+private static final org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -583,7 +587,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 sn
-private static org.apache.hadoop.hbase.ServerName sn
+private static org.apache.hadoop.hbase.ServerName sn
 
 
 
@@ -592,7 +596,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 sn2
-private static org.apache.hadoop.hbase.ServerName sn2
+private static org.apache.hadoop.hbase.ServerName sn2
 
 
 
@@ -601,7 +605,7 @@ extends http://docs.ora

[31/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.Retry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.Retry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.Retry.html
index d0040f6..3180076 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.Retry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.Retry.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class  allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 *
-067 * The caller sends a buffer of operation, by calling submit. This class extract from this list
-068 * the operations it can send, i.e. the operations that are on region that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
-070 * iterate on the list. If, and only if, the maximum number of current task is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 *
-075 *
-076 * The class manages internally the retries.
-077 *
-078 *
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 *
-087 *
-088 * This class is thread safe in regular mode; in global error code, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 *
-093 *
-094 * Internally, this class works with {@link Row}, this mean it could be theoretically used for
-095 * gets as well.
-096 *
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If submit call is made with needResults false, results will not be saved.
-120   * */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture NO_REQS_RESULT = new AsyncRequestFuture() {
-134    final Object[] result = new Objec
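The truncated tail above introduces NO_REQS_RESULT, a pre-built future handed back when a submit carried no requests: a classic null object, so callers never have to special-case the empty submit. The idea reduced to a toy interface (not the HBase types):

    public class NullObjectFutureDemo {
      interface ToyFuture {
        boolean hasError();
        void waitUntilDone();
      }

      // Shared, stateless instance returned whenever nothing was submitted.
      static final ToyFuture NO_REQS_RESULT = new ToyFuture() {
        @Override public boolean hasError() { return false; }
        @Override public void waitUntilDone() { /* nothing to wait for */ }
      };
    }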

[38/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index 62ba729..515651f 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -148,9 +148,9 @@
 
 java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 org.apache.hadoop.hbase.wal.WALFactory.Providers
 org.apache.hadoop.hbase.wal.WALKey.Version
-org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index e46a924..59a212b 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -815,6 +815,7 @@
 org.apache.hadoop.hbase.client.ClientAsyncPrefetchScanner.PrefetchRunnable 
(implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable)
 org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory
 org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory.NoBackoffPolicy 
(implements org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy)
+org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil
 org.apache.hadoop.hbase.client.ClientIdGenerator
 org.apache.hadoop.hbase.client.ClientSmallScanner.SmallScannerCallableFactory
 org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils
@@ -2977,6 +2978,7 @@
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocatorException
 org.apache.hadoop.hbase.io.hfile.bucket.CacheFullException
 org.apache.hadoop.hbase.ipc.CallerDisconnectedException
+org.apache.hadoop.hbase.CallQueueTooBigException
 org.apache.hadoop.hbase.ipc.CallTimeoutException
 org.apache.hadoop.hbase.ClockOutOfSyncException
 org.apache.hadoop.hbase.exceptions.ConnectionClosingException
@@ -3121,7 +3123,6 @@
 org.apache.hadoop.hbase.MultiActionResultTooLarge
 
 
-org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException
 org.apache.hadoop.hbase.ipc.ServerNotRunningYetException
 java.net.http://docs.oracle.com/javase/7/docs/api/java/net/SocketException.html?is-external=true";
 title="class or interface in java.net">SocketException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/serialized-form.html
--
diff --git a/devapidocs/serialized-form.html b/devapidocs/serialized-form.html
index c521746..4c23b06 100644
--- a/devapidocs/serialized-form.html
+++ b/devapidocs/serialized-form.html
@@ -69,6 +69,11 @@
 
 Package org.apache.hadoop.hbase
 
+
+
+
+Class org.apache.hadoop.hbase.CallQueueTooBigException 
extends http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException implements 
Serializable
+
 
 
 
@@ -1061,11 +1066,6 @@
 
 
 
-
-
-
-Class org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException
 extends http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException implements 
Serializable
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/CallQueueTooBigException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/CallQueueTooBigException.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/CallQueueTooBigException.html
new file mode 100644
index 000..dd0ce43
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/CallQueueTooBigException.html
@@ -0,0 +1,105 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy
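With the exception promoted to a public top-level class (the overview-tree hunk above swaps org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException for org.apache.hadoop.hbase.CallQueueTooBigException), client code can now name it directly. A hedged sketch of one way a caller might react, assuming the exception reaches it unwrapped; Table, Put usage and the backoff value are placeholders for illustration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.CallQueueTooBigException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BackoffOnFullQueue {
      // Treat a full server call queue as retryable rather than fatal.
      static void putWithBackoff(Table table, Put put)
          throws IOException, InterruptedException {
        try {
          table.put(put);
        } catch (CallQueueTooBigException e) {
          Thread.sleep(200);   // arbitrary backoff for illustration
          table.put(put);      // a single illustrative retry
        }
      }
    }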

[24/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.StubMaker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.StubMaker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.StubMaker.html
index ee363fc..16f504e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.StubMaker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.StubMaker.html
@@ -43,2273 +43,2227 @@
 035import 
org.apache.hadoop.hbase.HTableDescriptor;
 036import 
org.apache.hadoop.hbase.MasterNotRunningException;
 037import 
org.apache.hadoop.hbase.MetaTableAccessor;
-038import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-039import 
org.apache.hadoop.hbase.RegionLocations;
-040import 
org.apache.hadoop.hbase.RegionTooBusyException;
-041import 
org.apache.hadoop.hbase.RetryImmediatelyException;
-042import 
org.apache.hadoop.hbase.ServerName;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.TableNotEnabledException;
-045import 
org.apache.hadoop.hbase.TableNotFoundException;
-046import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-049import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-050import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-051import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-052import 
org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-053import 
org.apache.hadoop.hbase.ipc.RpcClient;
-054import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-056import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-057import 
org.apache.hadoop.hbase.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-059import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-062import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-066import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-067import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-068import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-069import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-070import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-071import 
org.apache.hadoop.hbase.quotas.ThrottlingException;
-072import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-073import 
org.apache.hadoop.hbase.security.User;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.hbase.util.Threads;
-079import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-082import 
org.apache.hadoop.ipc.RemoteException;
-083import 
org.apache.zookeeper.KeeperException;
-084
-085import javax.annotation.Nullable;
-086
-087import java.io.Closeable;
-088import java.io.IOException;
-089import java.io.InterruptedIOException;
-090import 
java.lang.reflect.UndeclaredThrowableException;
-091import java.net.InetAddress;
-092import java.net.InetSocketAddress;
-093import java.util.ArrayList;
-094import java.util.Date;
-095import java.util.List;
-096import 
java.util.concurrent.BlockingQueue;
-097import 
java.util.concurrent.ConcurrentHashMap;
-098import 
java.util.concurrent.ConcurrentMap;
-099import 
java.util.concurrent.ExecutorService;
-100import 
java.util.concurrent.LinkedBlockingQueue;
-101import 
java.util.concurrent.ThreadPoolExecutor;
-102import java.util.concurrent.TimeUnit;
-103import 
java.util.concurrent.atomic.AtomicInteger;
-104
-105/**
-106 * Main implementation of {@link 
Connection} and {@link ClusterConnection} interfaces.
-107 * Encapsulates connection to zookeeper 
and regionservers.
-108 */
-109@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-110
v

[14/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
@@ -75,2529 +75,2534 @@
 067
 068import org.apache.commons.logging.Log;
 069import 
org.apache.commons.logging.LogFactory;
-070import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-071import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-072import 
org.apache.hadoop.conf.Configuration;
-073import 
org.apache.hadoop.hbase.CellScanner;
-074import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-075import 
org.apache.hadoop.hbase.HBaseIOException;
-076import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-077import 
org.apache.hadoop.hbase.HConstants;
-078import org.apache.hadoop.hbase.HRegionInfo;
-079import org.apache.hadoop.hbase.Server;
-080import org.apache.hadoop.hbase.TableName;
-081import org.apache.hadoop.hbase.client.Operation;
-082import org.apache.hadoop.hbase.client.VersionInfoUtil;
-083import org.apache.hadoop.hbase.codec.Codec;
-084import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-086import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-087import org.apache.hadoop.hbase.io.BoundedByteBufferPool;
-088import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-089import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-090import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-091import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
-092import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
-093import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
-094import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse;
-095import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-096import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader;
-097import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
-098import org.apache.hadoop.hbase.regionserver.HRegionServer;
-099import org.apache.hadoop.hbase.security.AccessDeniedException;
-100import org.apache.hadoop.hbase.security.AuthMethod;
-101import org.apache.hadoop.hbase.security.HBasePolicyProvider;
-102import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-103import org.apache.hadoop.hbase.security.User;
-104import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-105import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-106import org.apache.hadoop.hbase.security.SaslStatus;
-107import org.apache.hadoop.hbase.security.SaslUtil;
-108import org.apache.hadoop.hbase.security.UserProvider;
-109import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
-110import org.apache.hadoop.hbase.util.Bytes;
-111import org.apache.hadoop.hbase.util.Counter;
-112import org.apache.hadoop.hbase.util.Pair;
-113import org.apache.hadoop.io.BytesWritable;
-114import org.apache.hadoop.io.IntWritable;
-115import org.apache.hadoop.io.Writable;
-116import org.apache.hadoop.io.WritableUtils;
-117import org.apache.hadoop.io.compress.CompressionCodec;
-118import org.apache.hadoop.security.UserGroupInformation;
-119import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-120import org.apache.hadoop.security.authorize.AuthorizationException;
-121import org.apache.hadoop.security.authorize.PolicyProvider;
-122import org.apache.hadoop.security.authorize.ProxyUsers;
-123import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-124import org.apache.hadoop.security.token.SecretManager;
-125import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-126import org.apache.hadoop.security.token.TokenIdentifier;
-127import org.apache.hadoop.util.StringUtils;
-128import org.codehaus.jackson.map.ObjectMapper;
-129import org.apache.htrace.TraceInfo;
-130
-131import com.google.common.util.concurrent.ThreadFactoryBuilder;
-132import com.google.protobuf.BlockingService;
-133import com.google.protobuf.CodedInputStream;
-134import com.google.protobuf.Descriptors.MethodDescriptor;
-135import com.google.protobuf.Message;
-136import com.google.protobuf.ServiceException;
-137import com.google.protobuf.TextFormat;
-138
-139/**
-140 * An RPC server that hosts protobuf described Services.
-141 *
-142 * An RpcServer instance has a Listener that hosts the socket.  Listener has fixed number
-143 * of Readers in an ExecutorPool, 10 by default.  The Listener does an accept and then
-144
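The truncated class comment above describes the threading model this change protects: one Listener accepting connections, and a fixed pool of Readers (10 by default) taking each accepted socket over. A minimal sketch of that accept-and-hand-off shape, with hypothetical names and not the real RpcServer code:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical sketch of the Listener/Reader split described above.
public class ListenerSketch implements Runnable {
  private static final int NUM_READERS = 10;  // "10 by default" per the comment above
  private final ServerSocketChannel acceptChannel;
  private final ExecutorService[] readers = new ExecutorService[NUM_READERS];
  private int nextReader = 0;  // round-robin cursor over the reader pool

  public ListenerSketch(int port) throws IOException {
    acceptChannel = ServerSocketChannel.open();
    acceptChannel.socket().bind(new InetSocketAddress(port));
    for (int i = 0; i < NUM_READERS; i++) {
      readers[i] = Executors.newSingleThreadExecutor();
    }
  }

  @Override
  public void run() {
    while (acceptChannel.isOpen()) {
      try {
        // The Listener does the accept...
        SocketChannel connection = acceptChannel.accept();
        // ...then hands the socket to one of the fixed Readers.
        readers[nextReader].execute(() -> readAndDispatch(connection));
        nextReader = (nextReader + 1) % NUM_READERS;
      } catch (IOException e) {
        break;  // listener shutting down
      }
    }
  }

  private void readAndDispatch(SocketChannel connection) {
    // A real Reader decodes the request and hands it to the RPC scheduler;
    // per HBASE-15146 that hand-off must not block when the call queue is full.
  }
}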

[27/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.NoNonceGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.NoNonceGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.NoNonceGenerator.html
index ee363fc..16f504e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.NoNonceGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.NoNonceGenerator.html
@@ -43,2273 +43,2227 @@
 035import org.apache.hadoop.hbase.HTableDescriptor;
 036import org.apache.hadoop.hbase.MasterNotRunningException;
 037import org.apache.hadoop.hbase.MetaTableAccessor;
-038import org.apache.hadoop.hbase.MultiActionResultTooLarge;
-039import org.apache.hadoop.hbase.RegionLocations;
-040import org.apache.hadoop.hbase.RegionTooBusyException;
-041import org.apache.hadoop.hbase.RetryImmediatelyException;
-042import org.apache.hadoop.hbase.ServerName;
-043import org.apache.hadoop.hbase.TableName;
-044import org.apache.hadoop.hbase.TableNotEnabledException;
-045import org.apache.hadoop.hbase.TableNotFoundException;
-046import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-047import org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-049import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-050import org.apache.hadoop.hbase.client.coprocessor.Batch;
-051import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-052import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-053import org.apache.hadoop.hbase.ipc.RpcClient;
-054import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-055import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-056import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-057import org.apache.hadoop.hbase.protobuf.RequestConverter;
-058import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-059import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-060import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-061import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-062import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-063import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-064import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-065import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-066import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-067import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-068import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-069import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-070import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-071import org.apache.hadoop.hbase.quotas.ThrottlingException;
-072import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-073import org.apache.hadoop.hbase.security.User;
-074import org.apache.hadoop.hbase.util.Bytes;
-075import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import org.apache.hadoop.hbase.util.ExceptionUtil;
-077import org.apache.hadoop.hbase.util.Pair;
-078import org.apache.hadoop.hbase.util.Threads;
-079import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-080import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-082import org.apache.hadoop.ipc.RemoteException;
-083import org.apache.zookeeper.KeeperException;
-084
-085import javax.annotation.Nullable;
-086
-087import java.io.Closeable;
-088import java.io.IOException;
-089import java.io.InterruptedIOException;
-090import java.lang.reflect.UndeclaredThrowableException;
-091import java.net.InetAddress;
-092import java.net.InetSocketAddress;
-093import java.util.ArrayList;
-094import java.util.Date;
-095import java.util.List;
-096import java.util.concurrent.BlockingQueue;
-097import java.util.concurrent.ConcurrentHashMap;
-098import java.util.concurrent.ConcurrentMap;
-099import java.util.concurrent.ExecutorService;
-100import java.util.concurrent.LinkedBlockingQueue;
-101import java.util.concurrent.ThreadPoolExecutor;
-102import java.util.concurrent.TimeUnit;
-103import java.util.concurrent.atomic.AtomicInteger;
-104
-105/**
-106 * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces.
-107 * Encapsulates connection to zookeeper and regionservers.
-108 */
-109@edu.umd.cs.findbugs.annot

[47/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
index 873bef6..357f586 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.html
@@ -103,7 +103,7 @@
 
 
 
-protected class AsyncProcess.AsyncRequestFutureImpl
+protected class AsyncProcess.AsyncRequestFutureImpl
 extends Object
 implements AsyncProcess.AsyncRequestFuture
 The context, and return value, for a single submit/submitAll call.
@@ -463,7 +463,7 @@
 callback
-private final Batch.Callback callback
+private final Batch.Callback callback
@@ -472,7 +472,7 @@
 errors
-private final AsyncProcess.BatchErrors errors
+private final AsyncProcess.BatchErrors errors
@@ -481,7 +481,7 @@
 errorsByServer
-private final ConnectionImplementation.ServerErrorTracker errorsByServer
+private final ConnectionImplementation.ServerErrorTracker errorsByServer
@@ -490,7 +490,7 @@
 pool
-private final ExecutorService pool
+private final ExecutorService pool
@@ -499,7 +499,7 @@
 callsInProgress
-private final Set callsInProgress
+private final Set callsInProgress
@@ -508,7 +508,7 @@
 tableName
-private final TableName tableName
+private final TableName tableName
@@ -517,7 +517,7 @@
 actionsInProgress
-private final AtomicLong actionsInProgress
+private final AtomicLong actionsInProgress
@@ -526,7 +526,7 @@
 replicaResultLock
-private final Object replicaResultLock
+private final Object replicaResultLock
 The lock controls access to results. It is only held when populating results where
 there might be several callers (eventual consistency gets). For other requests,
 there's one unique call going on per result index.
@@ -538,7 +538,7 @@
 results
-private final Object[] results
+private final Object[] results
 Result array.  Null if results are not needed. Otherwise, each index corresponds to
 the action index in initial actions submitted. For most request types, has null-s for
 requests that are not done, and result/exception for those that are done.
@@ -554,7 +554,7 @@
 replicaGetIndices
-private final int[] replicaGetIndices
+private final int[] replicaGetIndices
 Indices of replica gets in results. If null, all or no actions are replica-gets.
@@ -564,7 +564,7 @@
 hasAnyReplicaGets
-private final boolean hasAnyReplicaGets
+private final boolean hasAnyReplicaGets
@@ -573,7 +573,7 @@
 nonceGroup
-private final long nonceGroup
+private final long nonceGroup
@@ -590,7 +590,7 @@
 AsyncProcess.AsyncRequestFutureImpl
-public AsyncProcess.AsyncRequestFutureImpl(TableName tableName,
+public AsyncProcess.AsyncRequestFutureImpl(TableName tableName,
 List
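The results, replicaResultLock, and replicaGetIndices descriptions above outline how each submit call tracks per-action outcomes by index. A hedged illustration of that indexing scheme, using a hypothetical class rather than the HBase implementation:

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of the result-tracking scheme described above.
class ResultTrackerSketch {
  // One slot per submitted action; null until that action completes,
  // then either a result or the exception it failed with.
  private final Object[] results;
  // Positions in `results` that belong to replica gets; null means
  // "all of them" or "none of them", as the javadoc above notes.
  private final int[] replicaGetIndices;
  private final AtomicLong actionsInProgress;

  ResultTrackerSketch(int actionCount, int[] replicaGetIndices) {
    this.results = new Object[actionCount];
    this.replicaGetIndices = replicaGetIndices;
    this.actionsInProgress = new AtomicLong(actionCount);
  }

  // Record one action's outcome and report whether the whole batch is done.
  boolean complete(int actionIndex, Object resultOrException) {
    synchronized (results) {  // stands in for replicaResultLock
      results[actionIndex] = resultOrException;
    }
    return actionsInProgress.decrementAndGet() == 0;
  }
}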

[33/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.BatchErrors.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.BatchErrors.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.BatchErrors.html
index d0040f6..3180076 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.BatchErrors.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.BatchErrors.html
@@ -61,1780 +61,1781 @@
 053import org.apache.hadoop.hbase.TableName;
 054import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import org.apache.hadoop.hbase.util.Bytes;
-058import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class  allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 *
-067 * The caller sends a buffer of operation, by calling submit. This class extract from this list
-068 * the operations it can send, i.e. the operations that are on region that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
-070 * iterate on the list. If, and only if, the maximum number of current task is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 *
-075 *
-076 * The class manages internally the retries.
-077 *
-078 *
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 *
-087 *
-088 * This class is thread safe in regular mode; in global error code, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 *
-093 *
-094 * Internally, this class works with {@link Row}, this mean it could be theoretically used for
-095 * gets as well.
-096 *
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If submit call is made with needResults false, results will not be saved.
-120   * */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture NO_REQS_RESULT = new AsyncRequestFuture() {
-134f
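The javadoc quoted above says each submit returns a future-like object for tracking progress. A hedged sketch of how a caller consumes that AsyncRequestFuture contract, with stand-in types because AsyncProcess itself is package-private and its submit signatures vary across HBase versions:

import java.io.InterruptedIOException;
import java.util.List;

// Illustrative only: consuming the AsyncRequestFuture contract quoted above.
class AsyncRequestFutureUsage {
  static Object[] runBatch(AsyncProcessLike ap, List<?> operations)
      throws InterruptedIOException {
    AsyncRequestFutureLike future = ap.submitBatch(operations);
    future.waitUntilDone();  // wait for every task, successful or not
    if (future.hasError()) {
      // Partial results remain retrievable even when some actions failed.
      List<?> failed = future.getFailedOperations();
      System.err.println(failed.size() + " operation(s) failed: " + future.getErrors());
    }
    return future.getResults();  // one slot per submitted action
  }

  // Hypothetical stand-ins so the sketch compiles on its own.
  interface AsyncProcessLike { AsyncRequestFutureLike submitBatch(List<?> ops); }
  interface AsyncRequestFutureLike {
    boolean hasError();
    Exception getErrors();
    List<?> getFailedOperations();
    Object[] getResults() throws InterruptedIOException;
    void waitUntilDone() throws InterruptedIOException;
  }
}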

[25/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index ee363fc..16f504e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -43,2273 +43,2227 @@

[16/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.html
@@ -75,2529 +75,2534 @@

[40/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallQueueTooBigException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallQueueTooBigException.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallQueueTooBigException.html
deleted file mode 100644
index 9e5ab0f..000
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallQueueTooBigException.html
+++ /dev/null
@@ -1,157 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException (Apache HBase 2.0.0-SNAPSHOT API)
-
-Packages that use RpcServer.CallQueueTooBigException:
-  org.apache.hadoop.hbase.ipc
-    Tools to help define network clients and servers.
-
-Uses of RpcServer.CallQueueTooBigException in org.apache.hadoop.hbase.ipc:
-  private static RpcServer.CallQueueTooBigException RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION
-
-Copyright © 2007–2016 The Apache Software Foundation. All rights reserved.
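The single recorded use above, a pre-allocated static CALL_QUEUE_TOO_BIG_EXCEPTION field, looks like the common hot-path trick of constructing an exception once so that a full queue does not pay for stack-trace capture on every rejection. A hedged sketch of that pattern, as an illustrative class rather than the removed inner class itself:

import java.io.IOException;

// Sketch of a pre-allocated, stackless hot-path exception (assumed rationale).
class CallQueueTooBigSketch extends IOException {
  // Built once; filling in a stack trace on every full-queue event would be
  // wasted work, so a shared instance with a no-op fillInStackTrace is used.
  static final CallQueueTooBigSketch INSTANCE = new CallQueueTooBigSketch();

  private CallQueueTooBigSketch() {
    super("Call queue is full; is hbase.ipc.server.max.callqueue.size too small?");
  }

  @Override
  public synchronized Throwable fillInStackTrace() {
    return this;  // keep the shared instance cheap and allocation-free
  }
}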

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/package-frame.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-frame.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-frame.html
index bd5133f..626bb83 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-frame.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-frame.html
@@ -82,7 +82,6 @@
 FatalConnectionException
 IPCUtil.CellScannerButNoCodecException
 RemoteWithExtrasException
-RpcServer.CallQueueTooBigException
 ServerNotRunningYetException
 StoppedRpcClientException
 UnknownServiceException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
index 88ef99d..6a54972 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
@@ -464,30 +464,26 @@
 
 
 
-RpcServer.CallQueueTooBigException
 ServerNotRunningYetException
 StoppedRpcClientException
 UnknownServiceException
 UnsupportedCellCodecException
 UnsupportedCompressionCodecException
 WrongVersionException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index 9986a73..4306cd7 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -201,7 +201,6 @@
 org.apache.hadoop.hbase.ipc.RemoteWithExtrasException
-org.apache.hadoop.hbase.ipc.RpcServer.CallQueueTooBigException
 org.apache.hadoop.hbase.ipc.ServerNotRunningYetException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-use.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-use.html
index 6f6

[48/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceStability.Evolving.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceStability.Evolving.html b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceStability.Evolving.html
index 726f5ff..dd311d8 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceStability.Evolving.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceStability.Evolving.html
@@ -340,69 +340,73 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
+class CallQueueTooBigException
 interface Cell
     The unit of storage in HBase consisting of the following fields:
 class CellComparator
     Compare two HBase cells.
 class CellUtil
     Utility methods helpful slinging Cell instances.
 class ClusterStatus
     Status information on the HBase cluster.
 interface Coprocessor
     Coprocessor interface.
 class HBaseInterfaceAudience
     This class defines constants for different classes of hbase limited private apis
 class HBaseIOException
     All hbase specific IOExceptions should be subclasses of HBaseIOException
 class HColumnDescriptor
     An HColumnDescriptor contains information about a column family such as the
     number of versions, compression settings, etc.
 class HRegionInfo
     Information about a region.
 class HRegionLocation
     Data structure to hold HRegionInfo and the address for the hosting HRegionServer.
 class HTableDescriptor
     HTableDescriptor contains the details about an HBase table such as the descriptors of
@@ -411,118 +415,118 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     when the region split should occur, coprocessors associated with it etc...
 class InvalidFamilyOperationException
     Thrown if a request is table schema modification is requested but made for an invalid family name.
 class KeepDeletedCells
     Ways to keep cells marked for delete around.
 class LocalHBaseCluster
     This class creates a single process HBase cluster.
 class MultiActionResultTooLarge
     Exception thrown when the result needs to be chunked on the server side.
 class NamespaceDescriptor
     Namespace POJO class.
 static class NamespaceDescriptor.Builder
 class OffheapTag
     This is a Tag implementation in which value is backed by an off heap ByteBuffer
 class ProcedureInfo
     Procedure information
 class RegionLoad
     Encapsulates per-region load metrics.
 class RegionTooBusyException
     Thrown by a region server if it will block and wait to serve a request.
 class RetryImmediatelyException
 interface Server
     Defines the set of shared functions implemented by HBase servers (Masters and RegionServers).
 class ServerLoad
     This class is used for exporting current state of load on a RegionServer.
 class ServerName
     Instance of an HBase ServerName.
 interface Stoppable
     Implementers are Stoppable.
 class TableInfoMissingException
     Failed to find .tableinfo file under table dir
 class TableName
     Immutable POJO class for representing a table name.
 interface Tag
     Tags are part of cells and helps to add metadata about them.
 class TagType
 class ZooKeeperConnectionException
     Thrown if the client can't connect to zookeeper
@@ -1069,43 +1073,47 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
+class ClientExceptionsUtil
 class ConnectionClosingException
     Thrown when the client believes that we are trying to communicate to has been repeatedly unresponsive for a while.
 class OperationConflictException
     The exception that is thrown if there's duplicate execution of non-idempotent operation.
 class PreemptiveFastFailException
     Thrown when the client believes that we are trying to communicate to has been repeatedly unresponsive for a while.
 class RegionInRecoveryException
     Thrown when a read request issued against a region which is in recovering state.
 class RegionMovedException
     Subclass if the server knows the region is now on another server.
 class RegionOpeningException
     Subclass if the server knows the region is now on another server.
 class UnknownProtocolException
     An error requesting an RPC protocol that the server is not serving.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceSt
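The two additions recorded in this diff, CallQueueTooBigException and ClientExceptionsUtil, are the client-facing half of HBASE-15146: a full call queue means the server is alive but overloaded, so the client should back off and retry rather than discard its cached region locations. A hedged sketch of that policy, with stand-in types rather than the real ClientExceptionsUtil API:

import java.io.IOException;

// Illustrative sketch of the client-side policy HBASE-15146 enables.
final class OverloadRetrySketch {
  // Stand-in for org.apache.hadoop.hbase.CallQueueTooBigException.
  static class CallQueueTooBigException extends IOException {}

  // A full call queue means the region server is alive but overloaded:
  // retry later with backoff, and do NOT clear cached region locations,
  // since the region has not actually moved.
  static boolean shouldClearMetaCache(Throwable error) {
    return !(error instanceof CallQueueTooBigException);
  }
}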

[42/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
index f7bb610..82f0842 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
@@ -99,7 +99,7 @@
-public class RpcServer.Connection
+public class RpcServer.Connection
 extends Object
 Reads calls from a connection and queues them for handling.
@@ -454,7 +454,7 @@
 connectionPreambleRead
-private boolean connectionPreambleRead
+private boolean connectionPreambleRead
@@ -463,7 +463,7 @@
 connectionHeaderRead
-private boolean connectionHeaderRead
+private boolean connectionHeaderRead
@@ -472,7 +472,7 @@
 channel
-protected SocketChannel channel
+protected SocketChannel channel
@@ -481,7 +481,7 @@
 data
-private ByteBuffer data
+private ByteBuffer data
@@ -490,7 +490,7 @@
 dataLengthBuffer
-private ByteBuffer dataLengthBuffer
+private ByteBuffer dataLengthBuffer
@@ -499,7 +499,7 @@
 responseQueue
-protected final ConcurrentLinkedDeque responseQueue
+protected final ConcurrentLinkedDeque responseQueue
@@ -508,7 +508,7 @@
 responseWriteLock
-private final Lock responseWriteLock
+private final Lock responseWriteLock
@@ -517,7 +517,7 @@
 rpcCount
-private Counter rpcCount
+private Counter rpcCount
@@ -526,7 +526,7 @@
 lastContact
-private long lastContact
+private long lastContact
@@ -535,7 +535,7 @@
 addr
-private InetAddress addr
+private InetAddress addr
@@ -544,7 +544,7 @@
 socket
-protected Socket socket
+protected Socket socket
@@ -553,7 +553,7 @@
 hostAddress
-protected http://docs.oracle.com/javase/7/docs/api/

[21/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
@@ -75,2529 +75,2534 @@

[51/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/39cf5e9b
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/39cf5e9b
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/39cf5e9b

Branch: refs/heads/asf-site
Commit: 39cf5e9b626bef2e0e6c444a234edee30c235c72
Parents: 6d41195
Author: jenkins 
Authored: Thu Jan 28 15:21:49 2016 +
Committer: Misty Stanley-Jones 
Committed: Thu Jan 28 09:24:05 2016 -0800

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/allclasses-frame.html   | 1 +
 apidocs/allclasses-noframe.html | 1 +
 apidocs/index-all.html  | 4 +
 .../hadoop/hbase/CallQueueTooBigException.html  |   257 +
 apidocs/org/apache/hadoop/hbase/Cell.html   | 4 +-
 .../class-use/CallQueueTooBigException.html |   115 +
 .../org/apache/hadoop/hbase/package-frame.html  | 1 +
 .../apache/hadoop/hbase/package-summary.html|48 +-
 .../org/apache/hadoop/hbase/package-tree.html   | 1 +
 apidocs/overview-tree.html  | 1 +
 apidocs/serialized-form.html| 5 +
 .../hadoop/hbase/CallQueueTooBigException.html  |   105 +
 .../hadoop/hbase/mapreduce/PutSortReducer.html  |56 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 19068 -
 checkstyle.rss  |38 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 3 +-
 devapidocs/allclasses-noframe.html  | 3 +-
 devapidocs/index-all.html   |20 +-
 .../hbase/ByteBufferedKeyOnlyKeyValue.html  | 4 +-
 .../hadoop/hbase/CallQueueTooBigException.html  |   257 +
 devapidocs/org/apache/hadoop/hbase/Cell.html| 4 +-
 .../class-use/CallQueueTooBigException.html |   157 +
 .../class-use/InterfaceAudience.Private.html|16 +-
 .../class-use/InterfaceAudience.Public.html |88 +-
 .../class-use/InterfaceStability.Evolving.html  |80 +-
 .../class-use/InterfaceStability.Unstable.html  |12 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../client/AsyncProcess.AsyncRequestFuture.html |12 +-
 ...stFutureImpl.ReplicaCallIssuingRunnable.html |14 +-
 ...tFutureImpl.SingleServerRequestRunnable.html |14 +-
 .../AsyncProcess.AsyncRequestFutureImpl.html|82 +-
 .../hbase/client/AsyncProcess.BatchErrors.html  |20 +-
 .../client/AsyncProcess.ReplicaResultState.html |10 +-
 .../hadoop/hbase/client/AsyncProcess.Retry.html |12 +-
 .../hadoop/hbase/client/AsyncProcess.html   |92 +-
 ...ectionImplementation.MasterServiceState.html |18 +-
 ...onImplementation.MasterServiceStubMaker.html |14 +-
 ...nnectionImplementation.NoNonceGenerator.html | 8 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |10 +-
 ...ectionImplementation.ServerErrorTracker.html |20 +-
 .../ConnectionImplementation.StubMaker.html |14 +-
 .../hbase/client/ConnectionImplementation.html  |   505 +-
 .../ConnectionUtils.MasterlessConnection.html   | 2 +-
 .../hbase/client/CoprocessorHConnection.html| 2 +-
 .../hadoop/hbase/client/package-tree.html   | 4 +-
 .../hbase/exceptions/ClientExceptionsUtil.html  |   300 +
 .../exceptions/ConnectionClosingException.html  | 4 +-
 .../class-use/ClientExceptionsUtil.html |   115 +
 .../hadoop/hbase/exceptions/package-frame.html  | 4 +
 .../hbase/exceptions/package-summary.html   |15 +
 .../hadoop/hbase/exceptions/package-tree.html   | 1 +
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hbase/ipc/BalancedQueueRpcExecutor.html | 6 +-
 .../hadoop/hbase/ipc/FifoRpcScheduler.html  |45 +-
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.html| 6 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.html| 6 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.html   | 8 +-
 .../RpcServer.BlockingServiceAndInterface.html  |12 +-
 .../apache/hadoop/hbase/ipc/RpcServer.Call.html |94 +-
 .../ipc/RpcServer.CallQueueTooBigExceptio

[29/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index ee363fc..16f504e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -43,2273 +43,2227 @@

[43/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
index dc0d7a6..434ae88 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
@@ -96,8 +96,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.executor.EventType
 org.apache.hadoop.hbase.executor.ExecutorType
+org.apache.hadoop.hbase.executor.EventType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index dbcc00f..74313cd 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -161,13 +161,13 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
-org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
-org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
+org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
+org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
-org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 8c9224d..1bc5d29 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -270,12 +270,12 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
-org.apache.hadoop.hbase.io.hfile.BlockPriority
 org.apache.hadoop.hbase.io.hfile.BlockType
-org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
+org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
 org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
+org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
 org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
+org.apache.hadoop.hbase.io.hfile.BlockPriority
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html
index 2937869..b36eabc 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html
@@ -207,7 +207,7 @@ extends Method and Description
 
 
-void
+boolean
 dispatch(CallRunner callTask)
 Add the request to the executor queue
 
@@ -364,8 +364,8 @@ extends 
 
 dispatch
-public void di
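The hunk above shows the dispatch contract changing from void to boolean: the executor now reports whether it managed to enqueue the call rather than blocking the calling thread. A minimal sketch of that contract, assuming a bounded BlockingQueue behind the executor (Runnable stands in for CallRunner; this is not the real BalancedQueueRpcExecutor code):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class DispatchSketch {
  // Bounded queue: when it is full, dispatch should reject, not block.
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(100);

  /** Returns true if the call was queued, false if the queue was full. */
  boolean dispatch(Runnable callTask) {
    return queue.offer(callTask); // non-blocking, unlike queue.put(callTask)
  }
}

A false return gives the server the chance to answer the client with a retryable "queue too big" error instead of stalling its socket readers.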

[20/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Call.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Call.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Call.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Call.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Call.html
@@ -75,2529 +75,2534 @@
 067
 068import org.apache.commons.logging.Log;
 069import 
org.apache.commons.logging.LogFactory;
-070import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-071import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-072import 
org.apache.hadoop.conf.Configuration;
-073import 
org.apache.hadoop.hbase.CellScanner;
-074import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-075import 
org.apache.hadoop.hbase.HBaseIOException;
-076import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-077import 
org.apache.hadoop.hbase.HConstants;
-078import 
org.apache.hadoop.hbase.HRegionInfo;
-079import org.apache.hadoop.hbase.Server;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.client.Operation;
-082import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-083import 
org.apache.hadoop.hbase.codec.Codec;
-084import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-086import 
org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-087import 
org.apache.hadoop.hbase.io.BoundedByteBufferPool;
-088import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-089import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-090import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-091import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
-092import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
-093import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
-094import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse;
-095import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-096import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader;
-097import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
-098import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-099import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-100import 
org.apache.hadoop.hbase.security.AuthMethod;
-101import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-102import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-103import 
org.apache.hadoop.hbase.security.User;
-104import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-105import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-106import 
org.apache.hadoop.hbase.security.SaslStatus;
-107import 
org.apache.hadoop.hbase.security.SaslUtil;
-108import 
org.apache.hadoop.hbase.security.UserProvider;
-109import 
org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
-110import 
org.apache.hadoop.hbase.util.Bytes;
-111import 
org.apache.hadoop.hbase.util.Counter;
-112import 
org.apache.hadoop.hbase.util.Pair;
-113import 
org.apache.hadoop.io.BytesWritable;
-114import 
org.apache.hadoop.io.IntWritable;
-115import org.apache.hadoop.io.Writable;
-116import 
org.apache.hadoop.io.WritableUtils;
-117import 
org.apache.hadoop.io.compress.CompressionCodec;
-118import 
org.apache.hadoop.security.UserGroupInformation;
-119import 
org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-120import 
org.apache.hadoop.security.authorize.AuthorizationException;
-121import 
org.apache.hadoop.security.authorize.PolicyProvider;
-122import 
org.apache.hadoop.security.authorize.ProxyUsers;
-123import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-124import 
org.apache.hadoop.security.token.SecretManager;
-125import 
org.apache.hadoop.security.token.SecretManager.InvalidToken;
-126import 
org.apache.hadoop.security.token.TokenIdentifier;
-127import 
org.apache.hadoop.util.StringUtils;
-128import 
org.codehaus.jackson.map.ObjectMapper;
-129import org.apache.htrace.TraceInfo;
-130
-131import 
com.google.common.util.concurrent.ThreadFactoryBuilder;
-132import 
com.google.protobuf.BlockingService;
-133import 
com.google.protobuf.CodedInputStream;
-134import 
com.google.protobuf.Descriptors.MethodDescriptor;
-135import com.google.protobuf.Message;
-136import 
com.google.protobuf.ServiceException;
-137import com.google.protobuf.TextFormat;
-138
-139/**
-140 * An RPC server that hosts protobuf 
described Services.
-141 *
-142 * An RpcServer instance has a Listener 
that hosts the socket.  Listener has fixed number
-143 * of Readers in an ExecutorPool, 10 by 
default.  The Listener doe
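The javadoc above describes the Listener/Reader split: one Listener accepts connections and a fixed pool of Readers multiplexes them. A hedged sketch of that shape, assuming round-robin hand-off and one NIO Selector per Reader (names and details are illustrative, not the actual RpcServer internals):

import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

final class ListenerSketch {
  private final Reader[] readers = new Reader[10]; // "10 by default"
  private int next;                                // round-robin cursor

  ListenerSketch() throws IOException {
    for (int i = 0; i < readers.length; i++) {
      readers[i] = new Reader();
    }
  }

  /** Accept one connection and hand it to the next Reader, round-robin. */
  void accept(ServerSocketChannel server) throws IOException {
    SocketChannel channel = server.accept();
    channel.configureBlocking(false);
    Reader reader = readers[next];
    next = (next + 1) % readers.length;
    reader.register(channel);
  }

  static final class Reader implements Runnable {
    private final Selector selector;

    Reader() throws IOException {
      this.selector = Selector.open();
    }

    void register(SocketChannel channel) throws IOException {
      // A real implementation must wake the selector if run() is inside select().
      channel.register(selector, SelectionKey.OP_READ);
    }

    @Override
    public void run() {
      // Loop on select(), read and decode requests; elided in this sketch.
    }
  }
}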

[26/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index ee363fc..16f504e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -43,2273 +43,2227 @@
 035import 
org.apache.hadoop.hbase.HTableDescriptor;
 036import 
org.apache.hadoop.hbase.MasterNotRunningException;
 037import 
org.apache.hadoop.hbase.MetaTableAccessor;
-038import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-039import 
org.apache.hadoop.hbase.RegionLocations;
-040import 
org.apache.hadoop.hbase.RegionTooBusyException;
-041import 
org.apache.hadoop.hbase.RetryImmediatelyException;
-042import 
org.apache.hadoop.hbase.ServerName;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.TableNotEnabledException;
-045import 
org.apache.hadoop.hbase.TableNotFoundException;
-046import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-049import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-050import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-051import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-052import 
org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-053import 
org.apache.hadoop.hbase.ipc.RpcClient;
-054import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-056import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-057import 
org.apache.hadoop.hbase.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-059import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-062import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-066import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-067import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-068import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-069import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-070import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-071import 
org.apache.hadoop.hbase.quotas.ThrottlingException;
-072import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-073import 
org.apache.hadoop.hbase.security.User;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.hbase.util.Threads;
-079import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-082import 
org.apache.hadoop.ipc.RemoteException;
-083import 
org.apache.zookeeper.KeeperException;
-084
-085import javax.annotation.Nullable;
-086
-087import java.io.Closeable;
-088import java.io.IOException;
-089import java.io.InterruptedIOException;
-090import 
java.lang.reflect.UndeclaredThrowableException;
-091import java.net.InetAddress;
-092import java.net.InetSocketAddress;
-093import java.util.ArrayList;
-094import java.util.Date;
-095import java.util.List;
-096import 
java.util.concurrent.BlockingQueue;
-097import 
java.util.concurrent.ConcurrentHashMap;
-098import 
java.util.concurrent.ConcurrentMap;
-099import 
java.util.concurrent.ExecutorService;
-100import 
java.util.concurrent.LinkedBlockingQueue;
-101import 
java.util.concurrent.ThreadPoolExecutor;
-102import java.util.concurrent.TimeUnit;
-103import 
java.util.concurrent.atomic.AtomicInteger;
-104
-105/**
-106 * Main implementation of {@link 
Connection} and {@link ClusterConnection} interfaces.
-107 * Encapsulates connect

[23/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index ee363fc..16f504e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -43,2273 +43,2227 @@
 035import 
org.apache.hadoop.hbase.HTableDescriptor;
 036import 
org.apache.hadoop.hbase.MasterNotRunningException;
 037import 
org.apache.hadoop.hbase.MetaTableAccessor;
-038import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-039import 
org.apache.hadoop.hbase.RegionLocations;
-040import 
org.apache.hadoop.hbase.RegionTooBusyException;
-041import 
org.apache.hadoop.hbase.RetryImmediatelyException;
-042import 
org.apache.hadoop.hbase.ServerName;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.TableNotEnabledException;
-045import 
org.apache.hadoop.hbase.TableNotFoundException;
-046import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-049import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-050import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-051import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-052import 
org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-053import 
org.apache.hadoop.hbase.ipc.RpcClient;
-054import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-056import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-057import 
org.apache.hadoop.hbase.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-059import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-062import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-066import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-067import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-068import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-069import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-070import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-071import 
org.apache.hadoop.hbase.quotas.ThrottlingException;
-072import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-073import 
org.apache.hadoop.hbase.security.User;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.hbase.util.Threads;
-079import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-082import 
org.apache.hadoop.ipc.RemoteException;
-083import 
org.apache.zookeeper.KeeperException;
-084
-085import javax.annotation.Nullable;
-086
-087import java.io.Closeable;
-088import java.io.IOException;
-089import java.io.InterruptedIOException;
-090import 
java.lang.reflect.UndeclaredThrowableException;
-091import java.net.InetAddress;
-092import java.net.InetSocketAddress;
-093import java.util.ArrayList;
-094import java.util.Date;
-095import java.util.List;
-096import 
java.util.concurrent.BlockingQueue;
-097import 
java.util.concurrent.ConcurrentHashMap;
-098import 
java.util.concurrent.ConcurrentMap;
-099import 
java.util.concurrent.ExecutorService;
-100import 
java.util.concurrent.LinkedBlockingQueue;
-101import 
java.util.concurrent.ThreadPoolExecutor;
-102import java.util.concurrent.TimeUnit;
-103import 
java.util.concurrent.atomic.AtomicInteger;
-104
-105/**
-106 * Main implementation of {@link 
Connection} and {@link ClusterConnection} interfaces.
-107 * Encapsulates connection to zookeeper 
and regionservers.
-108 */
-109@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-110
value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACT

[41/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.html
index d4351b1..4cacc12 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.html
@@ -101,7 +101,7 @@
 
 @InterfaceAudience.LimitedPrivate(value={"Coprocesssor","Phoenix"})
 @InterfaceStability.Evolving
-public class RpcServer
+public class RpcServer
 extends java.lang.Object
 implements RpcServerInterface, ConfigurationObserver
 An RPC server that hosts protobuf described Services.
@@ -154,22 +154,18 @@ implements 
-static class 
-RpcServer.CallQueueTooBigException 
-
-
 class 
 RpcServer.Connection
 Reads calls from a connection and queues them for 
handling.
 
 
-
+
 private class 
 RpcServer.Listener
 Listens on the socket.
 
 
-
+
 protected class 
 RpcServer.Responder 
 
@@ -225,7 +221,7 @@ implements bindAddress 
 
 
-private static RpcServer.CallQueueTooBigException
+private static CallQueueTooBigException
 CALL_QUEUE_TOO_BIG_EXCEPTION 
 
 
@@ -724,7 +720,7 @@ implements 
 
 LOG
-public static final org.apache.commons.logging.Log LOG
+public static final org.apache.commons.logging.Log LOG
 
 
 
@@ -733,7 +729,7 @@ implements 
 
 CALL_QUEUE_TOO_BIG_EXCEPTION
-private static final RpcServer.CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
+private static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
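CALL_QUEUE_TOO_BIG_EXCEPTION above is a single static instance rather than a per-rejection allocation. A sketch of that pattern, with a stand-in exception class (only the shape matters; the real class lives in hbase-client):

import java.io.IOException;

class CallQueueTooBigException extends IOException {
}

final class RejectionSketch {
  // One shared instance, created at class-load time, so rejecting a call
  // when the queue is full costs neither an allocation nor a fillInStackTrace().
  private static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION =
      new CallQueueTooBigException();

  IOException rejectCall() {
    // The shared instance's stack trace points at class initialization, not
    // at the failing call; an accepted trade-off for a flow-control signal.
    return CALL_QUEUE_TOO_BIG_EXCEPTION;
  }
}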
 
 
 
@@ -742,7 +738,7 @@ implements 
 
 authorize
-private final boolean authorize
+private final boolean authorize
 
 
 
@@ -751,7 +747,7 @@ implements 
 
 isSecurityEnabled
-private boolean isSecurityEnabled
+private boolean isSecurityEnabled
 
 
 
@@ -760,7 +756,7 @@ implements 
 
 CURRENT_VERSION
-public static final byte CURRENT_VERSION
+public static final byte CURRENT_VERSION
 See Also:Constant
 Field Values
 
 
@@ -770,7 +766,7 @@ implements 
 
 FALLBACK_TO_INSECURE_CLIENT_AUTH
-public static final String FALLBACK_TO_INSECURE_CLIENT_AUTH
+public static final String FALLBACK_TO_INSECURE_CLIENT_AUTH
 Whether we allow a fallback to SIMPLE auth for insecure 
clients when security is enabled.
 See Also:Constant
 Field Values
 
@@ -781,7 +777,7 @@ implements 
 
 DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER
-static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER
+static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER
 How many calls/handler are allowed in the queue.
 See Also:Constant
 Field Values
 
@@ -792,7 +788,7 @@ implements 
 
 DEFAULT_MAX_CALLQUEUE_SIZE
-private static final int DEFAULT_MAX_CALLQUEUE_SIZE
+private static final int DEFAULT_MAX_CALLQUEUE_SIZE
 The maximum size that we can hold in the RPC queue
 See Also:Constant
 Field Values
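The two fields above bound the scheduler's call queue. How they typically combine is sketched below; the per-handler default of 10 is an assumption for illustration, since the constant's value is not visible in this diff:

final class CallQueueSizingSketch {
  static final int MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; // assumed default

  /** Upper bound on queued calls, derived from the handler count. */
  static int maxQueueLength(int handlerCount) {
    return handlerCount * MAX_CALLQUEUE_LENGTH_PER_HANDLER;
  }
}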
 
@@ -803,7 +799,7 @@ implements 
 
 ipcUtil
-private final IPCUtil ipcUtil
+private final IPCUtil ipcUtil
 
 
 
@@ -812,7 +808,7 @@ implements 
 
 AUTH_FAILED_FOR
-private static final String AUTH_FAILED_FOR
+private static final String AUTH_FAILED_FOR
 See Also:Constant
 Field Values
 
 
@@ -822,7 +818,7 @@ implements 
 
 AUTH_SUCCESSFUL_FOR
-private static final String AUTH_SUCCESSFUL_FOR
+private static final String AUTH_SUCCESSFUL_FOR
 See Also:Constant
 Field Values
 
 
@@ -832,7 +828,7 @@ implements 
 
 AUDITLOG
-private static final org.apache.commons.logging.Log AUDITLOG
+private static final org.apache.commons.logging.Log AUDITLOG
 
 
 
@@ -841,7 +837,7 @@ implements 
 
 secretManager
-protected org.apache.hadoop.security.token.SecretManager
 secretManager
+protected org.apache.hadoop.security.token.SecretManager
 secretManager
 
 
 
@@ -850,7 +846,7 @@ implements 
 
 authManager
-protected org.apache.hadoop.security.authorize.ServiceAuthorizationManager
 authManager
+protected org.apache.hadoop.security.authorize.ServiceAuthorizationManager
 authManager
 
 
 
@@ -859,7 +855,7 @@ implements 
 
 CurCall
-protected static fi

[45/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 4d2974d..e3c5f93 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -104,7 +104,7 @@
 
 
 @InterfaceAudience.Private
- class ConnectionImplementation
+ class ConnectionImplementation
 extends java.lang.Object
 implements ClusterConnection, java.io.Closeable
 Main implementation of Connection 
and ClusterConnection interfaces.
@@ -462,95 +462,84 @@ implements 
-static Throwable
-findException(Object exception)
-Look for an exception we know in the remote exception:
- - hadoop.ipc wrapped exceptions
- - nested exceptions
-
- Looks for: RegionMovedException / RegionOpeningException / 
RegionTooBusyException /
-ThrottlingException
-
-
-
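The method summary above documents the unwrap-and-match behavior. A hedged sketch of that walk, assuming hadoop-common's RemoteException on the classpath; the matched type names come from the summary, and the real logic now lives in ClientExceptionsUtil:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.ipc.RemoteException;

final class FindExceptionSketch {
  // Exception types listed in the method summary above.
  private static final Set<String> SPECIAL = new HashSet<String>(Arrays.asList(
      "RegionMovedException", "RegionOpeningException",
      "RegionTooBusyException", "ThrottlingException"));

  /** Walk wrapped and nested causes until a known exception type is found. */
  static Throwable findException(Object exception) {
    if (!(exception instanceof Throwable)) {
      return null;
    }
    Throwable cur = (Throwable) exception;
    while (cur != null) {
      if (SPECIAL.contains(cur.getClass().getSimpleName())) {
        return cur;
      }
      if (cur instanceof RemoteException) {
        // unwrapRemoteException() returns itself when it cannot rebuild the
        // declared exception; fall back to getCause() to avoid looping.
        Throwable unwrapped = ((RemoteException) cur).unwrapRemoteException();
        cur = (unwrapped == cur) ? cur.getCause() : unwrapped;
      } else {
        cur = cur.getCause();
      }
    }
    return null;
  }
}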
 Admin
 getAdmin()
 Retrieve an Admin implementation to administer an HBase 
cluster.
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 getAdmin(ServerName serverName)
 Establishes a connection to the region server at the 
specified address.
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 getAdmin(ServerName serverName,
 boolean master)
 Establishes a connection to the region server at the 
specified address.
 
 
-
+
 AsyncProcess
 getAsyncProcess() 
 
-
+
 ClientBackoffPolicy
 getBackoffPolicy() 
 
-
+
 private java.util.concurrent.ExecutorService
 getBatchPool() 
 
-
+
 BufferedMutator
 getBufferedMutator(BufferedMutatorParams params)
 Retrieve a BufferedMutator for performing 
client-side buffering of writes.
 
 
-
+
 BufferedMutator
 getBufferedMutator(TableName tableName)
 
  Retrieve a BufferedMutator for performing 
client-side buffering of writes.
 
 
-
+
 (package private) RegionLocations
 getCachedLocation(TableName tableName,
   byte[] row)
 Search the cache for a location that fits our table and row 
key.
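A sketch of the lookup getCachedLocation describes: locations keyed by region start key, so the candidate is the entry with the greatest start key less than or equal to the row (a floorEntry). The map type and comparator are assumptions; the real cache also checks the region's end key:

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.hbase.util.Bytes; // assumes hbase-common on the classpath

final class LocationCacheSketch {
  private final ConcurrentSkipListMap<byte[], Object /* RegionLocations */> byStartKey =
      new ConcurrentSkipListMap<byte[], Object>(Bytes.BYTES_COMPARATOR);

  Object getCachedLocation(byte[] row) {
    Map.Entry<byte[], Object> entry = byStartKey.floorEntry(row);
    // A real lookup still verifies that the found region's end key covers the row.
    return entry == null ? null : entry.getValue();
  }
}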
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
 getClient(ServerName sn)
 Establishes a connection to the region server at the 
specified address, and returns
  a region client protocol.
 
 
-
+
 org.apache.hadoop.conf.Configuration
 getConfiguration() 
 
-
+
 MetricsConnection
 getConnectionMetrics() 
 
-
+
 protected java.util.concurrent.ExecutorService
 getCurrentBatchPool() 
 
-
+
 protected java.util.concurrent.ExecutorService
 getCurrentMetaLookupPool() 
 
-
+
 int
 getCurrentNrHRS() 
 
-
+
 HTableDescriptor
 getHTableDescriptor(byte[] tableName)
 Deprecated. 
@@ -559,7 +548,7 @@ implements 
+
 HTableDescriptor
 getHTableDescriptor(TableName tableName)
 Deprecated. 
@@ -568,7 +557,7 @@ implements 
+
 HTableDescriptor[]
 getHTableDescriptors(java.util.List<String> names)
 Deprecated. 
@@ -578,7 +567,7 @@ implements 
+
 HTableDescriptor[]
 getHTableDescriptorsByTableName(java.util.List<TableName> tableNames)
 Deprecated. 
@@ -586,44 +575,44 @@ implements 
+
 MasterKeepAliveConnection
 getKeepAliveMasterService()
 This function allows HBaseAdmin and potentially others to 
get a shared MasterService
  connection.
 
 
-
+
 (package private) ZooKeeperKeepAliveConnection
 getKeepAliveZooKeeperWatcher()
 Retrieve a shared ZooKeeperWatcher.
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 getMaster()
 Returns a MasterKeepAliveConnection to 

[49/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 7fe4ad8..066a233 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
©2007 - 2016 The Apache Software Foundation
 
-  File: 1665,
- Errors: 12894,
+  File: 1667,
+ Errors: 12893,
  Warnings: 0,
  Infos: 0
   
@@ -200,6 +200,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.java";>org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.ShutdownHook.java";>org/apache/hadoop/hbase/regionserver/ShutdownHook.java
 
 
@@ -4022,6 +4036,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.CallQueueTooBigException.java";>org/apache/hadoop/hbase/CallQueueTooBigException.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.ipc.BlockingRpcCallback.java";>org/apache/hadoop/hbase/ipc/BlockingRpcCallback.java
 
 
@@ -6822,7 +6850,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.hfile.CachedBlock.java";>org/apache/hadoop/hbase/io/hfile/CachedBlock.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.thrift2.generated.TScan.java";>org/apache/hadoop/hbase/thrift2/generated/TScan.java
 
 
   0
@@ -6836,7 +6864,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.thrift2.generated.TScan.java";>org/apache/hadoop/hbase/thrift2/generated/TScan.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.hfile.CachedBlock.java";>org/apache/hadoop/hbase/io/hfile/CachedBlock.java
 
 
   0
@@ -19501,7 +19529,7 @@ under the License.
   0
 
 
-  3
+  2
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/coc.html
--
diff --git a/coc.html b/coc.html
index 7d8678a..84fbeba 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – 
   Code of Conduct Policy
@@ -331,7 +331,7 @@ For flagrant violations requiring a firm response the PMC 
may opt to skip early
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 8567df8..4763252 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -673,7 +673,7 @@ Now your HBase server is running, start 
coding and build that next
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index d5c64a8..ed2d0f4 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Project Depende

[37/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFuture.html
index d0040f6..3180076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFuture.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 * <p>
-067 * The caller sends a buffer of operations, by calling submit. This class extracts from this list
-068 * the operations it can send, i.e. the operations that are on regions that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when it has finished to
-070 * iterate on the list. If, and only if, the maximum number of concurrent tasks is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 * </p>
-075 * <p>
-076 * The class manages internally the retries.
-077 * </p>
-078 * <p>
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has a global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 * </p>
-087 * <p>
-088 * This class is thread safe in regular mode; in global error mode, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 * </p>
-093 * <p>
-094 * Internally, this class works with {@link Row}, which means it could theoretically be used for
-095 * gets as well.
-096 * </p>
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * are fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If the submit call is made with needResults false, results will not be saved.
-120   */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List<? extends Row> getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture NO_REQS_RESULT = n
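A hedged usage sketch for the AsyncRequestFuture contract quoted above: wait for all tasks, then check for errors before reading results. The interface is mirrored locally so the sketch is self-contained; real callers obtain an instance from AsyncProcess's submit:

import java.io.InterruptedIOException;

final class AsyncRequestFutureUsage {
  // Local mirror of the interface shown in the source above.
  interface AsyncRequestFuture {
    boolean hasError();
    Object[] getResults() throws InterruptedIOException;
    void waitUntilDone() throws InterruptedIOException;
  }

  static Object[] drain(AsyncRequestFuture future) throws InterruptedIOException {
    future.waitUntilDone(); // block until every task has finished, OK or not
    if (future.hasError()) {
      // The real client reports details via getErrors()/getFailedOperations().
      throw new IllegalStateException("some operations failed");
    }
    return future.getResults();
  }
}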

hbase-site git commit: Trivial commit to trigger gitsubpub

2016-01-28 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 39cf5e9b6 -> f17dc2fa0


Trivial commit to trigger gitsubpub


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f17dc2fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f17dc2fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f17dc2fa

Branch: refs/heads/asf-site
Commit: f17dc2fa0271c1e0e511226ffc38e4448dc974fd
Parents: 39cf5e9
Author: Misty Stanley-Jones 
Authored: Thu Jan 28 09:25:52 2016 -0800
Committer: Misty Stanley-Jones 
Committed: Thu Jan 28 09:26:02 2016 -0800

--
 index.html | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17dc2fa/index.html
--
diff --git a/index.html b/index.html
index 750564e..c7f5cb1 100644
--- a/index.html
+++ b/index.html
@@ -14,7 +14,6 @@
 
 
 
-  
 
 
   



[19/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallQueueTooBigException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallQueueTooBigException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallQueueTooBigException.html
deleted file mode 100644
index bf27873..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallQueueTooBigException.html
+++ /dev/null
@@ -1,2664 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software 
Foundation (ASF) under one
-003 * or more contributor license 
agreements.  See the NOTICE file
-004 * distributed with this work for 
additional information
-005 * regarding copyright ownership.  The 
ASF licenses this file
-006 * to you under the Apache License, 
Version 2.0 (the
-007 * "License"); you may not use this file 
except in compliance
-008 * with the License.  You may obtain a 
copy of the License at
-009 *
-010 * 
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or 
agreed to in writing, software
-013 * distributed under the License is 
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-015 * See the License for the specific 
language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.ipc;
-020
-021import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
-022
-023import java.io.ByteArrayInputStream;
-024import java.io.ByteArrayOutputStream;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.net.BindException;
-028import java.net.InetAddress;
-029import java.net.InetSocketAddress;
-030import java.net.ServerSocket;
-031import java.net.Socket;
-032import java.net.SocketException;
-033import java.net.UnknownHostException;
-034import java.nio.ByteBuffer;
-035import 
java.nio.channels.CancelledKeyException;
-036import java.nio.channels.Channels;
-037import 
java.nio.channels.ClosedChannelException;
-038import 
java.nio.channels.GatheringByteChannel;
-039import 
java.nio.channels.ReadableByteChannel;
-040import java.nio.channels.SelectionKey;
-041import java.nio.channels.Selector;
-042import 
java.nio.channels.ServerSocketChannel;
-043import java.nio.channels.SocketChannel;
-044import 
java.nio.channels.WritableByteChannel;
-045import 
java.security.PrivilegedExceptionAction;
-046import java.util.ArrayList;
-047import java.util.Arrays;
-048import java.util.Collections;
-049import java.util.HashMap;
-050import java.util.Iterator;
-051import java.util.LinkedList;
-052import java.util.List;
-053import java.util.Map;
-054import java.util.Random;
-055import java.util.Set;
-056import 
java.util.concurrent.ConcurrentHashMap;
-057import 
java.util.concurrent.ConcurrentLinkedDeque;
-058import 
java.util.concurrent.ExecutorService;
-059import java.util.concurrent.Executors;
-060import 
java.util.concurrent.atomic.AtomicInteger;
-061import java.util.concurrent.locks.Lock;
-062import 
java.util.concurrent.locks.ReentrantLock;
-063
-064import javax.security.sasl.Sasl;
-065import 
javax.security.sasl.SaslException;
-066import javax.security.sasl.SaslServer;
-067
-068import org.apache.commons.logging.Log;
-069import 
org.apache.commons.logging.LogFactory;
-070import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-071import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-072import 
org.apache.hadoop.conf.Configuration;
-073import 
org.apache.hadoop.hbase.CellScanner;
-074import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-075import 
org.apache.hadoop.hbase.HBaseIOException;
-076import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-077import 
org.apache.hadoop.hbase.HConstants;
-078import 
org.apache.hadoop.hbase.HRegionInfo;
-079import org.apache.hadoop.hbase.Server;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.client.Operation;
-082import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-083import 
org.apache.hadoop.hbase.codec.Codec;
-084import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-086import 
org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-087import 
org.apache.hadoop.hbase.io.BoundedByteBufferPool;
-088import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-089import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-090import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-091import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
-092import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
-093import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
-

[36/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
index d0040f6..3180076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 * <p>
-067 * The caller sends a buffer of operations, by calling submit. This class extracts from this list
-068 * the operations it can send, i.e. the operations that are on regions that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when it has finished to
-070 * iterate on the list. If, and only if, the maximum number of concurrent tasks is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 * </p>
-075 * <p>
-076 * The class manages internally the retries.
-077 * </p>
-078 * <p>
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has a global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 * </p>
-087 * <p>
-088 * This class is thread safe in regular mode; in global error mode, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 * </p>
-093 * <p>
-094 * Internally, this class works with {@link Row}, which means it could theoretically be used for
-095 * gets as well.
-096 * </p>
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * are fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If the submit call is made with needResults false, results will not be saved.
-120   */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List<? extends Row> getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-

[05/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RpcTimeoutConnection.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RpcTimeoutConnection.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RpcTimeoutConnection.html
index 64cabd4..d2496d9 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RpcTimeoutConnection.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RpcTimeoutConnection.html
@@ -279,286 +279,282 @@ extends java.lang.Object
 finalize() 
 
 
-static Throwable
-findException(Object arg0)
-
-
 org.apache.hadoop.hbase.client.Admin
 getAdmin() 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 getAdmin(org.apache.hadoop.hbase.ServerName arg0) 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 getAdmin(org.apache.hadoop.hbase.ServerName arg0,
 boolean arg1) 
 
-
+
 org.apache.hadoop.hbase.client.AsyncProcess
 getAsyncProcess() 
 
-
+
 org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy
 getBackoffPolicy() 
 
-
+
 org.apache.hadoop.hbase.client.BufferedMutator
 getBufferedMutator(org.apache.hadoop.hbase.client.BufferedMutatorParams arg0) 
 
-
+
 org.apache.hadoop.hbase.client.BufferedMutator
 getBufferedMutator(org.apache.hadoop.hbase.TableName arg0) 
 
-
+
 (package private) 
org.apache.hadoop.hbase.RegionLocations
 getCachedLocation(org.apache.hadoop.hbase.TableName arg0,
   byte[] arg1) 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
 getClient(org.apache.hadoop.hbase.ServerName sn) 
 
-
+
 org.apache.hadoop.conf.Configuration
 getConfiguration() 
 
-
+
 org.apache.hadoop.hbase.client.MetricsConnection
 getConnectionMetrics() 
 
-
+
 protected java.util.concurrent.ExecutorService
 getCurrentBatchPool() 
 
-
+
 protected java.util.concurrent.ExecutorService
 getCurrentMetaLookupPool() 
 
-
+
 int
 getCurrentNrHRS() 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor
 getHTableDescriptor(byte[] arg0)
 Deprecated. 
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor
 getHTableDescriptor(org.apache.hadoop.hbase.TableName arg0)
 Deprecated. 
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor[]
 getHTableDescriptors(java.util.List<String> arg0)
 Deprecated. 
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor[]
 getHTableDescriptorsByTableName(java.util.List<TableName> arg0)
 Deprecated. 
 
 
-
+
 org.apache.hadoop.hbase.client.MasterKeepAliveConnection
 getKeepAliveMasterService() 
 
-
+
 (package private) 
org.apache.hadoop.hbase.client.ZooKeeperKeepAliveConnection
 getKeepAliveZooKeeperWatcher() 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 getMaster() 
 
-
+
 org.apache.hadoop.hbase.client.RpcRetryingCallerFactory
 getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration arg0) 
 
-
+
 org.apache.hadoop.hbase.client.NonceGenerator
 getNonceGenerator() 
 
-
+
 (package private) int
 getNumberOfCachedRegionLocations(org.apache.hadoop.hbase.TableName arg0) 
 
-
+
 boolean
 getRegionCachePrefetch(byte[] arg0)
 Deprecated. 
 
 
-
+
 boolean
 getRegionCachePrefetch(org.apache.hadoop.hbase.TableName arg0)
 Deprecated. 
 
 
-
+
 org.apache.hadoop.hbase.HRegionLocation
 getRegionLocation(byte[] arg0,
   byte[] arg1,
   boolean arg2) 
 
-
+
 org.apache.hadoop.hbase.HRegionLocation
 getRegionLocation(org.apache.hadoop.hbase.TableName arg0,
   byte[] arg1,
   boolean arg2) 
 
-
+
 org.apache.hadoop.hbase.client.RegionLocator
 getRegionLocator(org.apache.hadoop.hbase.TableName arg0) 
 
-
+
 (package private) 
org.apache.hadoop.hbase.ipc.RpcClient
 getRpcClient() 
 
-
+
 

[28/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index ee363fc..16f504e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -43,2273 +43,2227 @@
 035import 
org.apache.hadoop.hbase.HTableDescriptor;
 036import 
org.apache.hadoop.hbase.MasterNotRunningException;
 037import 
org.apache.hadoop.hbase.MetaTableAccessor;
-038import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-039import 
org.apache.hadoop.hbase.RegionLocations;
-040import 
org.apache.hadoop.hbase.RegionTooBusyException;
-041import 
org.apache.hadoop.hbase.RetryImmediatelyException;
-042import 
org.apache.hadoop.hbase.ServerName;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.TableNotEnabledException;
-045import 
org.apache.hadoop.hbase.TableNotFoundException;
-046import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-049import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-050import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-051import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-052import 
org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-053import 
org.apache.hadoop.hbase.ipc.RpcClient;
-054import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-056import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-057import 
org.apache.hadoop.hbase.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-059import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-062import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
-066import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse;
-067import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-068import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-069import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-070import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-071import 
org.apache.hadoop.hbase.quotas.ThrottlingException;
-072import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-073import 
org.apache.hadoop.hbase.security.User;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.hbase.util.Threads;
-079import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-082import 
org.apache.hadoop.ipc.RemoteException;
-083import 
org.apache.zookeeper.KeeperException;
-084
-085import javax.annotation.Nullable;
-086
-087import java.io.Closeable;
-088import java.io.IOException;
-089import java.io.InterruptedIOException;
-090import 
java.lang.reflect.UndeclaredThrowableException;
-091import java.net.InetAddress;
-092import java.net.InetSocketAddress;
-093import java.util.ArrayList;
-094import java.util.Date;
-095import java.util.List;
-096import 
java.util.concurrent.BlockingQueue;
-097import 
java.util.concurrent.ConcurrentHashMap;
-098import 
java.util.concurrent.ConcurrentMap;
-099import 
java.util.concurrent.ExecutorService;
-100import 
java.util.concurrent.LinkedBlockingQueue;
-101import 
java.util.concurrent.ThreadPoolExecutor;
-102import java.util.concurrent.TimeUnit;
-103import 
java.util.concurrent.atomic.AtomicInteger;
-104
-105/**
-106 * Main implementation of {@link 
Connection} and {@link ClusterConnection} interfaces.
-107 * Encapsulates connection to zookeeper 
and regionservers.
-108 */

[18/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
index bf27873..e4df7cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Connection.html
@@ -75,2529 +75,2534 @@
 067
 068import org.apache.commons.logging.Log;
 069import 
org.apache.commons.logging.LogFactory;
-070import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-071import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-072import 
org.apache.hadoop.conf.Configuration;
-073import 
org.apache.hadoop.hbase.CellScanner;
-074import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-075import 
org.apache.hadoop.hbase.HBaseIOException;
-076import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-077import 
org.apache.hadoop.hbase.HConstants;
-078import 
org.apache.hadoop.hbase.HRegionInfo;
-079import org.apache.hadoop.hbase.Server;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.client.Operation;
-082import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-083import 
org.apache.hadoop.hbase.codec.Codec;
-084import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-086import 
org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-087import 
org.apache.hadoop.hbase.io.BoundedByteBufferPool;
-088import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-089import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-090import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-091import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
-092import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta;
-093import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
-094import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse;
-095import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-096import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader;
-097import 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
-098import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-099import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-100import 
org.apache.hadoop.hbase.security.AuthMethod;
-101import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-102import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-103import 
org.apache.hadoop.hbase.security.User;
-104import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-105import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-106import 
org.apache.hadoop.hbase.security.SaslStatus;
-107import 
org.apache.hadoop.hbase.security.SaslUtil;
-108import 
org.apache.hadoop.hbase.security.UserProvider;
-109import 
org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
-110import 
org.apache.hadoop.hbase.util.Bytes;
-111import 
org.apache.hadoop.hbase.util.Counter;
-112import 
org.apache.hadoop.hbase.util.Pair;
-113import 
org.apache.hadoop.io.BytesWritable;
-114import 
org.apache.hadoop.io.IntWritable;
-115import org.apache.hadoop.io.Writable;
-116import 
org.apache.hadoop.io.WritableUtils;
-117import 
org.apache.hadoop.io.compress.CompressionCodec;
-118import 
org.apache.hadoop.security.UserGroupInformation;
-119import 
org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-120import 
org.apache.hadoop.security.authorize.AuthorizationException;
-121import 
org.apache.hadoop.security.authorize.PolicyProvider;
-122import 
org.apache.hadoop.security.authorize.ProxyUsers;
-123import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-124import 
org.apache.hadoop.security.token.SecretManager;
-125import 
org.apache.hadoop.security.token.SecretManager.InvalidToken;
-126import 
org.apache.hadoop.security.token.TokenIdentifier;
-127import 
org.apache.hadoop.util.StringUtils;
-128import 
org.codehaus.jackson.map.ObjectMapper;
-129import org.apache.htrace.TraceInfo;
-130
-131import 
com.google.common.util.concurrent.ThreadFactoryBuilder;
-132import 
com.google.protobuf.BlockingService;
-133import 
com.google.protobuf.CodedInputStream;
-134import 
com.google.protobuf.Descriptors.MethodDescriptor;
-135import com.google.protobuf.Message;
-136import 
com.google.protobuf.ServiceException;
-137import com.google.protobuf.TextFormat;
-138
-139/**
-140 * An RPC server that hosts protobuf 
described Services.
-141 *
-142 * An RpcServer instance has a Listener 
that hosts the socket.  Listener has fixed number
-143 * of Readers in an ExecutorPool, 10 
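
The design that javadoc describes (a single Listener accepting connections and handing them to a fixed pool of Reader threads, each owning its own Selector) reduces to a small NIO pattern. A standalone, illustrative sketch follows; it is not HBase's code, and every name in it is invented:

import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ReaderPoolSketch {
  public static void main(String[] args) throws Exception {
    final int numReaders = 10;  // the fixed Reader count the javadoc mentions
    ExecutorService readerPool = Executors.newFixedThreadPool(numReaders);
    final Selector[] selectors = new Selector[numReaders];
    for (int i = 0; i < numReaders; i++) {
      final Selector sel = Selector.open();
      selectors[i] = sel;
      readerPool.execute(new Runnable() {
        @Override public void run() {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              // short timeout so registrations from the listener thread
              // below are not blocked indefinitely
              sel.select(1000);
              sel.selectedKeys().clear();  // a real Reader would decode requests here
            }
          } catch (Exception e) {
            // shutting down
          }
        }
      });
    }
    ServerSocketChannel acceptChannel = ServerSocketChannel.open();
    acceptChannel.bind(new InetSocketAddress(0));
    int next = 0;
    while (true) {  // the Listener: accept, then hand off to Readers round-robin
      SocketChannel conn = acceptChannel.accept();
      conn.configureBlocking(false);
      conn.register(selectors[next++ % numReaders], SelectionKey.OP_READ);
    }
  }
}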

[17/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.Reader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.Reader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.Reader.html
index bf27873..e4df7cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.Reader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.Listener.Reader.html
@@ -75,2529 +75,2534 @@

[22/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.html
new file mode 100644
index 000..5d603c1
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.html
@@ -0,0 +1,167 @@
+001/*
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020package 
org.apache.hadoop.hbase.exceptions;
+021
+022import 
org.apache.hadoop.hbase.CallQueueTooBigException;
+023import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
+024import 
org.apache.hadoop.hbase.RegionTooBusyException;
+025import 
org.apache.hadoop.hbase.RetryImmediatelyException;
+026import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+027import 
org.apache.hadoop.hbase.classification.InterfaceStability;
+028import 
org.apache.hadoop.hbase.quotas.ThrottlingException;
+029import 
org.apache.hadoop.ipc.RemoteException;
+030
+031@InterfaceAudience.Private
+032@InterfaceStability.Evolving
+033public final class ClientExceptionsUtil 
{
+034
+035  private ClientExceptionsUtil() {}
+036
+037  public static boolean 
isMetaClearingException(Throwable cur) {
+038cur = findException(cur);
+039
+040if (cur == null) {
+041  return true;
+042}
+043return !isSpecialException(cur) || 
(cur instanceof RegionMovedException);
+044  }
+045
+046  public static boolean 
isSpecialException(Throwable cur) {
+047return (cur instanceof 
RegionMovedException || cur instanceof RegionOpeningException
+048|| cur instanceof 
RegionTooBusyException || cur instanceof ThrottlingException
+049|| cur instanceof 
MultiActionResultTooLarge || cur instanceof RetryImmediatelyException
+050|| cur instanceof 
CallQueueTooBigException);
+051  }
+052
+053
+054  /**
+055   * Look for an exception we know in the 
remote exception:
+056   * - hadoop.ipc wrapped exceptions
+057   * - nested exceptions
+058   *
+059   * Looks for: RegionMovedException / 
RegionOpeningException / RegionTooBusyException /
+060   *ThrottlingException
+061   * @return null if we didn't find the 
exception, the exception otherwise.
+062   */
+063  public static Throwable 
findException(Object exception) {
+064if (exception == null || !(exception 
instanceof Throwable)) {
+065  return null;
+066}
+067Throwable cur = (Throwable) 
exception;
+068while (cur != null) {
+069  if (isSpecialException(cur)) {
+070return cur;
+071  }
+072  if (cur instanceof RemoteException) 
{
+073RemoteException re = 
(RemoteException) cur;
+074cur = re.unwrapRemoteException(
+075RegionOpeningException.class, 
RegionMovedException.class,
+076
RegionTooBusyException.class);
+077if (cur == null) {
+078  cur = 
re.unwrapRemoteException();
+079}
+080// unwrapRemoteException can 
return the exception given as a parameter when it cannot
+081//  unwrap it. In this case, 
there is no need to look further
+082// noinspection ObjectEquality
+083if (cur == re) {
+084  return cur;
+085}
+086  } else if (cur.getCause() != null) 
{
+087cur = cur.getCause();
+088  } else {
+089return cur;
+090  }
+091}
+092
+093return null;
+094  }
+095}
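
A short usage sketch of the class above, against a contrived failure chain (the exceptions and messages are made up; the outcomes follow from the quoted source):

import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;

public class FindExceptionDemo {
  public static void main(String[] args) {
    // a contrived failure: the interesting exception is buried as a cause
    Throwable failure = new IOException("multi call failed",
        new RegionTooBusyException("server overloaded"));

    // findException() walks the cause chain (unwrapping hadoop-ipc
    // RemoteExceptions along the way) until it hits a "special" exception
    Throwable root = ClientExceptionsUtil.findException(failure);
    System.out.println(root);  // the RegionTooBusyException

    // RegionTooBusyException is special but is not RegionMovedException, so
    // the client keeps its cached region locations
    System.out.println(ClientExceptionsUtil.isMetaClearingException(failure));  // false
  }
}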

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.html

[09/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
index 047fe1f..3255d2e 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
@@ -103,7 +103,7 @@
 
 
 
-static class TestAsyncProcess.MyAsyncProcess
+static class TestAsyncProcess.MyAsyncProcess
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -428,7 +428,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 nbMultiResponse
-final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger nbMultiResponse
+final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger nbMultiResponse
 
 
 
@@ -437,7 +437,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 nbActions
-final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger nbActions
+final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger nbActions
 
 
 
@@ -446,7 +446,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 allReqs
-public http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List
 allReqs
+public http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List
 allReqs
 
 
 
@@ -455,7 +455,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 callsCt
-public http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger callsCt
+public http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger callsCt
 
 
 
@@ -664,7 +664,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 TestAsyncProcess.MyAsyncProcess
-public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,

org.apache.hadoop.conf.Configuration conf)
 
 
@@ -674,7 +674,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 TestAsyncProcess.MyAsyncProcess
-public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
org.apache.hadoop.conf.Configuration conf,
http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger nbThreads)
 
@@ -685,7 +685,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 TestAsyncProcess.MyAsyncProcess
-public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
org.apache.hadoop.conf.Configuration conf,
boolean useGlobalErrors)
 
@@ -696,7 +696,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 TestAsyncProcess.MyAsyncProcess
-public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public TestAsyncProcess.MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
org.apache.hadoop.conf.Configuration conf,
boolean useGlobalErrors,
boolean dummy)
@@ -716,7 +716,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 createAsyncRe

[50/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index cf2f43a..d3025a3 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase – Checkstyle Results
 
@@ -280,10 +280,10 @@
  Warnings
  Errors
 
-1665
+1667
 0
 0
-12894
+12893
 
 Files
 
@@ -2441,7 +2441,7 @@
 org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
 0
 0
-3
+2
 
 org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
 0
@@ -6104,7 +6104,7 @@
 
 ordered: "true"
 sortStaticImportsAlphabetically: "true"
-980
+979
  Error
 
 
@@ -12147,133 +12147,133 @@
 imports
 ImportOrder
 Wrong order for 'com.google.common.annotations.VisibleForTesting' 
import.
-61
+62
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-427
+428
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-587
+588
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-629
+630
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-664
+665
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-667
+668
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-677
+678
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-699
+700
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-732
+733
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-891
+892
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-927
+928
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-943
+944
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-947
+948
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-969
+970
 
  Error
 indentation
 Indentation
 'if' child have incorrect indentation level 15, expected level should be 
16.
-1018
+1019
 
  Error
 indentation
 Indentation
 'method call' child have incorrect indentation level 15, expected level 
should be 16.
-1018
+1019
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-1096
+1097
 
  Error
 indentation
 Indentation
 'if' child have incorrect indentation level 9, expected level should be 
8.
-1433
+1434
 
  Error
 indentation
 Indentation
 'if' child have incorrect indentation level 9, expected level should be 
8.
-1434
+1435
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1527
+1528
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1636
+1637
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1718
+1719
 
 org/apache/hadoop/hbase/client/BufferedMutator.java
 
@@ -13836,121 +13836,121 @@
 imports
 ImportOrder
 Wrong order for 'javax.annotation.Nullable' import.
-85
+81
 
  Error
 imports
 ImportOrder
 Wrong order for 'java.io.Closeable' import.
-87
+83
 
  Error
 indentation
 Indentation
 'member def type' have incorrect indentation level 3, expected level 
should be 2.
-184
+180
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-507
+462
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-603
+558
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-771
+726
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1232
+1187
 
  Error
 sizes
 MethodLength
 Method length is 388 lines (max allowed is 150).
-1412
+1367
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1827
+1782
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-1875
+1830
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-1982
+1936
 
  Error
 coding
 NoFinalizer
 Avoid using finalizer method.
-2150
+2104
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2214
+2168
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-2230
+2184
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-2252
+2206
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-2255
+2209
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2261
+2215
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-2282
+2236
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2293
+2247
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2295
+2249
 
 org/apache/hadoop/hbase/client/ConnectionUtils.java
 
@@ -32808,529 +32808,529 @@
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.conf.Conf

[30/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.html
index d0040f6..3180076 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncProcess.html
@@ -61,1780 +61,1781 @@
 053import 
org.apache.hadoop.hbase.TableName;
 054import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 055import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.htrace.Trace;
-060
-061import 
com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * This class  allows a continuous flow of requests. It's written to be compatible with a
-065 * synchronous caller such as HTable.
-066 * <p>
-067 * The caller sends a buffer of operation, by calling submit. This class extract from this list
-068 * the operations it can send, i.e. the operations that are on region that are not considered
-069 * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to
-070 * iterate on the list. If, and only if, the maximum number of current task is reached, the call
-071 * to submit will block. Alternatively, the caller can call submitAll, in which case all the
-072 * operations will be sent. Each call to submit returns a future-like object that can be used
-073 * to track operation progress.
-074 * </p>
-075 * <p>
-076 * The class manages internally the retries.
-077 * </p>
-078 * <p>
-079 * The class can be constructed in regular mode, or "global error" mode. In global error mode,
-080 * AP tracks errors across all calls (each "future" also has global view of all errors). That
-081 * mode is necessary for backward compat with HTable behavior, where multiple submissions are
-082 * made and the errors can propagate using any put/flush call, from previous calls.
-083 * In "regular" mode, the errors are tracked inside the Future object that is returned.
-084 * The results are always tracked inside the Future object and can be retrieved when the call
-085 * has finished. Partial results can also be retrieved if some part of multi-request failed.
-086 * </p>
-087 * <p>
-088 * This class is thread safe in regular mode; in global error code, submitting operations and
-089 * retrieving errors from different threads may be not thread safe.
-090 * Internally, the class is thread safe enough to manage simultaneously new submission and results
-091 * arising from older operations.
-092 * </p>
-093 * <p>
-094 * Internally, this class works with {@link Row}, this mean it could be theoretically used for
-095 * gets as well.
-096 * </p>
-097 */
-098@InterfaceAudience.Private
-099class AsyncProcess {
-100  private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-101  protected static final AtomicLong COUNTER = new AtomicLong();
-102
-103  public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
-104
-105  /**
-106   * Configure the number of failures after which the client will start logging. A few failures
-107   * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable
-108   * heuristic for the number of errors we don't log. 9 was chosen because we wait for 1s at
-109   * this stage.
-110   */
-111  public static final String START_LOG_ERRORS_AFTER_COUNT_KEY =
-112      "hbase.client.start.log.errors.counter";
-113  public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT = 9;
-114
-115  /**
-116   * The context used to wait for results from one submit call.
-117   * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
-118   *    then errors and failed operations in this object will reflect global errors.
-119   * 2) If submit call is made with needResults false, results will not be saved.
-120   * */
-121  public static interface AsyncRequestFuture {
-122    public boolean hasError();
-123    public RetriesExhaustedWithDetailsException getErrors();
-124    public List<? extends Row> getFailedOperations();
-125    public Object[] getResults() throws InterruptedIOException;
-126    /** Wait until all tasks are executed, successfully or not. */
-127    public void waitUntilDone() throws InterruptedIOException;
-128  }
-129
-130  /**
-131   * Return value from a submit that didn't contain any requests.
-132   */
-133  private static final AsyncRequestFuture NO_REQS_RESULT = new AsyncRequestFuture() {
-134    final Object[] result = new Object[0];
-135
-136    @Override
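
Of the constants quoted above, hbase.client.start.log.errors.counter is the user-facing knob. A trivial sketch of overriding its default of 9 (the value 3 is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StartLogErrorsDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // log retry errors starting from the 3rd failure instead of the 9th
    conf.setInt("hbase.client.start.log.errors.counter", 3);
    System.out.println(conf.getInt("hbase.client.start.log.errors.counter", 9));
  }
}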

[11/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
index 36edc93..5f88bf5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
@@ -34,27 +34,27 @@
 026
 027{
 028  private final HRegionServer 
regionServer;
-029  private final String bcv;
-030  private final String format;
+029  private final String format;
+030  private final String bcn;
 031  private final String filter;
-032  private final String bcn;
+032  private final String bcv;
 033  protected static 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData 
__jamon_setOptionalArguments(org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
 p_implData)
 034  {
-035if(! 
p_implData.getBcv__IsNotDefault())
+035if(! 
p_implData.getFormat__IsNotDefault())
 036{
-037  p_implData.setBcv("");
+037  p_implData.setFormat("html");
 038}
-039if(! 
p_implData.getFormat__IsNotDefault())
+039if(! 
p_implData.getBcn__IsNotDefault())
 040{
-041  p_implData.setFormat("html");
+041  p_implData.setBcn("");
 042}
 043if(! 
p_implData.getFilter__IsNotDefault())
 044{
 045  p_implData.setFilter("general");
 046}
-047if(! 
p_implData.getBcn__IsNotDefault())
+047if(! 
p_implData.getBcv__IsNotDefault())
 048{
-049  p_implData.setBcn("");
+049  p_implData.setBcv("");
 050}
 051return p_implData;
 052  }
@@ -62,10 +62,10 @@
 054  {
 055super(p_templateManager, 
__jamon_setOptionalArguments(p_implData));
 056regionServer = 
p_implData.getRegionServer();
-057bcv = p_implData.getBcv();
-058format = p_implData.getFormat();
+057format = p_implData.getFormat();
+058bcn = p_implData.getBcn();
 059filter = p_implData.getFilter();
-060bcn = p_implData.getBcn();
+060bcv = p_implData.getBcv();
 061  }
 062  
 063  @Override public void 
renderNoFlush(final java.io.Writer jamonWriter)
@@ -94,8 +94,8 @@
 086  // 41, 3
 087  {
 088
org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl __jamon__var_1 = new 
org.apache.hadoop.hbase.tmpl.common.TaskMonitorTmpl(this.getTemplateManager());
-089
__jamon__var_1.setFilter(filter);
-090__jamon__var_1.setFormat("json" 
);
+089__jamon__var_1.setFormat("json" 
);
+090
__jamon__var_1.setFilter(filter);
 091
__jamon__var_1.renderNoFlush(jamonWriter);
 092  }
 093  // 41, 68

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/distribution-management.html
--
diff --git a/distribution-management.html b/distribution-management.html
index 8654772..04ac58b 100644
--- a/distribution-management.html
+++ b/distribution-management.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase – Project Distribution Management
 
@@ -290,7 +290,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/export_control.html
--
diff --git a/export_control.html b/export_control.html
index dcca9d5..bf37587 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase – 
   Export Control
@@ -330,7 +330,7 @@ for more details.
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-01-27
+  Last Published: 
2016-01-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index d7bdb55..ae5afc0 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-La

[46/51] [partial] hbase-site git commit: Published site at 138b754671d51d3f494adc250ab0cb9e085c858a.

2016-01-28 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/39cf5e9b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
index e584a3d..0efe897 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
@@ -96,7 +96,7 @@
 
 
 @InterfaceAudience.Private
- class AsyncProcess
+ class AsyncProcess
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 This class  allows a continuous flow of requests. It's 
written to be compatible with a
  synchronous caller such as HTable.
@@ -514,7 +514,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -523,7 +523,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 COUNTER
-protected static final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicLong COUNTER
+protected static final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicLong COUNTER
 
 
 
@@ -532,7 +532,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 PRIMARY_CALL_TIMEOUT_KEY
-public static final http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String PRIMARY_CALL_TIMEOUT_KEY
+public static final http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String PRIMARY_CALL_TIMEOUT_KEY
 See Also:Constant
 Field Values
 
 
@@ -542,7 +542,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 START_LOG_ERRORS_AFTER_COUNT_KEY
-public static final http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String START_LOG_ERRORS_AFTER_COUNT_KEY
+public static final http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String START_LOG_ERRORS_AFTER_COUNT_KEY
 Configure the number of failures after which the client 
will start logging. A few failures
  is fine: region moved, then is not opened, then is overloaded. We try to have 
an acceptable
  heuristic for the number of errors we don't log. 9 was chosen because we wait 
for 1s at
@@ -556,7 +556,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 DEFAULT_START_LOG_ERRORS_AFTER_COUNT
-public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT
+public static final int DEFAULT_START_LOG_ERRORS_AFTER_COUNT
 See Also:Constant
 Field Values
 
 
@@ -566,7 +566,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 NO_REQS_RESULT
-private static final AsyncProcess.AsyncRequestFuture NO_REQS_RESULT
+private static final AsyncProcess.AsyncRequestFuture NO_REQS_RESULT
 Return value from a submit that didn't contain any 
requests.
 
 
@@ -576,7 +576,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 id
-protected final long id
+protected final long id
 
 
 
@@ -585,7 +585,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 connection
-protected final ClusterConnection connection
+protected final ClusterConnection connection
 
 
 
@@ -594,7 +594,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 rpcCallerFactory
-protected final RpcRetryingCallerFactory 
rpcCallerFactory
+protected final RpcRetryingCallerFactory 
rpcCallerFactory
 
 
 
@@ -603,7 +603,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 rpcFactory
-protected final RpcControllerFactory rpcFactory
+protected final RpcControllerFactory rpcFactory
 
 
 
@@ -612,7 +612,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 globalErrors
-protected final AsyncProcess.BatchErrors 
globalErrors
+protected final AsyncProcess.BatchErrors 
globalErrors
 
 
 
@@ -621,7 +621,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 pool
-protected final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in java.util.concurrent">ExecutorService pool
+protected final http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in java.util.concurrent

hbase git commit: HBASE-15173 Execute mergeRegions RPC call as the request user

2016-01-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 37ed0f6d0 -> 1ee07688c


HBASE-15173 Execute mergeRegions RPC call as the request user


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ee07688
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ee07688
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ee07688

Branch: refs/heads/master
Commit: 1ee07688c8e75bf8507c1613feec9c56e950ab4c
Parents: 37ed0f6
Author: tedyu 
Authored: Thu Jan 28 09:53:24 2016 -0800
Committer: tedyu 
Committed: Thu Jan 28 09:53:24 2016 -0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 32 
 .../org/apache/hadoop/hbase/master/HMaster.java |  5 +--
 .../hadoop/hbase/master/MasterRpcServices.java  |  3 +-
 .../hadoop/hbase/master/MasterServices.java |  6 ++--
 .../hadoop/hbase/master/ServerManager.java  |  5 +--
 .../handler/DispatchMergingRegionHandler.java   |  7 +++--
 .../apache/hadoop/hbase/client/TestAdmin1.java  |  3 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  3 +-
 8 files changed, 47 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ee07688/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index f5e4305..7cd0d91 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -23,12 +23,14 @@ import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpeci
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -137,6 +139,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -1846,17 +1849,34 @@ public final class ProtobufUtil {
* @param region_b
* @param forcible true if do a compulsory merge, otherwise we will only 
merge
*  two adjacent regions
+   * @param user effective user
* @throws IOException
*/
   public static void mergeRegions(final AdminService.BlockingInterface admin,
   final HRegionInfo region_a, final HRegionInfo region_b,
-  final boolean forcible) throws IOException {
-MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(
+  final boolean forcible, final User user) throws IOException {
+final MergeRegionsRequest request = 
RequestConverter.buildMergeRegionsRequest(
 region_a.getRegionName(), region_b.getRegionName(),forcible);
-try {
-  admin.mergeRegions(null, request);
-} catch (ServiceException se) {
-  throw ProtobufUtil.getRemoteException(se);
+if (user != null) {
+  try {
+user.getUGI().doAs(new PrivilegedExceptionAction() {
+  @Override
+  public Void run() throws Exception {
+admin.mergeRegions(null, request);
+return null;
+  }
+});
+  } catch (InterruptedException ie) {
+InterruptedIOException iioe = new InterruptedIOException();
+iioe.initCause(ie);
+throw iioe;
+  }
+} else {
+  try {
+admin.mergeRegions(null, request);
+  } catch (ServiceException se) {
+throw ProtobufUtil.getRemoteException(se);
+  }
 }
   }
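
Stripped of the protobuf plumbing, the heart of this patch is the doAs wrapper: run the RPC under the request user's credentials when one is supplied, otherwise fall back to the current context. A standalone sketch of that shape (UserGroupInformation is the real Hadoop API; the Runnable stands in for the mergeRegions call):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public final class DoAsSketch {
  static void callAs(final UserGroupInformation ugi, final Runnable rpc) throws IOException {
    if (ugi == null) {
      rpc.run();  // no effective user: run in the current security context
      return;
    }
    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override public Void run() {
          rpc.run();  // the proxied call executes with ugi's credentials
          return null;
        }
      });
    } catch (InterruptedException ie) {
      // same conversion the patch uses: surface interruption as an IOException
      InterruptedIOException iioe = new InterruptedIOException();
      iioe.initCause(ie);
      throw iioe;
    }
  }
}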
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ee07688/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 43f8efa..3cf750e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/

hbase git commit: HBASE-15173 Execute mergeRegions RPC call as the request user

2016-01-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 dfa948413 -> 486f7612b


HBASE-15173 Execute mergeRegions RPC call as the request user


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/486f7612
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/486f7612
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/486f7612

Branch: refs/heads/branch-1
Commit: 486f7612be6d0bdfb2721890ca9982dbcd3f80c2
Parents: dfa9484
Author: tedyu 
Authored: Thu Jan 28 10:02:49 2016 -0800
Committer: tedyu 
Committed: Thu Jan 28 10:02:49 2016 -0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 32 
 .../org/apache/hadoop/hbase/master/HMaster.java |  5 +--
 .../hadoop/hbase/master/MasterRpcServices.java  |  2 +-
 .../hadoop/hbase/master/MasterServices.java |  6 ++--
 .../hadoop/hbase/master/ServerManager.java  |  5 +--
 .../handler/DispatchMergingRegionHandler.java   |  7 +++--
 .../apache/hadoop/hbase/client/TestAdmin1.java  |  3 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  3 +-
 8 files changed, 46 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/486f7612/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 5f5c3a3..c5c8b88 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -23,12 +23,14 @@ import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpeci
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
 import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -138,6 +140,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -1896,17 +1899,34 @@ public final class ProtobufUtil {
* @param region_b
* @param forcible true if do a compulsory merge, otherwise we will only 
merge
*  two adjacent regions
+   * @param user effective user
* @throws IOException
*/
   public static void mergeRegions(final AdminService.BlockingInterface admin,
   final HRegionInfo region_a, final HRegionInfo region_b,
-  final boolean forcible) throws IOException {
-MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(
+  final boolean forcible, final User user) throws IOException {
+final MergeRegionsRequest request = 
RequestConverter.buildMergeRegionsRequest(
 region_a.getRegionName(), region_b.getRegionName(),forcible);
-try {
-  admin.mergeRegions(null, request);
-} catch (ServiceException se) {
-  throw ProtobufUtil.getRemoteException(se);
+if (user != null) {
+  try {
+user.getUGI().doAs(new PrivilegedExceptionAction() {
+  @Override
+  public Void run() throws Exception {
+admin.mergeRegions(null, request);
+return null;
+  }
+});
+  } catch (InterruptedException ie) {
+InterruptedIOException iioe = new InterruptedIOException();
+iioe.initCause(ie);
+throw iioe;
+  }
+} else {
+  try {
+admin.mergeRegions(null, request);
+  } catch (ServiceException se) {
+throw ProtobufUtil.getRemoteException(se);
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/486f7612/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 13347b3..cc05fa1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/

[6/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
HBASE-15019 Replication stuck when HDFS is restarted.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/67c2fc7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/67c2fc7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/67c2fc7c

Branch: refs/heads/branch-1
Commit: 67c2fc7cd62f5d53da633f08d5a3c93600ac86f0
Parents: 486f761
Author: Matteo Bertozzi 
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 10:10:55 2016 -0800

--
 .../regionserver/ReplicationSource.java | 32 ++---
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 78 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/67c2fc7c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 342b826..99666df 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -60,8 +60,10 @@ import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -450,9 +452,9 @@ public class ReplicationSource extends Thread
  * @param p path to split
  * @return start time
  */
-private long getTS(Path p) {
-  String[] parts = p.getName().split("\\.");
-  return Long.parseLong(parts[parts.length-1]);
+private static long getTS(Path p) {
+  int tsIndex = p.getName().lastIndexOf('.') + 1;
+  return Long.parseLong(p.getName().substring(tsIndex));
 }
   }
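
Both the old and the new getTS() take the substring after the last '.' of the WAL file name as the start timestamp; the rewrite just avoids the regex split and the intermediate array. A self-contained check on a made-up WAL name:

import org.apache.hadoop.fs.Path;

public class GetTsDemo {
  public static void main(String[] args) {
    // hypothetical WAL path; the suffix after the last dot is the start time
    Path p = new Path("/hbase/oldWALs/host%2C16020%2C1453939200000.1453939300000");
    int tsIndex = p.getName().lastIndexOf('.') + 1;
    long ts = Long.parseLong(p.getName().substring(tsIndex));
    System.out.println(ts);  // 1453939300000
  }
}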
 
@@ -791,7 +793,6 @@ public class ReplicationSource extends Thread
  * @return true if we should continue with that file, false if we are over 
with it
  */
 protected boolean openReader(int sleepMultiplier) {
-
   try {
 try {
   if (LOG.isTraceEnabled()) {
@@ -872,6 +873,11 @@ public class ReplicationSource extends Thread
 // TODO What happens the log is missing in both places?
   }
 }
+  } catch (LeaseNotRecoveredException lnre) {
+// HBASE-15019 the WAL was not closed due to some hiccup.
+LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + 
currentPath, lnre);
+recoverLease(conf, currentPath);
+this.reader = null;
   } catch (IOException ioe) {
 if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
 LOG.warn(peerClusterZnode + " Got: ", ioe);
@@ -881,7 +887,7 @@ public class ReplicationSource extends Thread
   // which throws a NPE if we open a file before any data node has the 
most recent block
   // Just sleep and retry. Will require re-reading compressed WALs for 
compressionContext.
   LOG.warn("Got NPE opening reader, will retry.");
-} else if (sleepMultiplier == maxRetriesMultiplier) {
+} else if (sleepMultiplier >= maxRetriesMultiplier) {
   // TODO Need a better way to determine if a file is really gone but
   // TODO without scanning all logs dir
   LOG.warn("Waited too long for this file, considering dumping");
@@ -891,6 +897,22 @@ public class ReplicationSource extends Thread
   return true;
 }
 
+private void recoverLease(final Configuration conf, final Path path) {
+  try {
+final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() 
{
+  @Override
+  public boolean progress() {
+LOG.debug("recover WAL lease: " + path);
+return isWorkerActive();
+  }
+});
+  } catch (IOException e) {
+LOG.warn("unable to recover lease for WAL: " + path, e);
+  }
+}
+
 /*
  * Checks w
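
The recoverLease() helper above ultimately drives HDFS lease recovery so the half-closed WAL can be reopened. A reduced standalone sketch of that flow, assuming an HDFS filesystem; the path is a placeholder, and the unbounded retry loop is for brevity only (a real caller, like the CancelableProgressable above, would bound or cancel it):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("/hbase/WALs/rs1/rs1.1453939300000");  // placeholder path
    FileSystem fs = wal.getFileSystem(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // recoverLease() returns true once the lease is released and the file closed
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered) {
        Thread.sleep(1000);
        recovered = dfs.recoverLease(wal);
      }
    }
    // now safe to reopen the WAL for reading
  }
}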

[4/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
HBASE-15019 Replication stuck when HDFS is restarted.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60c6b6df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60c6b6df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60c6b6df

Branch: refs/heads/0.98
Commit: 60c6b6df104030995754bb1470a0d5d3e20cf220
Parents: 444debd
Author: Matteo Bertozzi 
Authored: Thu Jan 28 10:04:20 2016 -0800
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 10:04:20 2016 -0800

--
 .../hbase/regionserver/wal/HLogFactory.java | 11 +++--
 .../regionserver/ReplicationSource.java | 30 +++--
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 3 files changed, 81 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60c6b6df/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
index e6107bf..6999f8e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 
 @InterfaceAudience.Private
 public class HLogFactory {
@@ -46,12 +47,12 @@ public class HLogFactory {
 final Configuration conf) throws IOException {
   return new FSHLog(fs, root, logName, conf);
 }
-
+
 public static HLog createHLog(final FileSystem fs, final Path root, final 
String logName,
 final String oldLogName, final Configuration conf) throws IOException {
   return new FSHLog(fs, root, logName, oldLogName, conf);
 }
-
+
 public static HLog createHLog(final FileSystem fs, final Path root, final 
String logName,
 final Configuration conf, final List listeners,
 final String prefix) throws IOException {
@@ -61,7 +62,7 @@ public class HLogFactory {
 public static HLog createMetaHLog(final FileSystem fs, final Path root, 
final String logName,
 final Configuration conf, final List listeners,
 final String prefix) throws IOException {
-  return new FSHLog(fs, root, logName, HConstants.HREGION_OLDLOGDIR_NAME, 
+  return new FSHLog(fs, root, logName, HConstants.HREGION_OLDLOGDIR_NAME,
 conf, listeners, false, prefix, true);
 }
 
@@ -162,8 +163,10 @@ public class HLogFactory {
   throw iioe;
 }
   }
+  throw new LeaseNotRecoveredException(e);
+} else {
+  throw e;
 }
-throw e;
   }
 }
   } catch (IOException ie) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/60c6b6df/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 97993bb..b7330aa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -50,7 +50,10 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -577,6 +580,11 @@ public class ReplicationSource extends Thread
   // TODO What happens the log is missing in both places?
 }
   }
+} catch (LeaseNotRecoveredException lnre) {
+  // HBASE-15019 the WAL was not closed due to some hiccup.
+  LOG.warn(peerClusterZnode

[2/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
HBASE-15019 Replication stuck when HDFS is restarted.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5041485a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5041485a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5041485a

Branch: refs/heads/branch-1.1
Commit: 5041485aa5c1ecfaa4697b8d0b8a78d027ceaa8a
Parents: aa5dfae
Author: Matteo Bertozzi 
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 09:49:38 2016 -0800

--
 .../regionserver/ReplicationSource.java | 30 +++--
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 78 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5041485a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 755654a..0496f73 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -53,8 +53,11 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.collect.Lists;
@@ -587,6 +590,11 @@ public class ReplicationSource extends Thread
   // TODO What happens if the log is missing in both places?
 }
   }
+} catch (LeaseNotRecoveredException lnre) {
+  // HBASE-15019 the WAL was not closed due to some hiccup.
+  LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + currentPath, lnre);
+  recoverLease(conf, currentPath);
+  this.reader = null;
 } catch (IOException ioe) {
   if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
   LOG.warn(this.peerClusterZnode + " Got: ", ioe);
@@ -606,6 +614,22 @@ public class ReplicationSource extends Thread
 return true;
   }
 
+  private void recoverLease(final Configuration conf, final Path path) {
+try {
+  final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+  fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
+@Override
+public boolean progress() {
+  LOG.debug("recover WAL lease: " + path);
+  return isActive();
+}
+  });
+} catch (IOException e) {
+  LOG.warn("unable to recover lease for WAL: " + path, e);
+}
+  }
+
   /*
* Checks whether the current log file is empty, and that it is not a recovered queue. This is to
* handle the scenario where, in an idle cluster, there is no entry in the current log and we keep on
@@ -861,9 +885,9 @@ public class ReplicationSource extends Thread
  * @param p path to split
  * @return start time
  */
-private long getTS(Path p) {
-  String[] parts = p.getName().split("\\.");
-  return Long.parseLong(parts[parts.length-1]);
+private static long getTS(Path p) {
+  int tsIndex = p.getName().lastIndexOf('.') + 1;
+  return Long.parseLong(p.getName().substring(tsIndex));
 }
   }
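
The getTS() rewrite above trades the regex split for an index computation. A quick standalone illustration; the WAL file name below is a made-up example of the usual "<prefix>.<timestamp>" form, not taken from the patch:

import org.apache.hadoop.fs.Path;

public class GetTsDemo {
  // Same logic as the patched getTS(): take everything after the last dot.
  private static long getTS(Path p) {
    int tsIndex = p.getName().lastIndexOf('.') + 1;
    return Long.parseLong(p.getName().substring(tsIndex));
  }

  public static void main(String[] args) {
    // Hypothetical WAL name; earlier dots in the name are ignored, which is
    // why lastIndexOf is the right primitive here.
    Path p = new Path("/hbase/oldWALs/host%2C16020%2C1453939200000.1453942800000");
    System.out.println(getTS(p));  // prints 1453942800000
  }
}

Besides dropping the regex and the temporary String[] allocation, making the method static documents that it reads no ReplicationSource state.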
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5041485a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
new file mode 100644
index 000..ca769b8
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
@@ -0,0 +1,47 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this fi
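
The control flow this patch adds to ReplicationSource is easiest to see outside the diff: openReader() gives up its reader, forces HDFS lease recovery, and lets the outer loop try again. A minimal sketch of that shape; tryOpen(), recoverLease() and isActive() are stand-ins for the real methods, not the committed code:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;

// Recover-and-retry shape enabled by HBASE-15019 (sketch only).
abstract class RecoverAndRetrySketch {
  abstract Object tryOpen(Path wal) throws Exception;  // may throw LeaseNotRecoveredException
  abstract void recoverLease(Path wal);                // wraps FSUtils.recoverFileLease(...)
  abstract boolean isActive();                         // replication source liveness flag

  Object openWithRecovery(Path wal) throws Exception {
    Object reader = null;
    while (reader == null && isActive()) {
      try {
        reader = tryOpen(wal);
      } catch (LeaseNotRecoveredException lnre) {
        // The old WAL writer died without closing the file, so its length is
        // untrustworthy; reclaim the HDFS lease, then loop and try again.
        recoverLease(wal);
      }
    }
    return reader;
  }
}

Note the CancelableProgressable in the diff: its progress() callback returns the source's liveness flag, so a lease recovery that drags on (say, while the NameNode restarts) aborts cleanly when the replication source is stopped.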

[5/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
HBASE-15019 Replication stuck when HDFS is restarted.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a217da8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a217da8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a217da8

Branch: refs/heads/master
Commit: 8a217da8fd3990f9880270eb1e50d8f87d1e92fb
Parents: 1ee0768
Author: Matteo Bertozzi 
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 10:09:02 2016 -0800

--
 .../regionserver/ReplicationSource.java | 32 ++---
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 78 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a217da8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index b4975bf..51ca7ed 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -60,8 +60,10 @@ import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -450,9 +452,9 @@ public class ReplicationSource extends Thread
  * @param p path to split
  * @return start time
  */
-private long getTS(Path p) {
-  String[] parts = p.getName().split("\\.");
-  return Long.parseLong(parts[parts.length-1]);
+private static long getTS(Path p) {
+  int tsIndex = p.getName().lastIndexOf('.') + 1;
+  return Long.parseLong(p.getName().substring(tsIndex));
 }
   }
 
@@ -791,7 +793,6 @@ public class ReplicationSource extends Thread
  * @return true if we should continue with that file, false if we are over 
with it
  */
 protected boolean openReader(int sleepMultiplier) {
-
   try {
 try {
   if (LOG.isTraceEnabled()) {
@@ -872,6 +873,11 @@ public class ReplicationSource extends Thread
// TODO What happens if the log is missing in both places?
   }
 }
+  } catch (LeaseNotRecoveredException lnre) {
+// HBASE-15019 the WAL was not closed due to some hiccup.
+LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + currentPath, lnre);
+recoverLease(conf, currentPath);
+this.reader = null;
   } catch (IOException ioe) {
 if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
 LOG.warn(peerClusterZnode + " Got: ", ioe);
@@ -881,7 +887,7 @@ public class ReplicationSource extends Thread
   // which throws a NPE if we open a file before any data node has the 
most recent block
   // Just sleep and retry. Will require re-reading compressed WALs for 
compressionContext.
   LOG.warn("Got NPE opening reader, will retry.");
-} else if (sleepMultiplier == maxRetriesMultiplier) {
+} else if (sleepMultiplier >= maxRetriesMultiplier) {
   // TODO Need a better way to determine if a file is really gone but
   // TODO without scanning all logs dir
   LOG.warn("Waited too long for this file, considering dumping");
@@ -891,6 +897,22 @@ public class ReplicationSource extends Thread
   return true;
 }
 
+private void recoverLease(final Configuration conf, final Path path) {
+  try {
+final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
+  @Override
+  public boolean progress() {
+LOG.debug("recover WAL lease: " + path);
+return isWorkerActive();
+  }
+});
+  } catch (IOException e) {
+LOG.warn("unable to recover lease for WAL: " + path, e);
+  }
+}
+
 /*
  * Checks whe
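
Also worth noting in the openReader() hunk above: the retry guard loosens from sleepMultiplier == maxRetriesMultiplier to >=. The multiplier is bumped by the surrounding retry loop, so if it ever stepped past the maximum between checks, an exact-equality test would presumably never fire and the "considering dumping" path could be skipped forever; >= makes the give-up branch stick.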

[1/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
Repository: hbase
Updated Branches:
  refs/heads/0.98 444debddd -> 60c6b6df1
  refs/heads/branch-1 486f7612b -> 67c2fc7cd
  refs/heads/branch-1.0 ed2dbda59 -> 9c42beaa3
  refs/heads/branch-1.1 aa5dfae30 -> 5041485aa
  refs/heads/branch-1.2 51998b9eb -> 778c9730b
  refs/heads/master 1ee07688c -> 8a217da8f


HBASE-15019 Replication stuck when HDFS is restarted.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/778c9730
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/778c9730
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/778c9730

Branch: refs/heads/branch-1.2
Commit: 778c9730b3403f4b330578b44cce3f56d19cf25e
Parents: 51998b9
Author: Matteo Bertozzi 
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 09:07:31 2016 -0800

--
 .../regionserver/ReplicationSource.java | 33 +++---
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 79 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/778c9730/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 0ded4fe..95f14fa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -59,8 +59,11 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.collect.Lists;
@@ -418,9 +421,9 @@ public class ReplicationSource extends Thread
  * @param p path to split
  * @return start time
  */
-private long getTS(Path p) {
-  String[] parts = p.getName().split("\\.");
-  return Long.parseLong(parts[parts.length-1]);
+private static long getTS(Path p) {
+  int tsIndex = p.getName().lastIndexOf('.') + 1;
+  return Long.parseLong(p.getName().substring(tsIndex));
 }
   }
 
@@ -734,7 +737,6 @@ public class ReplicationSource extends Thread
  * @return true if we should continue with that file, false if we are over 
with it
  */
 protected boolean openReader(int sleepMultiplier) {
-
   try {
 try {
   if (LOG.isTraceEnabled()) {
@@ -815,6 +817,11 @@ public class ReplicationSource extends Thread
// TODO What happens if the log is missing in both places?
   }
 }
+  } catch (LeaseNotRecoveredException lnre) {
+// HBASE-15019 the WAL was not closed due to some hiccup.
+LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + currentPath, lnre);
+recoverLease(conf, currentPath);
+this.reader = null;
   } catch (IOException ioe) {
 if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
 LOG.warn(peerClusterZnode + " Got: ", ioe);
@@ -824,7 +831,7 @@ public class ReplicationSource extends Thread
   // which throws a NPE if we open a file before any data node has the 
most recent block
   // Just sleep and retry. Will require re-reading compressed WALs for 
compressionContext.
   LOG.warn("Got NPE opening reader, will retry.");
-} else if (sleepMultiplier == maxRetriesMultiplier) {
+} else if (sleepMultiplier >= maxRetriesMultiplier) {
   // TODO Need a better way to determine if a file is really gone but
   // TODO without scanning all logs dir
   LOG.warn("Waited too long for this file, considering dumping");
@@ -834,6 +841,22 @@ public class ReplicationSource extends Thread
   return true;
 }
 
+private void recoverLease(final Configuration conf, final Path path) {
+  try {
+final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable(

[3/6] hbase git commit: HBASE-15019 Replication stuck when HDFS is restarted.

2016-01-28 Thread mbertozzi
HBASE-15019 Replication stuck when HDFS is restarted.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c42beaa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c42beaa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c42beaa

Branch: refs/heads/branch-1.0
Commit: 9c42beaa3423e1476aa87e56f59168ed5ce0f461
Parents: ed2dbda
Author: Matteo Bertozzi 
Authored: Thu Jan 21 00:05:57 2016 -0600
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 09:49:54 2016 -0800

--
 .../regionserver/ReplicationSource.java | 30 +++--
 .../hbase/util/LeaseNotRecoveredException.java  | 47 
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  5 ++-
 3 files changed, 78 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c42beaa/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f3734b2..c542502 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -53,8 +53,11 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
 import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.collect.Lists;
@@ -587,6 +590,11 @@ public class ReplicationSource extends Thread
   // TODO What happens if the log is missing in both places?
 }
   }
+} catch (LeaseNotRecoveredException lnre) {
+  // HBASE-15019 the WAL was not closed due to some hiccup.
+  LOG.warn(peerClusterZnode + " Try to recover the WAL lease " + currentPath, lnre);
+  recoverLease(conf, currentPath);
+  this.reader = null;
 } catch (IOException ioe) {
   if (ioe instanceof EOFException && isCurrentLogEmpty()) return true;
   LOG.warn(this.peerClusterZnode + " Got: ", ioe);
@@ -606,6 +614,22 @@ public class ReplicationSource extends Thread
 return true;
   }
 
+  private void recoverLease(final Configuration conf, final Path path) {
+try {
+  final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
+  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
+  fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
+@Override
+public boolean progress() {
+  LOG.debug("recover WAL lease: " + path);
+  return isActive();
+}
+  });
+} catch (IOException e) {
+  LOG.warn("unable to recover lease for WAL: " + path, e);
+}
+  }
+
   /*
* Checks whether the current log file is empty, and that it is not a recovered queue. This is to
* handle the scenario where, in an idle cluster, there is no entry in the current log and we keep on
@@ -857,9 +881,9 @@ public class ReplicationSource extends Thread
  * @param p path to split
  * @return start time
  */
-private long getTS(Path p) {
-  String[] parts = p.getName().split("\\.");
-  return Long.parseLong(parts[parts.length-1]);
+private static long getTS(Path p) {
+  int tsIndex = p.getName().lastIndexOf('.') + 1;
+  return Long.parseLong(p.getName().substring(tsIndex));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9c42beaa/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
new file mode 100644
index 000..ca769b8
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LeaseNotRecoveredException.java
@@ -0,0 +1,47 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this fi

[1/2] hbase git commit: HBASE-15142 Procedure v2 - Basic WebUI listing the procedures

2016-01-28 Thread mbertozzi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 67c2fc7cd -> 2f571b145
  refs/heads/master 8a217da8f -> 14dd959aa


HBASE-15142 Procedure v2 - Basic WebUI listing the procedures


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14dd959a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14dd959a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14dd959a

Branch: refs/heads/master
Commit: 14dd959aa2145be3fddee6c4dc001508393784e7
Parents: 8a217da
Author: Matteo Bertozzi 
Authored: Thu Jan 28 10:13:42 2016 -0800
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 10:13:42 2016 -0800

--
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|   1 +
 .../hbase-webapps/master/procedures.jsp | 126 +++
 .../resources/hbase-webapps/master/snapshot.jsp |   1 +
 .../resources/hbase-webapps/master/table.jsp|   1 +
 .../hbase-webapps/master/tablesDetailed.jsp |   1 +
 .../main/resources/hbase-webapps/master/zk.jsp  |   1 +
 6 files changed, 131 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14dd959a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index d3685d8..1af3db9 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -124,6 +124,7 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 Home
 Table Details
+Procedures
 Local Logs
 Log Level
 Debug Dump

http://git-wip-us.apache.org/repos/asf/hbase/blob/14dd959a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
--
diff --git 
a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
new file mode 100644
index 000..443d9d6
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
@@ -0,0 +1,126 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+--%>
+<%@ page contentType="text/html;charset=UTF-8"
+  import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
+  import="java.util.Collections"
+  import="java.util.Comparator"
+  import="java.util.Date"
+  import="java.util.List"
+  import="org.apache.hadoop.hbase.HBaseConfiguration"
+  import="org.apache.hadoop.hbase.ProcedureInfo"
+  import="org.apache.hadoop.hbase.master.HMaster"
+  import="org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv"
+  import="org.apache.hadoop.hbase.procedure2.ProcedureExecutor"
+%>
+<%
+  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
+  ProcedureExecutor<MasterProcedureEnv> procExecutor = master.getMasterProcedureExecutor();
+
+  List<ProcedureInfo> procedures = procExecutor.listProcedures();
+  Collections.sort(procedures, new Comparator<ProcedureInfo>() {
+@Override
+public int compare(ProcedureInfo lhs, ProcedureInfo rhs) {
+  long cmp = lhs.getParentId() - rhs.getParentId();
+  cmp = cmp != 0 ? cmp : lhs.getProcId() - rhs.getProcId();
+  return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+  });
+%>
+
+
+http://www.w3.org/1999/xhtml";>
+
+
+HBase Master Procedures: <%= master.getServerName() %>
+
+
+
+
+
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+Home
+Table Details
+Procedures
+Local Logs
+Log Level
+Debug Dump
+Me

[2/2] hbase git commit: HBASE-15142 Procedure v2 - Basic WebUI listing the procedures

2016-01-28 Thread mbertozzi
HBASE-15142 Procedure v2 - Basic WebUI listing the procedures


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2f571b14
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2f571b14
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2f571b14

Branch: refs/heads/branch-1
Commit: 2f571b1457acc3a4b9cbc0cf14f191f8657c20f5
Parents: 67c2fc7
Author: Matteo Bertozzi 
Authored: Thu Jan 28 10:13:42 2016 -0800
Committer: Matteo Bertozzi 
Committed: Thu Jan 28 10:35:10 2016 -0800

--
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|   1 +
 .../hbase-webapps/master/procedures.jsp | 126 +++
 .../resources/hbase-webapps/master/snapshot.jsp |   1 +
 .../resources/hbase-webapps/master/table.jsp|   1 +
 .../hbase-webapps/master/tablesDetailed.jsp |   1 +
 .../main/resources/hbase-webapps/master/zk.jsp  |   1 +
 6 files changed, 131 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2f571b14/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 33df3d8..7429498 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -125,6 +125,7 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 Home
 Table Details
+Procedures
 Local Logs
 Log Level
 Debug Dump

http://git-wip-us.apache.org/repos/asf/hbase/blob/2f571b14/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
--
diff --git 
a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
new file mode 100644
index 000..443d9d6
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
@@ -0,0 +1,126 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+--%>
+<%@ page contentType="text/html;charset=UTF-8"
+  import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
+  import="java.util.Collections"
+  import="java.util.Comparator"
+  import="java.util.Date"
+  import="java.util.List"
+  import="org.apache.hadoop.hbase.HBaseConfiguration"
+  import="org.apache.hadoop.hbase.ProcedureInfo"
+  import="org.apache.hadoop.hbase.master.HMaster"
+  import="org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv"
+  import="org.apache.hadoop.hbase.procedure2.ProcedureExecutor"
+%>
+<%
+  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
+  ProcedureExecutor<MasterProcedureEnv> procExecutor = master.getMasterProcedureExecutor();
+
+  List<ProcedureInfo> procedures = procExecutor.listProcedures();
+  Collections.sort(procedures, new Comparator<ProcedureInfo>() {
+@Override
+public int compare(ProcedureInfo lhs, ProcedureInfo rhs) {
+  long cmp = lhs.getParentId() - rhs.getParentId();
+  cmp = cmp != 0 ? cmp : lhs.getProcId() - rhs.getProcId();
+  return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+  });
+%>
+
+
+http://www.w3.org/1999/xhtml";>
+
+
+HBase Master Procedures: <%= master.getServerName() %>
+
+
+
+
+
+
+
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+Home
+Table Details
+Procedures
+Local Logs
+Log Level
+Debug Dump
+Metrics Dump
+<% if (HBaseConfiguration.isShowConfInServlet()) { %>
+HBase Configuration
+

[hbase] Git Push Summary

2016-01-28 Thread enis
Repository: hbase
Updated Tags:  refs/tags/rel/1.0.3 [created] f8ec84f2b


svn commit: r12075 - in /release/hbase/hbase-1.0.3: ./ hbase-1.0.3-bin.tar.gz hbase-1.0.3-bin.tar.gz.asc hbase-1.0.3-bin.tar.gz.mds hbase-1.0.3-src.tar.gz hbase-1.0.3-src.tar.gz.asc hbase-1.0.3-src.ta

2016-01-28 Thread enis
Author: enis
Date: Thu Jan 28 21:49:23 2016
New Revision: 12075

Log:
Release HBase 1.0.3

Added:
release/hbase/hbase-1.0.3/
release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz   (with props)
release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.asc
release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.mds
release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz   (with props)
release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.asc
release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.mds

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz
==
Binary file - no diff available.

Propchange: release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.asc
==
--- release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.asc (added)
+++ release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.asc Thu Jan 28 21:49:23 
2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWnwTwAAoJEAhFjDnpZLX/i4QP/2ceZCdDjgxdVgH8OJZJvw2j
+Z4mGXIEP/txQnxntfe7O9Pfv0ekyaXnzF1kvaWyupepQgqASd1nKsTJt9RZr+XEy
+bpI83LWkkenpilrKNSbXhERL8eNWXbAVBhv5fx1jLfXv6hdTLcUrEfZbKxwehz8N
+PLxlxvBqfS+cRjKXvUPFTa8acD+IEg3ESaj/H+MtPSUsgVl8bOav0WGVekD4ZJ3v
+b3NjTWK9C9AMCp3c305mX7ZllD3xf9ht/sL/4SHY0brOjRXgzH7SeDZixCXfFWQ5
+dhagzZKX25yTGBDP3lGbhzcPoZnK73nsr70/KwfwedCNkTflmQxEiZ3JHZ935KO8
+USaMwWyjX+cKyRyE+WzL7g1FUYhABGfR8OzsJj1YOhrSfXue7YPnNRQ7Z2NAWBAd
+mZkqaVG0tB8wBGJ4DPs2NIANWwGrHyi23SFk7ifS3V2PLk3X5FFBD4eWuPLJe64C
+6yLZf5uHZb7YOgz4ObyidDlAruA62v8AlzqWq6jOfgafqLP1eL/BbJ77VVlqWviB
+Ax4AOXbbQjeK2+N1UcSZC7UXd+QRdwBVDFKqfZkQRwbyV3/mp0JcwQzp7vsw/JNS
+YHVD27gAB6yM9HltCyZKnNIt28kftfy5zXZPgpyjSQJJS3MIDXntdd1vQJqZexxy
+SMZQkBr0VaBAIxDj2VlZ
+=jNTO
+-END PGP SIGNATURE-

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.mds
==
--- release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.mds (added)
+++ release/hbase/hbase-1.0.3/hbase-1.0.3-bin.tar.gz.mds Thu Jan 28 21:49:23 
2016
@@ -0,0 +1,17 @@
+hbase-1.0.3-bin.tar.gz:MD5 = 7F B0 EB 04 6D 80 11 3D  DB 85 D5 B0 B1 8F 04
+ 36
+hbase-1.0.3-bin.tar.gz:   SHA1 = F04E 66A8 9481 466D 0B91  800F 76C0 90D8 8DA4
+ 52F4
+hbase-1.0.3-bin.tar.gz: RMD160 = 79C5 5BA4 3940 1F56 3014  10DE 2D57 C6C2 C0E1
+ AA07
+hbase-1.0.3-bin.tar.gz: SHA224 = A88C633E 6DA169C4 F8F66119 ED9E1ADE 3B1474A9
+ D9134A6A 1496CD3C
+hbase-1.0.3-bin.tar.gz: SHA256 = 869605E7 E4D614E3 F22B8DC9 3C7BA6D8 EC9037C7
+ A5A5FCAE A0EEDF44 9E8D244C
+hbase-1.0.3-bin.tar.gz: SHA384 = 5C7F940A F3DB6F03 8321194E 357093A8 B416653F
+ 217E192B E59A4568 EFC64716 EB754619 A5288C03
+ A4A9F0A0 98C48F1F
+hbase-1.0.3-bin.tar.gz: SHA512 = 83097A02 45247D9A C017E311 C47A1ED5 32194161
+ 69400A06 EA1D2DF0 A6A2E9DC 08566B9D 965B3141
+ 590C8C0D 4A927C07 47304636 59A58B03 7D605292
+ 14A8B548

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz
==
Binary file - no diff available.

Propchange: release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.asc
==
--- release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.asc (added)
+++ release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.asc Thu Jan 28 21:49:23 
2016
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWnwT9AAoJEAhFjDnpZLX/u1YQAKxrzgirQZUB4aq7qy10w9f/
+7OC0Pk/mEv8URk3dtgDqRPsdDTeWeA329kmq9iNDrjDQkjzniB0XyUUYZzTOt5wJ
+rwJDUHaJLTyGPq2vbkEXsGpEfkB3yQbQwaZTKx4INEssh0Dc+otPINcjwcEfhbzA
+5qRK2CDji9OG0YLh8VZ1IPz7ZoWTHEHjcQoco6VVk0AuWsw82q+u2AlwCGry3pAK
+FF0ma8hq2y2cLYH7RLdyiT1tcJa/rXIc1WVm1lv2zuOrodYzz1iObp1bHk1nU42j
+OzZ8xdykzw6XHwpEcvsm3+EPOYLmRfzNmjz23PTq6B7TpfJHBDhW7gDCAOYOQlcP
+r40U3VsSXIuRzqta6uOSGZsIQDYR5NsHujFiLbzYD26q7nDPvfyMSyMvta6C0Fjk
+5LqBNPARGI4V+tq/PlP0Aw7dFKFP4BJ2xZn90CekPbJ1t+mgzYc4afN87Xo4YTu5
+saqn0RgZ9uQX9pJoJXCAtmBEnGiQuP6XgXVry94qns8dRdR5A+wpB5Q6dx1fS3p6
+lw88msZe9sgRnCLeADuc3Xyok55t7TrF7EeWP+4PX7HhNqLBsFZMMFrYFKFf58Ff
+i7zKOsqIoehNcEb5FOkeKDiiNNbS1CRN+usrdb/BIcQS9hd55oAd6baZIruPWOfw
+jEfcQTrghqz82Owzuy49
+=zGCF
+-END PGP SIGNATURE-

Added: release/hbase/hbase-1.0.3/hbase-1.0.3-src.tar.gz.mds

svn commit: r12076 - /release/hbase/hbase-1.0.2/

2016-01-28 Thread enis
Author: enis
Date: Thu Jan 28 21:50:12 2016
New Revision: 12076

Log:
Remove HBase-1.0.2 after 1.0.3

Removed:
release/hbase/hbase-1.0.2/



hbase git commit: Updated pom.xml version to 1.0.4-SNAPSHOT

2016-01-28 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 9c42beaa3 -> 5c2022f56


Updated pom.xml version to 1.0.4-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c2022f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c2022f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c2022f5

Branch: refs/heads/branch-1.0
Commit: 5c2022f56ebff3b7fd387c609970cc7042651e14
Parents: 9c42bea
Author: Enis Soztutar 
Authored: Thu Jan 28 13:58:21 2016 -0800
Committer: Enis Soztutar 
Committed: Thu Jan 28 13:58:21 2016 -0800

--
 hbase-annotations/pom.xml | 2 +-
 hbase-assembly/pom.xml| 2 +-
 hbase-checkstyle/pom.xml  | 4 ++--
 hbase-client/pom.xml  | 2 +-
 hbase-common/pom.xml  | 2 +-
 hbase-examples/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml   | 2 +-
 hbase-hadoop2-compat/pom.xml  | 2 +-
 hbase-it/pom.xml  | 2 +-
 hbase-prefix-tree/pom.xml | 2 +-
 hbase-protocol/pom.xml| 2 +-
 hbase-resource-bundle/pom.xml | 2 +-
 hbase-rest/pom.xml| 2 +-
 hbase-server/pom.xml  | 2 +-
 hbase-shell/pom.xml   | 2 +-
 hbase-testing-util/pom.xml| 2 +-
 hbase-thrift/pom.xml  | 2 +-
 pom.xml   | 2 +-
 18 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index f8b7986..f22ea05 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 02ce2d8..9bbb9d4 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
   hbase-assembly

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-checkstyle/pom.xml
--
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 05af30d..654ecec 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -24,14 +24,14 @@
 4.0.0
 org.apache.hbase
 hbase-checkstyle
-1.0.3
+1.0.4-SNAPSHOT
 Apache HBase - Checkstyle
 Module to hold Checkstyle properties for HBase.
 
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 58732f9..770b60c 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -24,7 +24,7 @@
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 1b654ba..260b268 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 966c2f9..24af59e 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
   
   hbase-examples

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-hadoop-compat/pom.xml
--
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index 1de2bb5..3964048 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
 
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSHOT
 ..
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c2022f5/hbase-hadoop2-compat/pom.xml
--
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 5c85b01..5198e5e 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -21,7 +21,7 @@ limitations under the License.
   
 hbase
 org.apache.hbase
-1.0.3
+1.0.4-SNAPSH

[1/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 14dd959aa -> b3b1ce99c


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b1ce99/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
new file mode 100644
index 000..5d5be87
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
+ * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
+ * for the specific language governing permissions and limitations under the 
License.
+ */
+package org.apache.hadoop.hbase.regionserver.throttle;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
+import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestFlushWithThroughputController {
+
+  private static final Log LOG = 
LogFactory.getLog(TestFlushWithThroughputController.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+
+  private static final double EPSILON = 1E-6;
+
+  private final TableName tableName = 
TableName.valueOf(getClass().getSimpleName());
+
+  private final byte[] family = Bytes.toBytes("f");
+
+  private final byte[] qualifier = Bytes.toBytes("q");
+
+  private Store getStoreWithName(TableName tableName) {
+MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
+for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
+  HRegionServer hrs = rsts.get(i).getRegionServer();
+  for (Region region : hrs.getOnlineRegions(tableName)) {
+return region.getStores().iterator().next();
+  }
+}
+return null;
+  }
+
+  private Store generateAndFlushData() throws IOException {
+HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+if (admin.tableExists(tableName)) {
+  admin.disableTable(tableName);
+  admin.deleteTable(tableName);
+}
+HTable table = TEST_UTIL.createTable(tableName, family);
+Random rand = new Random();
+for (int i = 0; i < 10; i++) {
+  for (int j = 0; j < 10; j++) {
+byte[] value = new byte[256 * 1024];
+rand.nextBytes(value);
+table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, 
qualifier, value));
+  }
+  admin.flush(tableName);
+}
+return getStoreWithName(tableName);
+  }
+
+  private long testFlushWithThroughputLimit() throws Exception {
+long throughputLimit = 1L * 1024 * 1024;
+Configuration conf = TEST_UTIL.getConfiguration();
+
conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
+  PressureAwareFlushThroughputCont
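
For scale: each round in generateAndFlushData() above puts 10 values of 256 KB, roughly 2.5 MB per flush and about 25 MB over the 10 flushes. Assuming the flushed store-file size tracks the raw payload, the 1 MB/s throughputLimit set here should stretch each flush to at least a couple of seconds, which is presumably what the elapsed-time assertions in the remainder of the test (truncated by the archive) measure.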

[2/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b1ce99/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
new file mode 100644
index 000..c0d3b74
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.throttle;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
+
+/**
+ * A throughput controller which uses the following schema to limit throughput
+ * 
+ * If compaction pressure is greater than 1.0, no limitation.
+ * In off peak hours, use a fixed throughput limitation
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK}
+ * In normal hours, the max throughput is tuned between
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND} and
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND}, using the formula
+ * "lower + (higher - lower) * compactionPressure", where compactionPressure is in
+ * range [0.0, 1.0]
+ * 
+ * @see org.apache.hadoop.hbase.regionserver.Store#getCompactionPressure()
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class PressureAwareCompactionThroughputController extends 
PressureAwareThroughputController {
+
+  private final static Log LOG = LogFactory
+  .getLog(PressureAwareCompactionThroughputController.class);
+
+  public static final String 
HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND =
+  "hbase.hstore.compaction.throughput.higher.bound";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND =
+  20L * 1024 * 1024;
+
+  public static final String 
HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
+  "hbase.hstore.compaction.throughput.lower.bound";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
+  10L * 1024 * 1024;
+
+  public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK =
+  "hbase.hstore.compaction.throughput.offpeak";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = Long.MAX_VALUE;
+
+  public static final String HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD =
+  "hbase.hstore.compaction.throughput.tune.period";
+
+  private static final int DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = 
60 * 1000;
+
+  // check compaction throughput every this size
+  private static final String 
HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL =
+"hbase.hstore.compaction.throughput.control.check.interval";
+
+  private long maxThroughputOffpeak;
+
+  @Override
+  public void setup(final RegionServerServices server) {
+server.getChoreService().scheduleChore(
+  new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) {
+
+@Override
+protected void chore() {
+  tune(server.getCompactionPressure());
+}
+  });
+  }
+
+  private void tune(double compactionPressure) {
+double maxThroughputToSet;
+if (compactionPressure > 1.0) {
+  // set to unlimited if some stores already reach the blocking store file 
count
+  maxThroughputToSet = Double.MAX_VALUE;
+} else if (offPeakHours.isOffPeakHour()) {
+  maxThroughputToSet = maxThroughputOffpeak;
+} else {
+  // compactionPressure is between 0.0 and 1.0, we use a simp
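
The tuning rule from the javadoc above is easy to sanity-check in isolation. A minimal sketch using the default bounds from the patch (10 MB/s lower, 20 MB/s higher); this is an illustration, not the controller code:

public final class TuneDemo {
  private static final double LOWER = 10.0 * 1024 * 1024;   // default lower bound from the patch
  private static final double HIGHER = 20.0 * 1024 * 1024;  // default higher bound from the patch

  // The rule from the javadoc: unlimited above 1.0 pressure, a fixed cap
  // off-peak, otherwise linear interpolation between the two bounds.
  static double maxThroughput(double pressure, boolean offPeak, double offPeakCap) {
    if (pressure > 1.0) {
      return Double.MAX_VALUE;  // some store already hit the blocking file count
    }
    if (offPeak) {
      return offPeakCap;
    }
    return LOWER + (HIGHER - LOWER) * pressure;
  }

  public static void main(String[] args) {
    // pressure 0.5 lands halfway between the bounds: 15 MB/s
    System.out.println(maxThroughput(0.5, false, Double.MAX_VALUE) / (1024 * 1024));
  }
}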

[3/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
HBASE-14969 Add throughput controller for flush

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3b1ce99
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3b1ce99
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3b1ce99

Branch: refs/heads/master
Commit: b3b1ce99c63d79401ddda9c114850dea61af0afb
Parents: 14dd959
Author: Yu Li 
Authored: Fri Jan 29 09:30:20 2016 +0800
Committer: zhangduo 
Committed: Fri Jan 29 09:32:01 2016 +0800

--
 .../hbase/mob/DefaultMobStoreCompactor.java |   4 +-
 .../hbase/mob/DefaultMobStoreFlusher.java   |   3 +-
 .../hbase/regionserver/CompactSplitThread.java  |  10 +-
 .../hbase/regionserver/CompactionTool.java  |   4 +-
 .../hbase/regionserver/DefaultStoreEngine.java  |  10 +-
 .../hbase/regionserver/DefaultStoreFlusher.java |   5 +-
 .../hadoop/hbase/regionserver/HMobStore.java|   4 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  18 +-
 .../hbase/regionserver/HRegionServer.java   |  35 ++-
 .../hadoop/hbase/regionserver/HStore.java   |  23 +-
 .../regionserver/RegionServerServices.java  |  13 +
 .../apache/hadoop/hbase/regionserver/Store.java |  12 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java |  51 +++-
 .../hbase/regionserver/StripeStoreEngine.java   |   8 +-
 .../hbase/regionserver/StripeStoreFlusher.java  |   5 +-
 .../compactions/CompactionContext.java  |   5 +-
 .../CompactionThroughputController.java |  52 
 .../CompactionThroughputControllerFactory.java  |  61 
 .../regionserver/compactions/Compactor.java |  28 +-
 .../compactions/DefaultCompactor.java   |  10 +-
 .../NoLimitCompactionThroughputController.java  |  66 
 ...sureAwareCompactionThroughputController.java | 263 
 .../compactions/StripeCompactionPolicy.java |   9 +-
 .../compactions/StripeCompactor.java|  11 +-
 .../CompactionThroughputControllerFactory.java  |  91 ++
 .../FlushThroughputControllerFactory.java   |  65 
 .../throttle/NoLimitThroughputController.java   |  62 
 ...sureAwareCompactionThroughputController.java | 153 ++
 .../PressureAwareFlushThroughputController.java | 136 +
 .../PressureAwareThroughputController.java  | 177 +++
 .../throttle/ThroughputControlUtil.java |  55 
 .../throttle/ThroughputController.java  |  52 
 .../hadoop/hbase/MockRegionServerServices.java  |  10 +
 .../org/apache/hadoop/hbase/TestIOFencing.java  |   7 +-
 .../TestRegionObserverScannerOpenHook.java  |   8 +-
 .../hadoop/hbase/master/MockRegionServer.java   |  10 +
 .../regionserver/TestCompactSplitThread.java|   4 +-
 .../hbase/regionserver/TestCompaction.java  |  20 +-
 .../hbase/regionserver/TestHMobStore.java   |   4 +-
 .../regionserver/TestHRegionReplayEvents.java   |   4 +-
 .../TestSplitTransactionOnCluster.java  |   4 +-
 .../hadoop/hbase/regionserver/TestStore.java|   4 +-
 .../hbase/regionserver/TestStripeCompactor.java |   6 +-
 .../regionserver/TestStripeStoreEngine.java |  10 +-
 .../TestCompactionWithThroughputController.java | 302 --
 .../compactions/TestStripeCompactionPolicy.java |  15 +-
 .../TestCompactionWithThroughputController.java | 306 +++
 .../TestFlushWithThroughputController.java  | 217 +
 .../hbase/regionserver/wal/TestWALReplay.java   |  18 +-
 49 files changed, 1559 insertions(+), 891 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3b1ce99/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index b5f412d..33eb7b9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Writer;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
-import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -151,7 +151,7 @@ public class DefaultMobStoreCompactor extends 
DefaultCompactor {
   @Override
   protected boolean performCompaction(FileDetails 

[3/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
HBASE-14969 Add throughput controller for flush

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0d21fa92
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0d21fa92
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0d21fa92

Branch: refs/heads/branch-1
Commit: 0d21fa92791ae7d704f48311539facba7061770b
Parents: 2f571b1
Author: 绝顶 
Authored: Fri Jan 29 09:38:13 2016 +0800
Committer: zhangduo 
Committed: Fri Jan 29 09:39:15 2016 +0800

--
 .../hbase/regionserver/CompactSplitThread.java  |  10 +-
 .../hbase/regionserver/CompactionTool.java  |   4 +-
 .../hbase/regionserver/DefaultStoreEngine.java  |  10 +-
 .../hbase/regionserver/DefaultStoreFlusher.java |   5 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  16 +-
 .../hbase/regionserver/HRegionServer.java   |  34 ++-
 .../hadoop/hbase/regionserver/HStore.java   |  23 +-
 .../regionserver/RegionServerServices.java  |  13 +
 .../apache/hadoop/hbase/regionserver/Store.java |  12 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java |  51 +++-
 .../hbase/regionserver/StripeStoreEngine.java   |   8 +-
 .../hbase/regionserver/StripeStoreFlusher.java  |   5 +-
 .../compactions/CompactionContext.java  |   5 +-
 .../CompactionThroughputController.java |  52 
 .../CompactionThroughputControllerFactory.java  |  16 +-
 .../regionserver/compactions/Compactor.java |  25 +-
 .../compactions/DefaultCompactor.java   |  10 +-
 .../NoLimitCompactionThroughputController.java  |  66 
 ...sureAwareCompactionThroughputController.java | 263 
 .../compactions/StripeCompactionPolicy.java |   9 +-
 .../compactions/StripeCompactor.java|  11 +-
 .../CompactionThroughputControllerFactory.java  |  91 ++
 .../FlushThroughputControllerFactory.java   |  65 
 .../throttle/NoLimitThroughputController.java   |  62 
 ...sureAwareCompactionThroughputController.java | 153 ++
 .../PressureAwareFlushThroughputController.java | 136 +
 .../PressureAwareThroughputController.java  | 177 +++
 .../throttle/ThroughputControlUtil.java |  55 
 .../throttle/ThroughputController.java  |  52 
 .../hadoop/hbase/MockRegionServerServices.java  |  11 +
 .../org/apache/hadoop/hbase/TestIOFencing.java  |   8 +-
 .../TestRegionObserverScannerOpenHook.java  |  10 +-
 .../hadoop/hbase/master/MockRegionServer.java   |  11 +
 .../regionserver/TestCompactSplitThread.java|   4 +-
 .../hbase/regionserver/TestCompaction.java  |  20 +-
 .../regionserver/TestHRegionReplayEvents.java   |   4 +-
 .../TestSplitTransactionOnCluster.java  |   5 +-
 .../hadoop/hbase/regionserver/TestStore.java|   4 +-
 .../hbase/regionserver/TestStripeCompactor.java |   6 +-
 .../regionserver/TestStripeStoreEngine.java |  10 +-
 .../TestCompactionWithThroughputController.java | 287 -
 .../compactions/TestStripeCompactionPolicy.java |  15 +-
 .../TestCompactionWithThroughputController.java | 306 +++
 .../TestFlushWithThroughputController.java  | 217 +
 .../hbase/regionserver/wal/TestWALReplay.java   |  20 +-
 45 files changed, 1562 insertions(+), 815 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0d21fa92/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 93a686f..4a40025 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -41,8 +41,8 @@ import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController;
-import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputControllerFactory;
+import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
+import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -89,7 +89,7 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   

[2/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/0d21fa92/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
new file mode 100644
index 000..c0d3b74
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.throttle;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
+
+/**
+ * A throughput controller which uses the following schema to limit throughput
+ * 
+ * If compaction pressure is greater than 1.0, no limitation.
+ * In off peak hours, use a fixed throughput limitation
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK}
+ * In normal hours, the max throughput is tuned between
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND} and
+ * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND}, using the formula
+ * "lower + (higher - lower) * compactionPressure", where compactionPressure is in
+ * range [0.0, 1.0]
+ * 
+ * @see org.apache.hadoop.hbase.regionserver.Store#getCompactionPressure()
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class PressureAwareCompactionThroughputController extends 
PressureAwareThroughputController {
+
+  private final static Log LOG = LogFactory
+  .getLog(PressureAwareCompactionThroughputController.class);
+
+  public static final String 
HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND =
+  "hbase.hstore.compaction.throughput.higher.bound";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND =
+  20L * 1024 * 1024;
+
+  public static final String 
HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
+  "hbase.hstore.compaction.throughput.lower.bound";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND =
+  10L * 1024 * 1024;
+
+  public static final String HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK =
+  "hbase.hstore.compaction.throughput.offpeak";
+
+  private static final long 
DEFAULT_HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_OFFPEAK = Long.MAX_VALUE;
+
+  public static final String HBASE_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD =
+  "hbase.hstore.compaction.throughput.tune.period";
+
+  private static final int DEFAULT_HSTORE_COMPACTION_THROUGHPUT_TUNE_PERIOD = 
60 * 1000;
+
+  // check compaction throughput after this much data has been written
+  private static final String 
HBASE_HSTORE_COMPACTION_THROUGHPUT_CONTROL_CHECK_INTERVAL =
+"hbase.hstore.compaction.throughput.control.check.interval";
+
+  private long maxThroughputOffpeak;
+
+  @Override
+  public void setup(final RegionServerServices server) {
+server.getChoreService().scheduleChore(
+  new ScheduledChore("CompactionThroughputTuner", this, tuningPeriod) {
+
+@Override
+protected void chore() {
+  tune(server.getCompactionPressure());
+}
+  });
+  }
+
+  private void tune(double compactionPressure) {
+double maxThroughputToSet;
+if (compactionPressure > 1.0) {
+  // set to unlimited if some stores already reach the blocking store file 
count
+  maxThroughputToSet = Double.MAX_VALUE;
+} else if (offPeakHours.isOffPeakHour()) {
+  maxThroughputToSet = maxThroughputOffpeak;
+} else {
+  // compactionPressure is between 0.0 and 1.0, we use a simp
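
For reference, the tuning rule described in the class javadoc reduces to a small
piecewise function. Below is a minimal standalone sketch using the default bounds
from the constants above; the class and method names are illustrative only, not
part of the HBase API:

public class CompactionTuneSketch {
  // Default bounds from the patch: 10 MB/s lower, 20 MB/s higher.
  static final double LOWER = 10L * 1024 * 1024;
  static final double HIGHER = 20L * 1024 * 1024;

  // Piecewise rule from the javadoc: unlimited above pressure 1.0, a fixed
  // limit during off peak hours, linear between the bounds otherwise.
  static double maxThroughput(double pressure, boolean offPeak, double offPeakLimit) {
    if (pressure > 1.0) {
      return Double.MAX_VALUE;
    }
    if (offPeak) {
      return offPeakLimit;
    }
    return LOWER + (HIGHER - LOWER) * pressure;
  }

  public static void main(String[] args) {
    // Pressure 0.5 in normal hours -> 15 MB/s (1.572864E7 bytes/sec).
    System.out.println(maxThroughput(0.5, false, Double.MAX_VALUE));
  }
}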

[1/3] hbase git commit: HBASE-14969 Add throughput controller for flush

2016-01-28 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 2f571b145 -> 0d21fa927


http://git-wip-us.apache.org/repos/asf/hbase/blob/0d21fa92/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
new file mode 100644
index 000..5d5be87
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
+ * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
+ * for the specific language governing permissions and limitations under the 
License.
+ */
+package org.apache.hadoop.hbase.regionserver.throttle;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
+import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestFlushWithThroughputController {
+
+  private static final Log LOG = 
LogFactory.getLog(TestFlushWithThroughputController.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+
+  private static final double EPSILON = 1E-6;
+
+  private final TableName tableName = 
TableName.valueOf(getClass().getSimpleName());
+
+  private final byte[] family = Bytes.toBytes("f");
+
+  private final byte[] qualifier = Bytes.toBytes("q");
+
+  private Store getStoreWithName(TableName tableName) {
+MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
+for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
+  HRegionServer hrs = rsts.get(i).getRegionServer();
+  for (Region region : hrs.getOnlineRegions(tableName)) {
+return region.getStores().iterator().next();
+  }
+}
+return null;
+  }
+
+  private Store generateAndFlushData() throws IOException {
+HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+if (admin.tableExists(tableName)) {
+  admin.disableTable(tableName);
+  admin.deleteTable(tableName);
+}
+HTable table = TEST_UTIL.createTable(tableName, family);
+Random rand = new Random();
+for (int i = 0; i < 10; i++) {
+  for (int j = 0; j < 10; j++) {
+byte[] value = new byte[256 * 1024];
+rand.nextBytes(value);
+table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, 
qualifier, value));
+  }
+  admin.flush(tableName);
+}
+return getStoreWithName(tableName);
+  }
+
+  private long testFlushWithThroughputLimit() throws Exception {
+long throughputLimit = 1L * 1024 * 1024;
+Configuration conf = TEST_UTIL.getConfiguration();
+
conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
+  PressureAwareFlushThroughputCo
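
The test body is truncated above, but the wiring it exercises is visible: a
throughput limit of 1 MB/s and a controller set through
FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY. A hedged
sketch of that setup follows; the controller class name completes the truncated
line above and should be treated as an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
import org.apache.hadoop.hbase.regionserver.throttle.PressureAwareFlushThroughputController;

public class FlushThrottleSetupSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key constant taken verbatim from the test diff; the controller class
    // name is an assumed completion of the truncated line above.
    conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
        PressureAwareFlushThroughputController.class.getName());
    // The test pins the limit to 1 MB/s (throughputLimit = 1L * 1024 * 1024)
    // and then measures flush durations against it.
    System.out.println(
        conf.get(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY));
  }
}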

hbase git commit: Add hbasecon2016 logo

2016-01-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master b3b1ce99c -> 83304f823


Add hbasecon2016 logo


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83304f82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83304f82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83304f82

Branch: refs/heads/master
Commit: 83304f8232099e3b09291720f5ea81b0b2d76979
Parents: b3b1ce9
Author: stack 
Authored: Thu Jan 28 19:24:22 2016 -0400
Committer: stack 
Committed: Thu Jan 28 22:42:19 2016 -0400

--
 .../resources/images/hbasecon2016-stack-logo.jpg   | Bin 0 -> 46653 bytes
 src/main/site/site.xml |   6 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/83304f82/src/main/site/resources/images/hbasecon2016-stack-logo.jpg
--
diff --git a/src/main/site/resources/images/hbasecon2016-stack-logo.jpg 
b/src/main/site/resources/images/hbasecon2016-stack-logo.jpg
new file mode 100644
index 000..a613c72
Binary files /dev/null and 
b/src/main/site/resources/images/hbasecon2016-stack-logo.jpg differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/83304f82/src/main/site/site.xml
--
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index 0efc006..b847293 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -43,9 +43,13 @@
 
   
   
-
+    <name>hbasecon2016</name>
+
+    <src>images/hbasecon2016-stack-logo.jpg</src>
+    <href>http://hbasecon.com/</href>
   
   
 Apache HBase



hbase git commit: Make hbasecon2016 image half size

2016-01-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 83304f823 -> 47506e805


Make hbasecon2016 image half size


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/47506e80
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/47506e80
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/47506e80

Branch: refs/heads/master
Commit: 47506e805dd62a6a21bd1a7cff9cb4799f0a7b44
Parents: 83304f8
Author: stack 
Authored: Thu Jan 28 22:46:25 2016 -0400
Committer: stack 
Committed: Thu Jan 28 22:46:25 2016 -0400

--
 .../images/hbasecon2016-stack-logo.jpg  | Bin 46653 -> 32105 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/47506e80/src/main/site/resources/images/hbasecon2016-stack-logo.jpg
--
diff --git a/src/main/site/resources/images/hbasecon2016-stack-logo.jpg 
b/src/main/site/resources/images/hbasecon2016-stack-logo.jpg
index a613c72..b59280d 100644
Binary files a/src/main/site/resources/images/hbasecon2016-stack-logo.jpg and 
b/src/main/site/resources/images/hbasecon2016-stack-logo.jpg differ



hbase git commit: HBASE-14841 Allow Dictionary to work with BytebufferedCells (Ram)

2016-01-28 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master 47506e805 -> 0de221a19


HBASE-14841 Allow Dictionary to work with BytebufferedCells (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0de221a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0de221a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0de221a1

Branch: refs/heads/master
Commit: 0de221a19d799ad515f8f4556cacd05e6b4e74f8
Parents: 47506e8
Author: ramkrishna 
Authored: Fri Jan 29 10:05:26 2016 +0530
Committer: ramkrishna 
Committed: Fri Jan 29 10:06:20 2016 +0530

--
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  19 +++
 .../hadoop/hbase/io/TagCompressionContext.java  |  35 -
 .../io/encoding/BufferedDataBlockEncoder.java   |   6 +-
 .../apache/hadoop/hbase/io/util/Dictionary.java |  13 +-
 .../hadoop/hbase/io/util/LRUDictionary.java | 141 ---
 .../hadoop/hbase/util/ByteBufferUtils.java  |  16 +++
 .../hbase/io/TestTagCompressionContext.java |  78 +-
 7 files changed, 272 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0de221a1/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 1b38b56..7db1c76 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.TagCompressionContext;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.ByteRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1629,6 +1630,24 @@ public final class CellUtil {
 return new FirstOnRowDeleteFamilyCell(row, fam);
   }
 
+  /**
+   * Compresses the tags of the given cell to the output stream using the TagCompressionContext.
+   * @param out the output stream to which the compressed tags are written
+   * @param cell the cell whose tags are to be compressed
+   * @param tagCompressionContext the TagCompressionContext to use
+   * @throws IOException if the compression encounters an issue
+   */
+  public static void compressTags(DataOutputStream out, Cell cell,
+  TagCompressionContext tagCompressionContext) throws IOException {
+if (cell instanceof ByteBufferedCell) {
+  tagCompressionContext.compressTags(out, ((ByteBufferedCell) 
cell).getTagsByteBuffer(),
+  ((ByteBufferedCell) cell).getTagsPosition(), cell.getTagsLength());
+} else {
+  tagCompressionContext.compressTags(out, cell.getTagsArray(), 
cell.getTagsOffset(),
+  cell.getTagsLength());
+}
+  }
+
   @InterfaceAudience.Private
   /**
* These cells are used in reseeks/seeks to improve the read performance.
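
The instanceof dispatch above is the substance of the change: a ByteBuffer-backed
cell hands its buffer straight to the compressor instead of being copied into a
temporary array first. Here is a self-contained sketch of the same pattern; the
consume() overloads are hypothetical stand-ins for
TagCompressionContext.compressTags:

import java.nio.ByteBuffer;

public class TagsDispatchSketch {
  // Stand-in for the ByteBuffer-taking compressTags overload.
  static void consume(ByteBuffer buf, int pos, int len) {
    System.out.println("buffer path: " + len + " tag bytes at position " + pos);
  }

  // Stand-in for the byte[]-taking compressTags overload.
  static void consume(byte[] arr, int off, int len) {
    System.out.println("array path: " + len + " tag bytes at offset " + off);
  }

  public static void main(String[] args) {
    byte[] heapTags = new byte[6];
    consume(heapTags, 0, heapTags.length);        // heap-backed cell path
    consume(ByteBuffer.allocateDirect(16), 0, 6); // ByteBuffer-backed cell path
  }
}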

http://git-wip-us.apache.org/repos/asf/hbase/blob/0de221a1/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index 05c4ad1..278dfc4 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -79,17 +79,24 @@ public class TagCompressionContext {
   * Compresses tags one by one and writes them to the OutputStream.
* @param out Stream to which the compressed tags to be written
* @param in Source buffer where tags are available
+   * @param offset Offset for the tags byte buffer
* @param length Length of all tag bytes
* @throws IOException
*/
-  public void compressTags(OutputStream out, ByteBuffer in, int length) throws 
IOException {
+  public void compressTags(OutputStream out, ByteBuffer in, int offset, int 
length)
+  throws IOException {
 if (in.hasArray()) {
-  compressTags(out, in.array(), in.arrayOffset() + in.position(), length);
-  ByteBufferUtils.skip(in, length);
+  compressTags(out, in.array(), offset, length);
 } else {
-  byte[] tagBuf = new byte[length];
-  in.get(tagBuf);
-  compressTags(out, tagBuf, 0, length);
+  int pos = offset;
+  int endOffset = pos + length;
+  assert pos < endOffset;
+  while (pos < endOffset) {
+int tagLen = ByteBufferUtils.readAsInt(in,

hbase git commit: HBASE-14025 update CHANGES.txt for 1.2 RC1

2016-01-28 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 778c9730b -> 46fc1d876


HBASE-14025 update CHANGES.txt for 1.2 RC1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/46fc1d87
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/46fc1d87
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/46fc1d87

Branch: refs/heads/branch-1.2
Commit: 46fc1d876bd604f2f71f8692d79978055a095a7a
Parents: 778c973
Author: Sean Busbey 
Authored: Fri Jan 29 01:32:31 2016 -0600
Committer: Sean Busbey 
Committed: Fri Jan 29 01:32:31 2016 -0600

--
 CHANGES.txt | 38 --
 1 file changed, 36 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/46fc1d87/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 21d571d..3438d58 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,6 @@
 HBase Change Log
 
-Release Notes - HBase - Version 1.2.0 01/11/2016
+Release Notes - HBase - Version 1.2.0 02/05/2016
 
 ** Sub-task
 * [HBASE-12748] - RegionCoprocessorHost.execOperation creates too many 
iterator objects
@@ -40,8 +40,11 @@ Release Notes - HBase - Version 1.2.0 01/11/2016
 * [HBASE-14087] - ensure correct ASF policy compliant headers on 
source/docs
 * [HBASE-14104] - Add vectorportal.com to NOTICES.txt as src of our logo
 * [HBASE-14105] - Add shell tests for Snapshot
+* [HBASE-14107] - Procedure V2 - Administrative Task: Provide an API to 
List all procedures
+* [HBASE-14108] - Procedure V2 - Administrative Task: provide an API to 
abort a procedure
 * [HBASE-14147] - REST Support for Namespaces
 * [HBASE-14176] - Add missing headers to META-INF files
+* [HBASE-14221] - Reduce the number of time row comparison is done in a 
Scan
 * [HBASE-14239] - Branch-1.2 AM can get stuck when meta moves
 * [HBASE-14274] - Deadlock in region metrics on shutdown: 
MetricsRegionSourceImpl vs MetricsRegionAggregateSourceImpl
 * [HBASE-14278] - Fix NPE that is showing up since HBASE-14274 went in
@@ -51,12 +54,15 @@ Release Notes - HBase - Version 1.2.0 01/11/2016
 * [HBASE-14421] - TestFastFail* are flakey
 * [HBASE-14428] - Upgrade our surefire-plugin from 2.18 to 2.18.1
 * [HBASE-14430] - TestHttpServerLifecycle#testStartedServerIsAlive times 
out
+* [HBASE-14432] - Procedure V2 - enforce ACL on procedure admin tasks
 * [HBASE-14433] - Set down the client executor core thread count from 256 
in tests
 * [HBASE-14435] - thrift tests don't have test-specific hbase-site.xml so 
'BindException: Address already in use' because info port is not turned off
 * [HBASE-14447] - Spark tests failing: bind exception when putting up info 
server
 * [HBASE-14465] - Backport 'Allow rowlock to be reader/write' to branch-1
 * [HBASE-14472] - TestHCM and TestRegionServerNoMaster fixes
 * [HBASE-14484] - Follow-on from HBASE-14421, just disable TestFastFail* 
until someone digs in and fixes it
+* [HBASE-14487] - Procedure V2 - shell command to list all procedures
+* [HBASE-14488] - Procedure V2 - shell command to abort a procedure
 * [HBASE-14513] - TestBucketCache runs obnoxious 1k threads in a unit test
 * [HBASE-14519] - Purge TestFavoredNodeAssignmentHelper, a test for an 
abandoned feature that can hang
 * [HBASE-14535] - Integration test for rpc connection concurrency / 
deadlock testing 
@@ -92,8 +98,15 @@ Release Notes - HBase - Version 1.2.0 01/11/2016
 * [HBASE-14908] - TestRowCounter flakey especially on branch-1
 * [HBASE-14909] - NPE testing for RIT
 * [HBASE-14915] - Hanging test : 
org.apache.hadoop.hbase.mapreduce.TestImportExport
-* [HBASE-14947] - WALProcedureStore improvements
+* [HBASE-14947] - Procedure V2 - WALProcedureStore improvements
+* [HBASE-14962] - TestSplitWalDataLoss fails on all branches
 * [HBASE-15023] - Reenable TestShell and TestStochasticLoadBalancer
+* [HBASE-15087] - Fix hbase-common findbugs complaints
+* [HBASE-15091] - Forward-port to 1.2+ HBASE-15031 "Fix merge of MVCC and 
SequenceID performance regression in branch-1.0 for Increments"
+* [HBASE-15106] - Procedure V2 - Procedure Queue pass Procedure for better 
debuggability
+* [HBASE-15114] - NPE when IPC server ByteBuffer reservoir is turned off
+* [HBASE-15115] - Fix findbugs complaints in hbase-client
+* [HBASE-15118] - Fix findbugs complaint in hbase-server
 
 ** Bug
 * [HBASE-5878] - Use getVisibleLength public api from HdfsDataInputStream 
from Hadoop-2.
@@ -398,6 +411,7 @@ Release Notes - HBase - Version 1.2.0 01/11/2016
 * [HBASE-14840] - Sink cluster reports data replication request as success 
though the data is not replicated
 * [HBASE-14