[25/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index d2da8f4..27235ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -63,3784 +63,3870 @@
 055import javax.servlet.http.HttpServlet;
 056import javax.servlet.http.HttpServletRequest;
 057import javax.servlet.http.HttpServletResponse;
-058import org.apache.commons.lang3.StringUtils;
-059import org.apache.hadoop.conf.Configuration;
-060import org.apache.hadoop.fs.Path;
-061import org.apache.hadoop.hbase.ChoreService;
-062import org.apache.hadoop.hbase.ClusterId;
-063import org.apache.hadoop.hbase.ClusterMetrics;
-064import org.apache.hadoop.hbase.ClusterMetrics.Option;
-065import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-066import org.apache.hadoop.hbase.DoNotRetryIOException;
-067import org.apache.hadoop.hbase.HBaseIOException;
-068import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-069import org.apache.hadoop.hbase.HConstants;
-070import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-071import org.apache.hadoop.hbase.MasterNotRunningException;
-072import org.apache.hadoop.hbase.MetaTableAccessor;
-073import org.apache.hadoop.hbase.NamespaceDescriptor;
-074import org.apache.hadoop.hbase.PleaseHoldException;
-075import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-076import org.apache.hadoop.hbase.ServerName;
-077import org.apache.hadoop.hbase.TableDescriptors;
-078import org.apache.hadoop.hbase.TableName;
-079import org.apache.hadoop.hbase.TableNotDisabledException;
-080import org.apache.hadoop.hbase.TableNotFoundException;
-081import org.apache.hadoop.hbase.UnknownRegionException;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.MasterSwitchType;
-085import org.apache.hadoop.hbase.client.RegionInfo;
-086import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-087import org.apache.hadoop.hbase.client.Result;
-088import org.apache.hadoop.hbase.client.TableDescriptor;
-089import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-090import org.apache.hadoop.hbase.client.TableState;
-091import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-092import org.apache.hadoop.hbase.exceptions.DeserializationException;
-093import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-094import org.apache.hadoop.hbase.executor.ExecutorType;
-095import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-096import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-097import org.apache.hadoop.hbase.http.InfoServer;
-098import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-099import org.apache.hadoop.hbase.ipc.RpcServer;
-100import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-101import org.apache.hadoop.hbase.log.HBaseMarkers;
-102import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-104import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-105import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-106import org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-107import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-108import org.apache.hadoop.hbase.master.assignment.RegionStates;
-109import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-110import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-111import org.apache.hadoop.hbase.master.balancer.BalancerChore;
-112import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-113import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-114import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-115import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-116import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-117import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-118import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-119import org.apache.hadoop.hbase.master.locking.LockManager;
-120import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-121import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-122import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-123import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-124import 

[25/40] hbase-site git commit: Published site at 2aae247e3f8f8a393b403a82593bdc3a1ba81193.

2018-09-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/80652933/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
index af3b364..d974429 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
@@ -749,869 +749,894 @@
 741  int region = regionsToIndex.get(regionInfo);
 742
 743  int primary = regionIndexToPrimaryIndex[region];
-744
-745  // there is a subset relation for server < host < rack
-746  // check server first
-747
-748  if (contains(primariesOfRegionsPerServer[server], primary)) {
-749// check for whether there are other servers that we can place this region
-750for (int i = 0; i < primariesOfRegionsPerServer.length; i++) {
-751  if (i != server && !contains(primariesOfRegionsPerServer[i], primary)) {
-752return true; // meaning there is a better server
-753  }
-754}
-755return false; // there is not a better server to place this
-756  }
-757
-758  // check host
-759  if (multiServersPerHost) { // these arrays would only be allocated if we have more than one server per host
-760int host = serverIndexToHostIndex[server];
-761if (contains(primariesOfRegionsPerHost[host], primary)) {
-762  // check for whether there are other hosts that we can place this region
-763  for (int i = 0; i < primariesOfRegionsPerHost.length; i++) {
-764if (i != host && !contains(primariesOfRegionsPerHost[i], primary)) {
-765  return true; // meaning there is a better host
-766}
-767  }
-768  return false; // there is not a better host to place this
-769}
-770  }
-771
-772  // check rack
-773  if (numRacks > 1) {
-774int rack = serverIndexToRackIndex[server];
-775if (contains(primariesOfRegionsPerRack[rack], primary)) {
-776  // check for whether there are other racks that we can place this region
-777  for (int i = 0; i < primariesOfRegionsPerRack.length; i++) {
-778if (i != rack && !contains(primariesOfRegionsPerRack[i], primary)) {
-779  return true; // meaning there is a better rack
-780}
-781  }
-782  return false; // there is not a better rack to place this
-783}
-784  }
-785  return false;
-786}
-787
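(A hedged aside on the hunk above: the deleted method checks three nested scopes, server, then host, then rack, following the subset relation server < host < rack, and at each scope asks whether some other slot could take the region without colocating replica primaries. Below is a minimal, self-contained Java sketch of that per-scope test; the array layout mirrors the diff, while the sorted-array contains() is an assumption rather than a claim about HBase's exact implementation.)

import java.util.Arrays;

final class PlacementCheck {
  // Returns true when the current slot colocates the primary and some other
  // slot does not, i.e. a strictly better placement exists at this scope.
  static boolean betterSlotExists(int[][] primariesPerSlot, int slot, int primary) {
    if (!contains(primariesPerSlot[slot], primary)) {
      return false; // no colocation problem at the current slot
    }
    for (int i = 0; i < primariesPerSlot.length; i++) {
      if (i != slot && !contains(primariesPerSlot[i], primary)) {
        return true; // another slot can host the region without the primary
      }
    }
    return false; // every slot already holds the primary
  }

  static boolean contains(int[] sortedArr, int value) {
    return Arrays.binarySearch(sortedArr, value) >= 0;
  }

  public static void main(String[] args) {
    int[][] primariesPerServer = { {1, 3}, {2}, {3, 5} };
    // Primary 3 is on server 0, and server 1 does not hold it: prints true.
    System.out.println(betterSlotExists(primariesPerServer, 0, 3));
  }
}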
-788void doAssignRegion(RegionInfo regionInfo, ServerName serverName) {
-789  if (!serversToIndex.containsKey(serverName.getHostAndPort())) {
-790return;
-791  }
-792  int server = serversToIndex.get(serverName.getHostAndPort());
-793  int region = regionsToIndex.get(regionInfo);
-794  doAction(new AssignRegionAction(region, server));
-795}
-796
-797void regionMoved(int region, int oldServer, int newServer) {
-798  regionIndexToServerIndex[region] = newServer;
-799  if (initialRegionIndexToServerIndex[region] == newServer) {
-800numMovedRegions--; //region moved back to original location
-801  } else if (oldServer >= 0 && initialRegionIndexToServerIndex[region] == oldServer) {
-802numMovedRegions++; //region moved from original location
-803  }
-804  int tableIndex = regionIndexToTableIndex[region];
-805  if (oldServer >= 0) {
-806numRegionsPerServerPerTable[oldServer][tableIndex]--;
-807  }
-808  numRegionsPerServerPerTable[newServer][tableIndex]++;
-809
-810  //check whether this caused maxRegionsPerTable in the new Server to be updated
-811  if (numRegionsPerServerPerTable[newServer][tableIndex] > numMaxRegionsPerTable[tableIndex]) {
-812numMaxRegionsPerTable[tableIndex] = numRegionsPerServerPerTable[newServer][tableIndex];
-813  } else if (oldServer >= 0 && (numRegionsPerServerPerTable[oldServer][tableIndex] + 1)
-814  == numMaxRegionsPerTable[tableIndex]) {
-815//recompute maxRegionsPerTable since the previous value was coming from the old server
-816numMaxRegionsPerTable[tableIndex] = 0;
-817for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) {
-818  if (aNumRegionsPerServerPerTable[tableIndex] > numMaxRegionsPerTable[tableIndex]) {
-819numMaxRegionsPerTable[tableIndex] = aNumRegionsPerServerPerTable[tableIndex];
-820  }
-821}
-822  }
-823
-824  // update for servers
-825  int primary = regionIndexToPrimaryIndex[region];
-826  if (oldServer >= 0) {
-827
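(Hedged sketch: the numMovedRegions bookkeeping in regionMoved above follows one rule, returning a region to its initial server decrements the counter and leaving the initial server increments it. The standalone rendering below reflects that reading; the field names simply mirror the diff.)

final class MoveCounter {
  private final int[] initialRegionIndexToServerIndex;
  private int numMovedRegions;

  MoveCounter(int[] initialAssignment) {
    this.initialRegionIndexToServerIndex = initialAssignment.clone();
  }

  void regionMoved(int region, int oldServer, int newServer) {
    if (initialRegionIndexToServerIndex[region] == newServer) {
      numMovedRegions--; // region moved back to its original location
    } else if (oldServer >= 0 && initialRegionIndexToServerIndex[region] == oldServer) {
      numMovedRegions++; // region moved away from its original location
    }
  }

  int movedRegions() {
    return numMovedRegions;
  }

  public static void main(String[] args) {
    MoveCounter c = new MoveCounter(new int[] {0, 1});
    c.regionMoved(0, 0, 1); // region 0 leaves its initial server -> 1
    c.regionMoved(0, 1, 0); // and returns -> 0
    System.out.println(c.movedRegions()); // prints 0
  }
}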

[25/40] hbase-site git commit: Published site at 5fd16f38533591615aa9afa48bb89bcbd8313caf.

2018-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f0b7674/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 062c73e..a30e311 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -704,20 +704,20 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
 org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f0b7674/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 858ccf6..3bd22b5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,9 +130,9 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f0b7674/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index a4ab1b7..f6fc79b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -238,8 +238,8 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 

[25/40] hbase-site git commit: Published site at e2b0490d18f7cc03aa59475a1b423597ddc481fb.

2018-04-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6c67ddd7/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
index 99240f2..9dd868b 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Bytes
+public class Bytes
 extends java.lang.Object
 implements java.lang.Comparable<Bytes>
 Utility class that handles byte arrays, conversions to/from other types,
@@ -1293,7 +1293,7 @@ implements Comparable<Bytes>
 
 
 UTF8_CSN
-private static final String UTF8_CSN
+private static final String UTF8_CSN
 
 
 
@@ -1302,7 +1302,7 @@ implements Comparable<Bytes>
 
 
 EMPTY_BYTE_ARRAY
-private static final byte[] EMPTY_BYTE_ARRAY
+private static final byte[] EMPTY_BYTE_ARRAY
 
 
 
@@ -1311,7 +1311,7 @@ implements Comparable<Bytes>
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1320,7 +1320,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_BOOLEAN
-public static final int SIZEOF_BOOLEAN
+public static final int SIZEOF_BOOLEAN
 Size of boolean in bytes
 
 See Also:
@@ -1334,7 +1334,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_BYTE
-public static final int SIZEOF_BYTE
+public static final int SIZEOF_BYTE
 Size of byte in bytes
 
 See Also:
@@ -1348,7 +1348,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_CHAR
-public static final int SIZEOF_CHAR
+public static final int SIZEOF_CHAR
 Size of char in bytes
 
 See Also:
@@ -1362,7 +1362,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_DOUBLE
-public static final int SIZEOF_DOUBLE
+public static final int SIZEOF_DOUBLE
 Size of double in bytes
 
 See Also:
@@ -1376,7 +1376,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_FLOAT
-public static final int SIZEOF_FLOAT
+public static final int SIZEOF_FLOAT
 Size of float in bytes
 
 See Also:
@@ -1390,7 +1390,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_INT
-public static final int SIZEOF_INT
+public static final int SIZEOF_INT
 Size of int in bytes
 
 See Also:
@@ -1404,7 +1404,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_LONG
-public static final int SIZEOF_LONG
+public static final int SIZEOF_LONG
 Size of long in bytes
 
 See Also:
@@ -1418,7 +1418,7 @@ implements Comparable<Bytes>
 
 
 SIZEOF_SHORT
-public static final int SIZEOF_SHORT
+public static final int SIZEOF_SHORT
 Size of short in bytes
 
 See Also:
@@ -1432,7 +1432,7 @@ implements Comparable<Bytes>
 
 
 MASK_FOR_LOWER_INT_IN_LONG
-public static final long MASK_FOR_LOWER_INT_IN_LONG
+public static final long MASK_FOR_LOWER_INT_IN_LONG
 Mask to apply to a long to reveal the lower int only. Use like this:
  int i = (int)(0xffffffff00000000L ^ some_long_value);
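(The mask literal in the line above is a reconstruction; 0xffffffff00000000L matches HBase's published Bytes source, but treat it as an assumption here. The sketch below checks why XOR-then-cast still yields the lower int: the narrowing cast discards exactly the 32 bits the mask disturbs.)

public final class MaskDemo {
  static final long MASK_FOR_LOWER_INT_IN_LONG = 0xffffffff00000000L; // assumed value

  public static void main(String[] args) {
    long someLong = 0x0123456789abcdefL;
    // XOR flips only the upper 32 bits; the narrowing cast keeps the lower 32.
    int lower = (int) (MASK_FOR_LOWER_INT_IN_LONG ^ someLong);
    System.out.println(Integer.toHexString(lower)); // prints 89abcdef
  }
}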
 
@@ -1447,7 +1447,7 @@ implements Comparable<Bytes>
 
 
 ESTIMATED_HEAP_TAX
-public static final int ESTIMATED_HEAP_TAX
+public static final int ESTIMATED_HEAP_TAX
 Estimate of size cost to pay beyond payload in jvm for instance of byte [].
 Estimate based on study of jhat and jprofiler numbers.
 
@@ -1462,7 +1462,7 @@ implements Comparable<Bytes>
 
 
 UNSAFE_UNALIGNED
-private static final boolean UNSAFE_UNALIGNED
+private static final boolean UNSAFE_UNALIGNED
 
 
 
@@ -1471,7 +1471,7 @@ implements Comparable<Bytes>
 
 
 bytes
-private byte[] bytes
+private byte[] bytes
 
 
 
@@ -1480,7 +1480,7 @@ implements Comparable<Bytes>
 
 
 offset
-private int offset
+private int offset
 
 
 
@@ -1489,7 +1489,7 @@ implements Comparable<Bytes>
 
 
 length
-private int length
+private int length
 
 
 
@@ -1498,7 +1498,7 @@ implements 

[25/40] hbase-site git commit: Published site at .

2017-08-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c0fcd7f3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
index c687435..f6f823c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
@@ -440,543 +440,549 @@
 432serverMap.clear();
 433  }
 434
-435  // ==========================================================
-436  //  RegionStateNode helpers
-437  // ==========================================================
-438  protected RegionStateNode createRegionNode(final HRegionInfo regionInfo) {
-439RegionStateNode newNode = new RegionStateNode(regionInfo);
-440RegionStateNode oldNode = regionsMap.putIfAbsent(regionInfo.getRegionName(), newNode);
-441return oldNode != null ? oldNode : newNode;
-442  }
-443
-444  protected RegionStateNode getOrCreateRegionNode(final HRegionInfo regionInfo) {
-445RegionStateNode node = regionsMap.get(regionInfo.getRegionName());
-446return node != null ? node : createRegionNode(regionInfo);
-447  }
-448
-449  RegionStateNode getRegionNodeFromName(final byte[] regionName) {
-450return regionsMap.get(regionName);
-451  }
-452
-453  protected RegionStateNode getRegionNode(final HRegionInfo regionInfo) {
-454return getRegionNodeFromName(regionInfo.getRegionName());
-455  }
-456
-457  RegionStateNode getRegionNodeFromEncodedName(final String encodedRegionName) {
-458// TODO: Need a map <encodedName, ...> but it is just dispatch merge...
-459for (RegionStateNode node: regionsMap.values()) {
-460  if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) {
-461return node;
-462  }
-463}
-464return null;
-465  }
-466
-467  public void deleteRegion(final HRegionInfo regionInfo) {
-468regionsMap.remove(regionInfo.getRegionName());
-469// Remove from the offline regions map too if there.
-470if (this.regionOffline.containsKey(regionInfo)) {
-471  if (LOG.isTraceEnabled()) LOG.trace("Removing from regionOffline Map: " + regionInfo);
-472  this.regionOffline.remove(regionInfo);
-473}
-474  }
-475
-476  ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
-477final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
-478for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-479  if (!node.getTable().equals(tableName)) break;
-480  regions.add(node);
-481}
-482return regions;
-483  }
-484
-485  ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
-486final ArrayList<RegionState> regions = new ArrayList<RegionState>();
-487for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-488  if (!node.getTable().equals(tableName)) break;
-489  regions.add(createRegionState(node));
-490}
-491return regions;
-492  }
-493
-494  ArrayList<HRegionInfo> getTableRegionsInfo(final TableName tableName) {
-495final ArrayList<HRegionInfo> regions = new ArrayList<HRegionInfo>();
-496for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-497  if (!node.getTable().equals(tableName)) break;
-498  regions.add(node.getRegionInfo());
-499}
-500return regions;
-501  }
-502
-503  Collection<RegionStateNode> getRegionNodes() {
-504return regionsMap.values();
-505  }
-506
-507  public ArrayList<RegionState> getRegionStates() {
-508final ArrayList<RegionState> regions = new ArrayList<RegionState>(regionsMap.size());
-509for (RegionStateNode node: regionsMap.values()) {
-510  regions.add(createRegionState(node));
-511}
-512return regions;
-513  }
-514
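(The per-table getters above share one pattern: seek into the sorted regionsMap with tailMap(tableName.getName()) and scan until the table changes. Below is a hedged, self-contained sketch of that pattern over a plain TreeMap of strings instead of HBase's byte[]-keyed map.)

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public final class PrefixScan {
  // Collect values whose keys start with 'prefix': seek with tailMap(), then
  // stop at the first non-matching key; sorted order guarantees no later hit.
  static List<String> valuesWithPrefix(TreeMap<String, String> map, String prefix) {
    List<String> out = new ArrayList<>();
    for (Map.Entry<String, String> e : map.tailMap(prefix).entrySet()) {
      if (!e.getKey().startsWith(prefix)) {
        break;
      }
      out.add(e.getValue());
    }
    return out;
  }

  public static void main(String[] args) {
    TreeMap<String, String> m = new TreeMap<>();
    m.put("t1,aaa", "r1");
    m.put("t1,bbb", "r2");
    m.put("t2,aaa", "r3");
    System.out.println(valuesWithPrefix(m, "t1,")); // prints [r1, r2]
  }
}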
-515  // ==========================================================
-516  //  RegionState helpers
-517  // ==========================================================
-518  public RegionState getRegionState(final HRegionInfo regionInfo) {
-519return createRegionState(getRegionNode(regionInfo));
-520  }
-521
-522  public RegionState getRegionState(final String encodedRegionName) {
-523return createRegionState(getRegionNodeFromEncodedName(encodedRegionName));
-524  }
-525
-526  private RegionState createRegionState(final RegionStateNode node) {
-527return node == null ? null :
-528  new RegionState(node.getRegionInfo(), node.getState(),
-529node.getLastUpdate(), node.getRegionLocation());
+435  @VisibleForTesting
+436 

[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import 

[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9db7c5d/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
index b09e20b..3c088c8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.CompactionOrFlushMergeCopy.html
@@ -155,937 +155,942 @@
 147  }
 148
 149  @Override
-150  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
-151CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
-152// Passing null does not cause NPE??
-153cmc.mergeResults(null, sfs);
-154debugDumpState("Added new files");
-155  }
-156
-157  @Override
-158  public ImmutableCollection<StoreFile> clearFiles() {
-159ImmutableCollection<StoreFile> result = state.allFilesCached;
-160this.state = new State();
-161this.fileStarts.clear();
-162this.fileEnds.clear();
-163return result;
-164  }
-165
-166  @Override
-167  public ImmutableCollection<StoreFile> clearCompactedFiles() {
-168ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
-169this.state = new State();
-170return result;
-171  }
-172
-173  @Override
-174  public int getStorefileCount() {
-175return state.allFilesCached.size();
+150  public int getCompactedFilesCount() {
+151return state.allCompactedFilesCached.size();
+152  }
+153
+154  @Override
+155  public void insertNewFiles(Collection<StoreFile> sfs) throws IOException {
+156CompactionOrFlushMergeCopy cmc = new CompactionOrFlushMergeCopy(true);
+157// Passing null does not cause NPE??
+158cmc.mergeResults(null, sfs);
+159debugDumpState("Added new files");
+160  }
+161
+162  @Override
+163  public ImmutableCollection<StoreFile> clearFiles() {
+164ImmutableCollection<StoreFile> result = state.allFilesCached;
+165this.state = new State();
+166this.fileStarts.clear();
+167this.fileEnds.clear();
+168return result;
+169  }
+170
+171  @Override
+172  public ImmutableCollection<StoreFile> clearCompactedFiles() {
+173ImmutableCollection<StoreFile> result = state.allCompactedFilesCached;
+174this.state = new State();
+175return result;
 176  }
 177
-178  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)}
-179   * for details on this methods. */
-180  @Override
-181  public Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
-182KeyBeforeConcatenatedLists result = new KeyBeforeConcatenatedLists();
-183// Order matters for this call.
-184result.addSublist(state.level0Files);
-185if (!state.stripeFiles.isEmpty()) {
-186  int lastStripeIndex = findStripeForRow(CellUtil.cloneRow(targetKey), false);
-187  for (int stripeIndex = lastStripeIndex; stripeIndex >= 0; --stripeIndex) {
-188result.addSublist(state.stripeFiles.get(stripeIndex));
-189  }
-190}
-191return result.iterator();
-192  }
-193
-194  /** See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
-195   * {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)}
-196   * for details on this methods. */
-197  @Override
-198  public Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
-199  Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final Cell candidate) {
-200KeyBeforeConcatenatedLists.Iterator original =
-201(KeyBeforeConcatenatedLists.Iterator)candidateFiles;
-202assert original != null;
-203ArrayList<List<StoreFile>> components = original.getComponents();
-204for (int firstIrrelevant = 0; firstIrrelevant < components.size(); ++firstIrrelevant) {
-205  StoreFile sf = components.get(firstIrrelevant).get(0);
-206  byte[] endKey = endOf(sf);
-207  // Entries are ordered as such: L0, then stripes in reverse order. We never remove
-208  // level 0; we remove the stripe, and all subsequent ones, as soon as we find the
-209  // first one that cannot possibly have better candidates.
-210  if (!isInvalid(endKey) && !isOpen(endKey)
-211  && (nonOpenRowCompare(targetKey, endKey) >= 0)) {
-212original.removeComponents(firstIrrelevant);
-213break;
-214  }
-215}
-216return original;
-217  }
-218
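(Hedged sketch of the pruning loop above: candidates arrive as an ordered list of sublists, level 0 first and then stripes from last to first, and everything from the first sublist that can no longer contain a better candidate is dropped in one cut. The toy version below works over plain lists; integer end keys stand in for endOf(sf) and a plain comparison for nonOpenRowCompare.)

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class CandidatePruning {
  // Truncate 'components' at the first entry whose end key rules out better
  // candidates, mirroring original.removeComponents(firstIrrelevant) above.
  static void pruneFrom(List<List<Integer>> components, int target) {
    for (int first = 0; first < components.size(); ++first) {
      int endKey = components.get(first).get(0);
      if (target >= endKey) {
        components.subList(first, components.size()).clear();
        break;
      }
    }
  }

  public static void main(String[] args) {
    List<List<Integer>> comps = new ArrayList<>();
    comps.add(Arrays.asList(90));
    comps.add(Arrays.asList(70));
    comps.add(Arrays.asList(40));
    pruneFrom(comps, 60); // 60 >= 40, so the last component is dropped
    System.out.println(comps); // prints [[90], [70]]
  }
}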
-219  private byte[] getSplitPoint(Collection<StoreFile> sfs) throws IOException {
-220Optional<StoreFile> largestFile = StoreUtils.getLargestFile(sfs);
-221return largestFile.isPresent()
-222? StoreUtils.getFileSplitPoint(largestFile.get(), 

[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f8f0a032/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index ac13492..ce4327f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -1101,284 +1101,280 @@
 1093"hbase.regionserver.wal.enablecompression";
 1094
 1095  /** Configuration name of WAL storage policy
-1096   * Valid values are:
-1097   *  NONE: no preference in destination of block replicas
-1098   *  ONE_SSD: place only one block replica in SSD and the remaining in default storage
-1099   *  and ALL_SSD: place all block replicas on SSD
-1100   *
-1101   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
-1102  public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-1103  public static final String DEFAULT_WAL_STORAGE_POLICY = "NONE";
+1096   * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
+1097   * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
+1098  public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
+1099  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+1100
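(This hunk changes the documented value set for hbase.wal.storage.policy and flips the default from NONE to HOT. Below is a minimal sketch of how such a keyed default is typically consumed; org.apache.hadoop.conf.Configuration is the real Hadoop class, while the wrapper class and usage are illustrative only.)

import org.apache.hadoop.conf.Configuration;

public final class WalPolicyDemo {
  // Constant values copied from the hunk above.
  static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
  static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Unset key: the default applies, which after this diff is "HOT".
    System.out.println(conf.get(WAL_STORAGE_POLICY, DEFAULT_WAL_STORAGE_POLICY));
    conf.set(WAL_STORAGE_POLICY, "ALL_SSD");
    System.out.println(conf.get(WAL_STORAGE_POLICY, DEFAULT_WAL_STORAGE_POLICY));
  }
}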
+1101  /** Region in Transition metrics threshold time */
+1102  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
+1103  "hbase.metrics.rit.stuck.warning.threshold";
 1104
-1105  /** Region in Transition metrics threshold time */
-1106  public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
-1107  "hbase.metrics.rit.stuck.warning.threshold";
-1108
-1109  public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";
-1110
-1111  /** delimiter used between portions of a region name */
-1112  public static final int DELIMITER = ',';
-1113
-1114  /**
-1115   * QOS attributes: these attributes are used to demarcate RPC call processing
-1116   * by different set of handlers. For example, HIGH_QOS tagged methods are
-1117   * handled by high priority handlers.
-1118   */
-1119  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < high_QOS
-1120  public static final int NORMAL_QOS = 0;
-1121  public static final int REPLICATION_QOS = 5;
-1122  public static final int REPLAY_QOS = 6;
-1123  public static final int QOS_THRESHOLD = 10;
-1124  public static final int ADMIN_QOS = 100;
-1125  public static final int HIGH_QOS = 200;
-1126  public static final int SYSTEMTABLE_QOS = HIGH_QOS;
archived hfiles are stored */
-1129  public static final String 
HFILE_ARCHIVE_DIRECTORY = "archive";
-1130
-1131  /**
-1132   * Name of the directory to store all 
snapshots. See SnapshotDescriptionUtils for
-1133   * remaining snapshot constants; this 
is here to keep HConstants dependencies at a minimum and
-1134   * uni-directional.
-1135   */
-1136  public static final String 
SNAPSHOT_DIR_NAME = ".hbase-snapshot";
-1137
-1138  /* Name of old snapshot directory. See 
HBASE-8352 for details on why it needs to be renamed */
-1139  public static final String 
OLD_SNAPSHOT_DIR_NAME = ".snapshot";
-1140
-1141  /** Temporary directory used for table 
creation and deletion */
-1142  public static final String 
HBASE_TEMP_DIRECTORY = ".tmp";
-1143  /**
-1144   * The period (in milliseconds) 
between computing region server point in time metrics
-1145   */
-1146  public static final String 
REGIONSERVER_METRICS_PERIOD = "hbase.regionserver.metrics.period";
-1147  public static final long 
DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
-1148  /** Directories that are not HBase 
table directories */
-1149  public static final ListString 
HBASE_NON_TABLE_DIRS =
-1150
Collections.unmodifiableList(Arrays.asList(new String[] {
-1151  HBCK_SIDELINEDIR_NAME, 
HBASE_TEMP_DIRECTORY, MIGRATION_NAME
-1152}));
-1153
-1154  /** Directories that are not HBase 
user table directories */
-1155  public static final ListString 
HBASE_NON_USER_TABLE_DIRS =
-1156
Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-1157  new String[] { 
TableName.META_TABLE_NAME.getNameAsString() },
-1158  
HBASE_NON_TABLE_DIRS.toArray(;
-1159
-1160  /** Health script related settings. 
*/
-1161  public static final String 
HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
-1162  public static final String 
HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";
-1163  public static final String 
HEALTH_CHORE_WAKE_FREQ =
-1164  
"hbase.node.health.script.frequency";
-1165  public static final long 
DEFAULT_HEALTH_SCRIPT_TIMEOUT = 6;
-1166  /**
-1167   * The maximum number of health check 
failures a server can encounter