hbase git commit: HBASE-17073 Increase the max number of buffers in ByteBufferPool.

2016-11-14 Thread anoopsamjohn
Repository: hbase
Updated Branches:
  refs/heads/master 3f1f58726 -> 4d1bff9e7


HBASE-17073 Increase the max number of buffers in ByteBufferPool.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4d1bff9e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4d1bff9e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4d1bff9e

Branch: refs/heads/master
Commit: 4d1bff9e78884adf689dd587d65afe36a336c56b
Parents: 3f1f587
Author: anoopsamjohn 
Authored: Tue Nov 15 11:23:03 2016 +0530
Committer: anoopsamjohn 
Committed: Tue Nov 15 11:23:03 2016 +0530

--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 26 +++-
 1 file changed, 20 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4d1bff9e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 6eefaac..49c7f8a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -93,7 +93,6 @@ import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
 import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
-import org.apache.hadoop.hbase.io.ByteBufferInputStream;
 import org.apache.hadoop.hbase.io.ByteBufferListOutputStream;
 import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
@@ -2377,11 +2376,26 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
   RpcScheduler scheduler)
   throws IOException {
 if (conf.getBoolean("hbase.ipc.server.reservoir.enabled", true)) {
-  this.reservoir = new ByteBufferPool(
-  conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY, 
ByteBufferPool.DEFAULT_BUFFER_SIZE),
-  conf.getInt(ByteBufferPool.MAX_POOL_SIZE_KEY,
-  conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-  HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2));
+  int poolBufSize = conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY,
+  ByteBufferPool.DEFAULT_BUFFER_SIZE);
+  // The max number of buffers to be pooled in the ByteBufferPool. The 
default value been
+  // selected based on the #handlers configured. When it is read request, 
2 MB is the max size
+  // at which we will send back one RPC request. Means max we need 2 MB 
for creating the
+  // response cell block. (Well it might be much lesser than this because 
in 2 MB size calc, we
+  // include the heap size overhead of each cells also.) Considering 2 MB, 
we will need
+  // (2 * 1024 * 1024) / poolBufSize buffers to make the response cell 
block. Pool buffer size
+  // is by default 64 KB.
+  // In case of read request, at the end of the handler process, we will 
make the response
+  // cellblock and add the Call to connection's response Q and a single 
Responder thread takes
+  // connections and responses from that one by one and do the socket 
write. So there is chances
+  // that by the time a handler originated response is actually done 
writing to socket and so
+  // released the BBs it used, the handler might have processed one more 
read req. On an avg 2x
+  // we consider and consider that also for the max buffers to pool
+  int bufsForTwoMB = (2 * 1024 * 1024) / poolBufSize;
+  int maxPoolSize = conf.getInt(ByteBufferPool.MAX_POOL_SIZE_KEY,
+  conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+  HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * bufsForTwoMB * 
2);
+  this.reservoir = new ByteBufferPool(poolBufSize, maxPoolSize);
   this.minSizeForReservoirUse = getMinSizeForReservoirUse(this.reservoir);
 } else {
   reservoir = null;



[03/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
--
diff --git 
a/xref/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html 
b/xref/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
index d089d65..e342d7a 100644
--- a/xref/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
+++ b/xref/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
@@ -261,145 +261,153 @@
 251   }
 252
 253   /**
-254* It "atomically" copies all the wals queues from 
another region server and returns them all
-255* sorted per peer cluster (appended with the dead 
server's znode).
+254* It "atomically" copies one peer's wals queue 
from another dead region server and returns them
+255* all sorted. The new peer id is equal to the old 
peer id appended with the dead server's znode.
 256* @param znode pertaining to the region server to 
copy the queues from
-257*/
-258   private PairString, 
SortedSetString moveQueueUsingMulti(String znode, String peerId) {
-259 try {
-260   // hbase/replication/rs/deadrs
-261   String 
deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
-262   
ListZKUtilOp listOfOps = new 
ArrayList();
-263   ReplicationQueueInfo
 replicationQueueInfo = new ReplicationQueueInfo(peerId);
-264   if (!peerExists(replicationQueueInfo.getPeerId())) 
{
-265 // the orphaned queues must be moved, otherwise the delete 
op of dead rs will fail,
-266 // this will cause the whole multi op fail.
-267 // NodeFailoverWorker will skip the orphaned queues.
-268 
LOG.warn("Peer " + peerId +
-269 " didn't exist, will move its queue to avoid the failure of 
multi op");
-270   }
-271   String 
newPeerId = peerId + "-" + znode;
-272   String 
newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
-273   // check the logs queue for the old peer cluster
-274   String 
oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId);
-275   
ListString wals = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
-276   
SortedSetString logQueue = new 
TreeSet();
-277   if (wals == null || wals.size() == 0) {
-278 
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-279   } else {
-280 // create the new cluster znode
-281 ZKUtilOp
 op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
-282 
listOfOps.add(op);
-283 // get the offset of the logs and set it to new znodes
-284 for (String wal : wals) {
-285   String 
oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal);
-286   byte[] 
logOffset = ZKUtil.getData(this.zookeeper, 
oldWalZnode);
-287   
LOG.debug("Creating " + wal + " with data " + Bytes.toString(logOffset));
-288   String 
newLogZnode = ZKUtil.joinZNode(newPeerZnode, wal);
-289   
listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
-290   
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
-291   
logQueue.add(wal);
-292 }
-293 // add delete op for peer
-294 
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-295
-296 if (LOG.isTraceEnabled())
-297   
LOG.trace(" The multi list size is: " + 
listOfOps.size());
-298   }
-299   
ZKUtil.multiOrSequential(this.zookeeper, 
listOfOps, false);
-300   if (LOG.isTraceEnabled())
-301 
LOG.trace("Atomically moved the dead regionserver 
logs. ");
-302   return new 
Pair(newPeerId, logQueue);
-303 } catch (KeeperException e) {
-304   // Multi call failed; it looks like some other regionserver 
took away the logs.
-305   LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
-306 } catch (InterruptedException e) {
-307   LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
-308   
Thread.currentThread().interrupt();
-309 }
-310 return null;
-311   }
-312
-313   @Override
-314   public void 
addHFileRefs(String peerId, ListString files) throws ReplicationException {
-315 String peerZnode 
= ZKUtil.joinZNode(this.hfileRefsZNode, 
peerId);
-316 boolean debugEnabled = LOG.isDebugEnabled();
-317 if (debugEnabled) {
-318   LOG.debug("Adding hfile references " + files + " in queue " + peerZnode);
-319 }
-320 
ListZKUtilOp listOfOps = new 
ArrayListZKUtil.ZKUtilOp();
-321 int size = files.size();
-322 for (int i = 
0; i < size; i++) {
-323   
listOfOps.add(ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(peerZnode, 
files.get(i)),
-324 
HConstants.EMPTY_BYTE_ARRAY));
-325 }
-326 if (debugEnabled) {
-327   LOG.debug(" The multi list size for adding hfile references in zk for 
node " + peerZnode
-328   + " is " + 

[04/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git a/xref/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html 
b/xref/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index bd34b19..cae8355 100644
--- a/xref/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ b/xref/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -372,12 +372,12 @@
 362* @param regionLocator region locator
 363* @param silence true to ignore unmatched column 
families
 364* @param copyFile always copy hfiles if true
-365* @return List of filenames which were not 
found
+365* @return Map of LoadQueueItem to region
 366* @throws TableNotFoundException if table does 
not yet exist
 367*/
-368   public ListString 
doBulkLoad(Mapbyte[], ListPath map, final Admin 
admin, Table table,
-369   RegionLocator
 regionLocator, boolean silence, boolean copyFile)
-370   throws TableNotFoundException, IOException {
+368   public MapLoadQueueItem, ByteBuffer 
doBulkLoad(Mapbyte[], ListPath map, final Admin admin,
+369   Table 
table, RegionLocator
 regionLocator, boolean silence, boolean copyFile)
+370   throws TableNotFoundException, IOException {
 371 if 
(!admin.isTableAvailable(regionLocator.getName())) {
 372   throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
 373 }
@@ -459,8 +459,8 @@
 449 }
 450   }
 451
-452   ListString 
performBulkLoad(final Admin 
admin, Table 
table, RegionLocator
 regionLocator,
-453   
DequeLoadQueueItem queue, ExecutorService pool,
+452   
MapLoadQueueItem, ByteBuffer performBulkLoad(final Admin 
admin, Table table,
+453   RegionLocator
 regionLocator, DequeLoadQueueItem queue, ExecutorService pool,
 454   SecureBulkLoadClient
 secureClient, boolean copyFile) throws IOException {
 455 int count = 0;
 456
@@ -474,802 +474,815 @@
 464 // fs is the source filesystem
 465 
fsDelegationToken.acquireDelegationToken(fs);
 466 bulkToken = 
secureClient.prepareBulkLoad(admin.getConnection());
-467 
PairMultimapByteBuffer, LoadQueueItem, ListString pair 
= null;
+467 
PairMultimapByteBuffer, LoadQueueItem, SetString pair = 
null;
 468
-469 // Assumes that region splits can happen while this 
occurs.
-470 while (!queue.isEmpty()) {
-471   // need to reload split keys each iteration.
-472   final Pairbyte[][], byte[][] startEndKeys 
= regionLocator.getStartEndKeys();
-473   if (count != 0) {
-474 
LOG.info("Split occured while grouping HFiles, retry 
attempt " +
-475 + count + 
" with " + queue.size() + " files remaining to group or split");
-476   }
-477
-478   int maxRetries = 
getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-479   maxRetries = 
Math.max(maxRetries, startEndKeys.getFirst().length + 1);
-480   if (maxRetries != 0 && count >= 
maxRetries) {
-481 throw new 
IOException("Retry attempted " + count +
-482 " times without completing, bailing out");
-483   }
-484   count++;
-485
-486   // Using ByteBuffer for byte[] equality semantics
-487   pair = 
groupOrSplitPhase(table, pool, queue, startEndKeys);
-488   
MultimapByteBuffer, LoadQueueItem regionGroups = pair.getFirst();
-489
-490   if 
(!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-491 // Error is logged inside 
checkHFilesCountPerRegionPerFamily.
-492 throw new 
IOException("Trying to load more than " + 
maxFilesPerRegionPerFamily
-493 + " hfiles to one family of one region");
-494   }
-495
-496   
bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, 
copyFile);
-497
-498   // NOTE: The next iteration's split / group could happen in 
parallel to
-499   // atomic bulkloads assuming that there are splits and no 
merges, and
-500   // that we can atomically pull out the groups we want to 
retry.
-501 }
-502
-503 if (!queue.isEmpty()) {
-504   throw new 
RuntimeException("Bulk load aborted with some files 
not yet loaded."
-505 + "Please check log for more details.");
-506 }
-507 if (pair == null) return 
null;
-508 return pair.getSecond();
-509   }
-510
-511   /**
-512* Prepare a collection of {@link LoadQueueItem} 
from list of source hfiles contained in the
-513* passed directory and validates whether the 
prepared queue has all the valid table column
-514* families in it.
-515* @param hfilesDir directory containing list of 
hfiles to be loaded into the table
-516* @param table table to which hfiles should be 
loaded
-517* @param queue queue which needs to be loaded 
into the table
-518* @param validateHFile if true hfiles will be 
validated for its format
-519* 

[17/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
index 89b0f0c..df6f4e7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
@@ -28,1103 +28,1096 @@
 020
 021import java.io.IOException;
 022import java.math.BigInteger;
-023import java.util.Arrays;
-024import java.util.Collection;
-025import java.util.Collections;
-026import java.util.Comparator;
-027import java.util.LinkedList;
-028import java.util.List;
+023
+024import java.util.Arrays;
+025import java.util.Collection;
+026import java.util.LinkedList;
+027import java.util.List;
+028import java.util.Map;
 029import java.util.Set;
 030import java.util.TreeMap;
-031
-032import 
org.apache.commons.cli.CommandLine;
-033import 
org.apache.commons.cli.GnuParser;
-034import 
org.apache.commons.cli.HelpFormatter;
-035import 
org.apache.commons.cli.OptionBuilder;
-036import org.apache.commons.cli.Options;
-037import 
org.apache.commons.cli.ParseException;
-038import 
org.apache.commons.lang.ArrayUtils;
-039import 
org.apache.commons.lang.StringUtils;
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.fs.FSDataInputStream;
-044import 
org.apache.hadoop.fs.FSDataOutputStream;
-045import org.apache.hadoop.fs.FileSystem;
-046import org.apache.hadoop.fs.Path;
-047import 
org.apache.hadoop.hbase.ClusterStatus;
-048import 
org.apache.hadoop.hbase.HBaseConfiguration;
-049import 
org.apache.hadoop.hbase.HColumnDescriptor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.HTableDescriptor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.TableName;
-056import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-057import 
org.apache.hadoop.hbase.client.Admin;
-058import 
org.apache.hadoop.hbase.client.ClusterConnection;
-059import 
org.apache.hadoop.hbase.client.Connection;
-060import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-061import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-062import 
org.apache.hadoop.hbase.client.RegionLocator;
-063import 
org.apache.hadoop.hbase.client.Table;
-064import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-065
-066import 
com.google.common.base.Preconditions;
-067import com.google.common.collect.Lists;
-068import com.google.common.collect.Maps;
-069import com.google.common.collect.Sets;
-070
-071/**
-072 * The {@link RegionSplitter} class 
provides several utilities to help in the
-073 * administration lifecycle for 
developers who choose to manually split regions
-074 * instead of having HBase handle that 
automatically. The most useful utilities
-075 * are:
-076 * p
-077 * ul
-078 * liCreate a table with a 
specified number of pre-split regions
-079 * liExecute a rolling split of 
all regions on an existing table
-080 * /ul
-081 * p
-082 * Both operations can be safely done on 
a live server.
-083 * p
-084 * bQuestion:/b How do I 
turn off automatic splitting? br
-085 * bAnswer:/b Automatic 
splitting is determined by the configuration value
-086 * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-087 * to Long.MAX_VALUE in case you forget 
about manual splits. A suggested setting
-088 * is 100GB, which would result in 
> 1hr major compactions if reached.
-089 * p
-090 * bQuestion:/b Why did 
the original authors decide to manually split? br
-091 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-092 * to benefit from a manual split 
system.
-093 * p
-094 * ul
-095 * liData (~1k) that would grow 
instead of being replaced
-096 * liData growth was roughly 
uniform across all regions
-097 * liOLTP workload. Data loss is 
a big deal.
-098 * /ul
-099 * p
-100 * bQuestion:/b Why is 
manual splitting good for this workload? br
-101 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-102 * benefits to manual splitting.
-103 * p
-104 * ul
-105 * liWith growing amounts of 
data, splits will continually be needed. Since
-106 * you always know exactly what regions 
you have, long-term debugging and
-107 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-108 * understand region level problems if it 
keeps splitting and getting renamed.
-109 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-110 * WAL or 

[02/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index bbb9563..0d2b675 100644
--- a/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -71,15 +71,15 @@
 61requiredArguments = {
 62  
@org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 63optionalArguments = {
-64  
@org.jamon.annotations.Argument(name = "frags", 
type = "MapString,Integer"),
-65  
@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+64  
@org.jamon.annotations.Argument(name = "filter", type = "String"),
+65  
@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"),
 66  
@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-67  
@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"),
-68  
@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
-69  
@org.jamon.annotations.Argument(name = "servers", type = "ListServerName"),
-70  
@org.jamon.annotations.Argument(name = "format", type = "String"),
-71  
@org.jamon.annotations.Argument(name = "filter", type = "String"),
-72  
@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager")})
+67  
@org.jamon.annotations.Argument(name = "format", type = "String"),
+68  
@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
+69  
@org.jamon.annotations.Argument(name = "frags", 
type = "MapString,Integer"),
+70  
@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+71  
@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+72  
@org.jamon.annotations.Argument(name = "servers", type = "ListServerName")})
 73  public class 
MasterStatusTmpl
 74extends org.jamon.AbstractTemplateProxy
 75  {
@@ -120,40 +120,40 @@
 110   return m_master;
 111 }
 112 private HMaster
 m_master;
-113 // 21, 1
-114 public void 
setFrags(MapString,Integer frags)
+113 // 26, 1
+114 public void 
setFilter(String filter)
 115 {
-116   // 21, 1
-117   m_frags = frags;
-118   
m_frags__IsNotDefault = true;
+116   // 26, 1
+117   m_filter = 
filter;
+118   
m_filter__IsNotDefault = true;
 119 }
-120 public MapString,Integer getFrags()
+120 public String getFilter()
 121 {
-122   return m_frags;
+122   return m_filter;
 123 }
-124 private MapString,Integer m_frags;
-125 public boolean getFrags__IsNotDefault()
+124 private String m_filter;
+125 public boolean getFilter__IsNotDefault()
 126 {
-127   return m_frags__IsNotDefault;
+127   return m_filter__IsNotDefault;
 128 }
-129 private boolean m_frags__IsNotDefault;
-130 // 22, 1
-131 public void 
setMetaLocation(ServerName 
metaLocation)
+129 private boolean m_filter__IsNotDefault;
+130 // 24, 1
+131 public void 
setDeadServers(SetServerName deadServers)
 132 {
-133   // 22, 1
-134   m_metaLocation 
= metaLocation;
-135   
m_metaLocation__IsNotDefault = true;
+133   // 24, 1
+134   m_deadServers = 
deadServers;
+135   
m_deadServers__IsNotDefault = true;
 136 }
-137 public ServerName 
getMetaLocation()
+137 public SetServerName getDeadServers()
 138 {
-139   return m_metaLocation;
+139   return m_deadServers;
 140 }
-141 private ServerName 
m_metaLocation;
-142 public boolean getMetaLocation__IsNotDefault()
+141 private SetServerName m_deadServers;
+142 public boolean getDeadServers__IsNotDefault()
 143 {
-144   return m_metaLocation__IsNotDefault;
+144   return m_deadServers__IsNotDefault;
 145 }
-146 private boolean m_metaLocation__IsNotDefault;
+146 private boolean m_deadServers__IsNotDefault;
 147 // 25, 1
 148 public void 
setCatalogJanitorEnabled(boolean 
catalogJanitorEnabled)
 149 {
@@ -171,108 +171,108 @@
 161   return m_catalogJanitorEnabled__IsNotDefault;
 162 }
 163 private boolean m_catalogJanitorEnabled__IsNotDefault;
-164 // 24, 1
-165 public void 
setDeadServers(SetServerName deadServers)
+164 // 27, 1
+165 public void 
setFormat(String format)
 166 {
-167   // 24, 1
-168   m_deadServers = 
deadServers;
-169   
m_deadServers__IsNotDefault = true;
+167   // 27, 1
+168   m_format = 
format;
+169   
m_format__IsNotDefault = true;
 170 }
-171 public SetServerName getDeadServers()
+171 public String getFormat()
 172 {
-173   return m_deadServers;

[10/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
index ee9b740..57575c0 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
@@ -32,723 +32,730 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.util.ArrayList;
-028import java.util.List;
-029import java.util.Locale;
-030import java.util.Map;
-031import java.util.TreeMap;
-032
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.FileStatus;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.Path;
-038import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-039import 
org.apache.hadoop.hbase.HColumnDescriptor;
-040import 
org.apache.hadoop.hbase.HConstants;
-041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.TableNotFoundException;
-045import 
org.apache.hadoop.hbase.client.Table;
-046import 
org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-047import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-048import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-049import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-050import 
org.apache.hadoop.hbase.io.hfile.HFile;
-051import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-052import 
org.apache.hadoop.hbase.regionserver.BloomType;
-053import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-054import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
-055import 
org.apache.hadoop.hbase.util.Bytes;
-056import 
org.apache.hadoop.hbase.util.FSUtils;
-057import 
org.apache.hadoop.hbase.util.HFileTestUtil;
-058import org.junit.AfterClass;
-059import org.junit.BeforeClass;
-060import org.junit.Rule;
-061import org.junit.Test;
-062import 
org.junit.experimental.categories.Category;
-063import org.junit.rules.TestName;
-064
-065/**
-066 * Test cases for the "load" half of the 
HFileOutputFormat bulk load
-067 * functionality. These tests run faster 
than the full MR cluster
-068 * tests in TestHFileOutputFormat
-069 */
-070@Category({MapReduceTests.class, 
LargeTests.class})
-071public class TestLoadIncrementalHFiles 
{
-072  @Rule
-073  public TestName tn = new TestName();
-074
-075  private static final byte[] QUALIFIER = 
Bytes.toBytes("myqual");
-076  private static final byte[] FAMILY = 
Bytes.toBytes("myfam");
-077  private static final String NAMESPACE = 
"bulkNS";
-078
-079  static final String 
EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
-080  static final int 
MAX_FILES_PER_REGION_PER_FAMILY = 4;
+027import java.nio.ByteBuffer;
+028import java.util.ArrayList;
+029import java.util.Deque;
+030import java.util.List;
+031import java.util.Locale;
+032import java.util.Map;
+033import java.util.TreeMap;
+034
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.fs.FSDataOutputStream;
+037import org.apache.hadoop.fs.FileStatus;
+038import org.apache.hadoop.fs.FileSystem;
+039import org.apache.hadoop.fs.Path;
+040import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+041import 
org.apache.hadoop.hbase.HColumnDescriptor;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.HTableDescriptor;
+044import 
org.apache.hadoop.hbase.NamespaceDescriptor;
+045import 
org.apache.hadoop.hbase.TableName;
+046import 
org.apache.hadoop.hbase.TableNotFoundException;
+047import 
org.apache.hadoop.hbase.client.Table;
+048import 
org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+049import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+050import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+051import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
+052import 
org.apache.hadoop.hbase.io.hfile.HFile;
+053import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
+054import 
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+055import 
org.apache.hadoop.hbase.regionserver.BloomType;
+056import 
org.apache.hadoop.hbase.testclassification.LargeTests;
+057import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
+058import 
org.apache.hadoop.hbase.util.Bytes;
+059import 
org.apache.hadoop.hbase.util.FSUtils;
+060import 
org.apache.hadoop.hbase.util.HFileTestUtil;
+061import org.junit.AfterClass;
+062import org.junit.BeforeClass;
+063import org.junit.Rule;
+064import org.junit.Test;

[30/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 105757b..30e54df 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2016 The Apache Software Foundation
 
-  File: 2045,
- Errors: 13916,
+  File: 2051,
+ Errors: 13912,
  Warnings: 0,
  Infos: 0
   
@@ -895,7 +895,7 @@ under the License.
   0
 
 
-  89
+  82
 
   
   
@@ -3107,7 +3107,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -5109,7 +5109,7 @@ under the License.
   0
 
 
-  187
+  186
 
   
   
@@ -6243,7 +6243,7 @@ under the License.
   0
 
 
-  19
+  16
 
   
   
@@ -8007,7 +8007,7 @@ under the License.
   0
 
 
-  7
+  5
 
   
   
@@ -8427,7 +8427,7 @@ under the License.
   0
 
 
-  17
+  16
 
   
   
@@ -8623,7 +8623,7 @@ under the License.
   0
 
 
-  9
+  2
 
   
   
@@ -8838,6 +8838,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ScanResultConsumer.java;>org/apache/hadoop/hbase/client/ScanResultConsumer.java
+
+
+  0
+
+
+  0
+
+
+  1
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.shaded.com.google.protobuf.NullValue.java;>org/apache/hadoop/hbase/shaded/com/google/protobuf/NullValue.java
 
 
@@ -9533,7 +9547,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -13817,7 +13831,7 @@ under the License.
   0
 
 
-  19
+  17
 
   
   
@@ -15175,7 +15189,7 @@ under the License.
   0
 
 
-  18
+  14
 
   
   
@@ -16272,6 +16286,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.AllowPartialScanResultCache.java;>org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.shaded.com.google.protobuf.OptionOrBuilder.java;>org/apache/hadoop/hbase/shaded/com/google/protobuf/OptionOrBuilder.java
 
 
@@ -16463,7 +16491,7 @@ under the License.
   0
 
 
-  113
+  127
 
   
   
@@ -16650,6 +16678,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ScanResultCache.java;>org/apache/hadoop/hbase/client/ScanResultCache.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   

[11/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
index 9b0e573..1e6eeaa 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestLoadIncrementalHFiles
+public class TestLoadIncrementalHFiles
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Test cases for the "load" half of the HFileOutputFormat 
bulk load
  functionality. These tests run faster than the full MR cluster
@@ -449,7 +449,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tn
-publicorg.junit.rules.TestName tn
+publicorg.junit.rules.TestName tn
 
 
 
@@ -458,7 +458,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 QUALIFIER
-private static finalbyte[] QUALIFIER
+private static finalbyte[] QUALIFIER
 
 
 
@@ -467,7 +467,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private static finalbyte[] FAMILY
+private static finalbyte[] FAMILY
 
 
 
@@ -476,7 +476,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NAMESPACE
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAMESPACE
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAMESPACE
 
 See Also:
 Constant
 Field Values
@@ -489,7 +489,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EXPECTED_MSG_FOR_NON_EXISTING_FAMILY
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY
 
 See Also:
 Constant
 Field Values
@@ -502,7 +502,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MAX_FILES_PER_REGION_PER_FAMILY
-static finalint MAX_FILES_PER_REGION_PER_FAMILY
+static finalint MAX_FILES_PER_REGION_PER_FAMILY
 
 See Also:
 Constant
 Field Values
@@ -515,7 +515,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SPLIT_KEYS
-private static finalbyte[][] SPLIT_KEYS
+private static finalbyte[][] SPLIT_KEYS
 
 
 
@@ -524,7 +524,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 util
-staticHBaseTestingUtility util
+staticHBaseTestingUtility util
 
 
 
@@ -541,7 +541,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestLoadIncrementalHFiles
-publicTestLoadIncrementalHFiles()
+publicTestLoadIncrementalHFiles()
 
 
 
@@ -558,7 +558,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUpBeforeClass
-public staticvoidsetUpBeforeClass()
+public staticvoidsetUpBeforeClass()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -572,7 +572,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setupNamespace
-protected staticvoidsetupNamespace()
+protected staticvoidsetupNamespace()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -586,7 +586,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDownAfterClass
-public staticvoidtearDownAfterClass()
+public staticvoidtearDownAfterClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -600,7 +600,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSimpleLoadWithMap
-publicvoidtestSimpleLoadWithMap()
+publicvoidtestSimpleLoadWithMap()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -614,7 +614,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSimpleLoad

[28/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/LoadIncrementalHFiles.LoadQueueItem.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/LoadIncrementalHFiles.LoadQueueItem.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/LoadIncrementalHFiles.LoadQueueItem.html
index f674375..14e6f20 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/LoadIncrementalHFiles.LoadQueueItem.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/LoadIncrementalHFiles.LoadQueueItem.html
@@ -110,6 +110,18 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
+LoadIncrementalHFiles.doBulkLoad(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.fs.Pathmap,
+  Adminadmin,
+  Tabletable,
+  RegionLocatorregionLocator,
+  booleansilence,
+  booleancopyFile)
+Perform a bulk load of the given directory into the given
+ pre-existing table.
+
+
+
 protected Pairhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 LoadIncrementalHFiles.groupOrSplit(com.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItemregionGroups,
 LoadIncrementalHFiles.LoadQueueItemitem,
@@ -118,21 +130,37 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Attempt to assign the given load queue item into its target 
region group.
 
 
-
-private Paircom.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+
+private Paircom.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 LoadIncrementalHFiles.groupOrSplitPhase(Tabletable,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool,
  http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeLoadIncrementalHFiles.LoadQueueItemqueue,
  Pairbyte[][],byte[][]startEndKeys)
 
+
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
+LoadIncrementalHFiles.performBulkLoad(Adminadmin,
+   Tabletable,
+   RegionLocatorregionLocator,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeLoadIncrementalHFiles.LoadQueueItemqueue,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool,
+   SecureBulkLoadClientsecureClient,
+   booleancopyFile)
+
 
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or 

[35/35] hbase-site git commit: Empty commit

2016-11-14 Thread stack
Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/36e5b7d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/36e5b7d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/36e5b7d6

Branch: refs/heads/asf-site
Commit: 36e5b7d692c14b6593d4c22f58ef4e4346bbaaf5
Parents: f17356a
Author: Michael Stack 
Authored: Mon Nov 14 21:45:08 2016 -0800
Committer: Michael Stack 
Committed: Mon Nov 14 21:45:08 2016 -0800

--

--




[18/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
index 89b0f0c..df6f4e7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
@@ -28,1103 +28,1096 @@
 020
 021import java.io.IOException;
 022import java.math.BigInteger;
-023import java.util.Arrays;
-024import java.util.Collection;
-025import java.util.Collections;
-026import java.util.Comparator;
-027import java.util.LinkedList;
-028import java.util.List;
+023
+024import java.util.Arrays;
+025import java.util.Collection;
+026import java.util.LinkedList;
+027import java.util.List;
+028import java.util.Map;
 029import java.util.Set;
 030import java.util.TreeMap;
-031
-032import 
org.apache.commons.cli.CommandLine;
-033import 
org.apache.commons.cli.GnuParser;
-034import 
org.apache.commons.cli.HelpFormatter;
-035import 
org.apache.commons.cli.OptionBuilder;
-036import org.apache.commons.cli.Options;
-037import 
org.apache.commons.cli.ParseException;
-038import 
org.apache.commons.lang.ArrayUtils;
-039import 
org.apache.commons.lang.StringUtils;
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.fs.FSDataInputStream;
-044import 
org.apache.hadoop.fs.FSDataOutputStream;
-045import org.apache.hadoop.fs.FileSystem;
-046import org.apache.hadoop.fs.Path;
-047import 
org.apache.hadoop.hbase.ClusterStatus;
-048import 
org.apache.hadoop.hbase.HBaseConfiguration;
-049import 
org.apache.hadoop.hbase.HColumnDescriptor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.HTableDescriptor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.TableName;
-056import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-057import 
org.apache.hadoop.hbase.client.Admin;
-058import 
org.apache.hadoop.hbase.client.ClusterConnection;
-059import 
org.apache.hadoop.hbase.client.Connection;
-060import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-061import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-062import 
org.apache.hadoop.hbase.client.RegionLocator;
-063import 
org.apache.hadoop.hbase.client.Table;
-064import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-065
-066import 
com.google.common.base.Preconditions;
-067import com.google.common.collect.Lists;
-068import com.google.common.collect.Maps;
-069import com.google.common.collect.Sets;
-070
-071/**
-072 * The {@link RegionSplitter} class 
provides several utilities to help in the
-073 * administration lifecycle for 
developers who choose to manually split regions
-074 * instead of having HBase handle that 
automatically. The most useful utilities
-075 * are:
-076 * p
-077 * ul
-078 * liCreate a table with a 
specified number of pre-split regions
-079 * liExecute a rolling split of 
all regions on an existing table
-080 * /ul
-081 * p
-082 * Both operations can be safely done on 
a live server.
-083 * p
-084 * bQuestion:/b How do I 
turn off automatic splitting? br
-085 * bAnswer:/b Automatic 
splitting is determined by the configuration value
-086 * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-087 * to Long.MAX_VALUE in case you forget 
about manual splits. A suggested setting
-088 * is 100GB, which would result in 
gt; 1hr major compactions if reached.
-089 * p
-090 * bQuestion:/b Why did 
the original authors decide to manually split? br
-091 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-092 * to benefit from a manual split 
system.
-093 * p
-094 * ul
-095 * liData (~1k) that would grow 
instead of being replaced
-096 * liData growth was roughly 
uniform across all regions
-097 * liOLTP workload. Data loss is 
a big deal.
-098 * /ul
-099 * p
-100 * bQuestion:/b Why is 
manual splitting good for this workload? br
-101 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-102 * benefits to manual splitting.
-103 * p
-104 * ul
-105 * liWith growing amounts of 
data, splits will continually be needed. Since
-106 * you always know exactly what regions 
you have, long-term debugging and
-107 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-108 * understand region level problems if it 
keeps splitting and getting renamed.
-109 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-110 

[23/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 6d25806..55fa666 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -370,12 +370,12 @@
 362   * @param regionLocator region 
locator
 363   * @param silence true to ignore 
unmatched column families
 364   * @param copyFile always copy hfiles 
if true
-365   * @return List of filenames which were 
not found
+365   * @return Map of LoadQueueItem to 
region
 366   * @throws TableNotFoundException if 
table does not yet exist
 367   */
-368  public ListString 
doBulkLoad(Mapbyte[], ListPath map, final Admin admin, Table 
table,
-369  RegionLocator regionLocator, 
boolean silence, boolean copyFile)
-370  throws 
TableNotFoundException, IOException {
+368  public MapLoadQueueItem, 
ByteBuffer doBulkLoad(Mapbyte[], ListPath map, final Admin 
admin,
+369  Table table, RegionLocator 
regionLocator, boolean silence, boolean copyFile)
+370  throws TableNotFoundException, 
IOException {
 371if 
(!admin.isTableAvailable(regionLocator.getName())) {
 372  throw new 
TableNotFoundException("Table " + table.getName() + " is not currently 
available.");
 373}
@@ -457,8 +457,8 @@
 449}
 450  }
 451
-452  ListString 
performBulkLoad(final Admin admin, Table table, RegionLocator regionLocator,
-453  DequeLoadQueueItem queue, 
ExecutorService pool,
+452  MapLoadQueueItem, ByteBuffer 
performBulkLoad(final Admin admin, Table table,
+453  RegionLocator regionLocator, 
DequeLoadQueueItem queue, ExecutorService pool,
 454  SecureBulkLoadClient secureClient, 
boolean copyFile) throws IOException {
 455int count = 0;
 456
@@ -472,802 +472,815 @@
 464// fs is the source filesystem
 465
fsDelegationToken.acquireDelegationToken(fs);
 466bulkToken = 
secureClient.prepareBulkLoad(admin.getConnection());
-467PairMultimapByteBuffer, 
LoadQueueItem, ListString pair = null;
+467PairMultimapByteBuffer, 
LoadQueueItem, SetString pair = null;
 468
-469// Assumes that region splits can 
happen while this occurs.
-470while (!queue.isEmpty()) {
-471  // need to reload split keys each 
iteration.
-472  final Pairbyte[][], 
byte[][] startEndKeys = regionLocator.getStartEndKeys();
-473  if (count != 0) {
-474LOG.info("Split occured while 
grouping HFiles, retry attempt " +
-475+ count + " with " + 
queue.size() + " files remaining to group or split");
-476  }
-477
-478  int maxRetries = 
getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-479  maxRetries = Math.max(maxRetries, 
startEndKeys.getFirst().length + 1);
-480  if (maxRetries != 0  
count = maxRetries) {
-481throw new IOException("Retry 
attempted " + count +
-482" times without completing, 
bailing out");
-483  }
-484  count++;
-485
-486  // Using ByteBuffer for byte[] 
equality semantics
-487  pair = groupOrSplitPhase(table, 
pool, queue, startEndKeys);
-488  MultimapByteBuffer, 
LoadQueueItem regionGroups = pair.getFirst();
-489
-490  if 
(!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-491// Error is logged inside 
checkHFilesCountPerRegionPerFamily.
-492throw new IOException("Trying to 
load more than " + maxFilesPerRegionPerFamily
-493+ " hfiles to one family of 
one region");
-494  }
-495
-496  bulkLoadPhase(table, 
admin.getConnection(), pool, queue, regionGroups, copyFile);
-497
-498  // NOTE: The next iteration's split 
/ group could happen in parallel to
-499  // atomic bulkloads assuming that 
there are splits and no merges, and
-500  // that we can atomically pull out 
the groups we want to retry.
-501}
-502
-503if (!queue.isEmpty()) {
-504  throw new RuntimeException("Bulk 
load aborted with some files not yet loaded."
-505+ "Please check log for more 
details.");
-506}
-507if (pair == null) return null;
-508return pair.getSecond();
-509  }
-510
-511  /**
-512   * Prepare a collection of {@link 
LoadQueueItem} from list of source hfiles contained in the
-513   * passed directory and validates 
whether the prepared queue has all the valid table column
-514   * families in it.
-515   * @param hfilesDir directory 
containing list of hfiles to be loaded into the table
-516   * @param table table to which hfiles 
should be loaded
-517   * @param queue queue which needs to be 
loaded into the table
-518   * @param validateHFile if true hfiles 
will be validated for its format
-519 

[31/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 88d1b6c..7373064 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -295,7 +295,7 @@
 2051
 0
 0
-13915
+13912
 
 Files
 
@@ -2503,7 +2503,7 @@
 org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 0
 0
-7
+5
 
 org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
 0
@@ -4538,7 +4538,7 @@
 org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 0
 0
-5
+4
 
 org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 0
@@ -6790,7 +6790,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1912
+1910
 Error
 
 coding
@@ -6905,7 +6905,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-476
+475
 Error
 
 
@@ -34072,31 +34072,19 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-507
-
-Error
-blocks
-NeedBraces
-'if' construct must use '{}'s.
-593
-
-Error
-sizes
-LineLength
-Line is longer than 100 characters (found 109).
-1196
+594
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-1203
+1216
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1240
+1253
 
 org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
 
@@ -63247,35 +63235,29 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-296
-
-Error
-blocks
-NeedBraces
-'if' construct must use '{}'s.
-300
+304
 
 org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'com.google.common.annotations.VisibleForTesting' 
import.
 25
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.conf.Configuration' import.
 28
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -63284,43 +63266,43 @@
 
 org/apache/hadoop/hbase/replication/ReplicationTableBase.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'java.io.IOException' import.
 46
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 323
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 343
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 361
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 415
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -63329,13 +63311,13 @@
 
 org/apache/hadoop/hbase/replication/ReplicationTracker.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -63344,13 +63326,13 @@
 
 org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
@@ -63359,67 +63341,67 @@
 
 org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'com.google.common.base.Predicate' import.
 30
-
+
 Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 6, expected level 
should be 4.
 54
-
+
 Error
 indentation
 Indentation
 'if' have incorrect indentation level 6, expected level should be 4.
 55
-
+
 Error
 indentation
 Indentation
 'if' child have incorrect indentation level 8, expected level should be 
6.
 56
-
+
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
 59
-
+
 Error
 indentation
 Indentation
 'if rcurly' have incorrect indentation level 6, expected level should be 
4.
 62
-
+
 Error
 indentation
 Indentation
 'if' have incorrect indentation level 8, expected level should be 6.
 63
-
+
 Error
 indentation
 Indentation
 'if' child have incorrect indentation level 10, expected level should be 
8.
 64
-
+
 Error
 indentation
 Indentation
 'if rcurly' have incorrect indentation level 8, expected level should be 
6.
 65
-
+
 Error
 indentation
 Indentation
@@ -63428,13 +63410,13 @@
 
 org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
@@ -63443,55 +63425,55 @@
 
 org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'java.io.IOException' import.
 44
-
+
 Error
 imports
 UnusedImports
 Unused import - java.util.HashMap.
 47
-
+
 Error
 imports
 UnusedImports
 Unused import - java.util.HashSet.
 48
-
+
 Error
 imports
 UnusedImports
 Unused import - java.util.Set.
 51
-
+
 Error

[27/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 3719f0c..fa2916a 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -337,22 +337,22 @@ extends org.jamon.AbstractTemplateProxy
 
 
 Field Detail
-
+
 
 
 
 
-frags
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer frags
+filter
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String filter
 
 
-
+
 
 
 
 
-metaLocation
-protectedServerName metaLocation
+deadServers
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName deadServers
 
 
 
@@ -364,58 +364,58 @@ extends org.jamon.AbstractTemplateProxy
 protectedboolean catalogJanitorEnabled
 
 
-
+
 
 
 
 
-deadServers
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName deadServers
+format
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String format
 
 
-
+
 
 
 
 
-assignmentManager
-protectedAssignmentManager assignmentManager
+serverManager
+protectedServerManager serverManager
 
 
-
+
 
 
 
 
-servers
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName servers
+frags
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer frags
 
 
-
+
 
 
 
 
-format
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String format
+metaLocation
+protectedServerName metaLocation
 
 
-
+
 
 
 
 
-filter
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String filter
+assignmentManager
+protectedAssignmentManager assignmentManager
 
 
-
+
 
 
 
 
-serverManager
-protectedServerManager serverManager
+servers
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName servers
 
 
 
@@ -487,22 +487,22 @@ extends org.jamon.AbstractTemplateProxy
 
 
 
-
+
 
 
 
 
-setFrags
-public finalMasterStatusTmplsetFrags(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integerp_frags)
+setFilter
+public finalMasterStatusTmplsetFilter(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringp_filter)
 
 
-
+
 
 
 
 
-setMetaLocation
-public finalMasterStatusTmplsetMetaLocation(ServerNamep_metaLocation)
+setDeadServers
+public finalMasterStatusTmplsetDeadServers(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerNamep_deadServers)
 
 
 
@@ -514,58 +514,58 @@ extends org.jamon.AbstractTemplateProxy
 public finalMasterStatusTmplsetCatalogJanitorEnabled(booleanp_catalogJanitorEnabled)
 
 
-
+
 
 
 
 
-setDeadServers
-public finalMasterStatusTmplsetDeadServers(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerNamep_deadServers)
+setFormat
+public finalMasterStatusTmplsetFormat(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringp_format)
 
 
-
+
 
 
 
 
-setAssignmentManager
-public finalMasterStatusTmplsetAssignmentManager(AssignmentManagerp_assignmentManager)

[21/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
index 2b27b2b..0a4e021 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-065@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+064@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+065@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
 066@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-067@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
-068@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-069@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-070@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-071@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-072@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager")})
+067@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+068@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+069@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
+070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+071@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+072@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,40 +118,40 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 21, 1
-114public void 
setFrags(MapString,Integer frags)
+113// 26, 1
+114public void setFilter(String 
filter)
 115{
-116  // 21, 1
-117  m_frags = frags;
-118  m_frags__IsNotDefault = true;
+116  // 26, 1
+117  m_filter = filter;
+118  m_filter__IsNotDefault = true;
 119}
-120public MapString,Integer 
getFrags()
+120public String getFilter()
 121{
-122  return m_frags;
+122  return m_filter;
 123}
-124private MapString,Integer 
m_frags;
-125public boolean 
getFrags__IsNotDefault()
+124private String m_filter;
+125public boolean 
getFilter__IsNotDefault()
 126{
-127  return m_frags__IsNotDefault;
+127  return m_filter__IsNotDefault;
 128}
-129private boolean 
m_frags__IsNotDefault;
-130// 22, 1
-131public void 
setMetaLocation(ServerName metaLocation)
+129private boolean 
m_filter__IsNotDefault;
+130// 24, 1
+131public void 
setDeadServers(SetServerName deadServers)
 132{
-133  // 22, 1
-134  m_metaLocation = metaLocation;
-135  m_metaLocation__IsNotDefault = 
true;
+133  // 24, 1
+134  m_deadServers = deadServers;
+135  m_deadServers__IsNotDefault = 
true;
 136}
-137public ServerName getMetaLocation()
+137public SetServerName 
getDeadServers()
 138{
-139  return m_metaLocation;
+139  return m_deadServers;
 140}
-141private ServerName m_metaLocation;
-142public boolean 
getMetaLocation__IsNotDefault()
+141private SetServerName 
m_deadServers;
+142public boolean 
getDeadServers__IsNotDefault()
 143{
-144  return 
m_metaLocation__IsNotDefault;
+144  return 
m_deadServers__IsNotDefault;
 145}
-146private boolean 
m_metaLocation__IsNotDefault;
+146private boolean 
m_deadServers__IsNotDefault;
 147// 25, 1
 148public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 149{
@@ -169,108 +169,108 @@
 161  return 
m_catalogJanitorEnabled__IsNotDefault;
 162}
 163private boolean 
m_catalogJanitorEnabled__IsNotDefault;
-164// 24, 1
-165public void 
setDeadServers(SetServerName deadServers)
+164// 27, 1
+165public void setFormat(String 
format)
 166{
-167  // 24, 1
-168  m_deadServers = deadServers;
-169  m_deadServers__IsNotDefault = 
true;
+167  // 27, 1
+168  m_format = format;
+169  m_format__IsNotDefault = true;
 170}
-171public SetServerName 
getDeadServers()
+171public String getFormat()
 172{
-173  return m_deadServers;

[32/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index f082317..11ac63a 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -317,7 +317,7 @@ under the License. -->
 http://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-11-11
+  Last Published: 
2016-11-14
 
 
 



[05/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
--
diff --git 
a/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
 
b/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
index 484d2ee..b5415ac 100644
--- 
a/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
+++ 
b/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
@@ -553,121 +553,123 @@
 543 
ListString queues = rq.getUnClaimedQueueIds(deadRsZnode);
 544 for(String queue:queues){
 545   
PairString, SortedSetString pair = rq.claimQueue(deadRsZnode, 
queue);
-546   
logZnodesMap.put(pair.getFirst(), pair.getSecond());
-547 }
-548 
server.abort("Done with testing", null);
-549   } catch (Exception e) {
-550 
LOG.error("Got exception while running 
NodeFailoverWorker", e);
-551   } finally {
-552 
latch.countDown();
-553   }
-554 }
-555
-556 /**
-557  * @return 1 when the map is not empty.
-558  */
-559 private int 
isLogZnodesMapPopulated() {
-560   
CollectionSetString sets = logZnodesMap.values();
-561   if (sets.size()  1) {
-562 throw new 
RuntimeException("unexpected size of logZnodesMap: 
" + sets.size());
-563   }
-564   if (sets.size() == 1) {
-565 
SetString s = sets.iterator().next();
-566 for (String file : files) {
-567   // at least one file was missing
-568   if (!s.contains(file)) {
-569 return 0;
-570   }
-571 }
-572 return 1; // we found all 
the files
-573   }
-574   return 0;
-575 }
-576   }
-577
-578   static class 
FailInitializeDummyReplicationSource
 extends ReplicationSourceDummy
 {
+546   if (pair != null) {
+547 
logZnodesMap.put(pair.getFirst(), pair.getSecond());
+548   }
+549 }
+550 
server.abort("Done with testing", null);
+551   } catch (Exception e) {
+552 
LOG.error("Got exception while running 
NodeFailoverWorker", e);
+553   } finally {
+554 
latch.countDown();
+555   }
+556 }
+557
+558 /**
+559  * @return 1 when the map is not empty.
+560  */
+561 private int 
isLogZnodesMapPopulated() {
+562   
CollectionSetString sets = logZnodesMap.values();
+563   if (sets.size()  1) {
+564 throw new 
RuntimeException("unexpected size of logZnodesMap: 
" + sets.size());
+565   }
+566   if (sets.size() == 1) {
+567 
SetString s = sets.iterator().next();
+568 for (String file : files) {
+569   // at least one file was missing
+570   if (!s.contains(file)) {
+571 return 0;
+572   }
+573 }
+574 return 1; // we found all 
the files
+575   }
+576   return 0;
+577 }
+578   }
 579
-580 @Override
-581 public void 
init(Configuration conf, FileSystem fs, ReplicationSourceManager manager,
-582 
ReplicationQueues rq, ReplicationPeers rp, Stoppable stopper, String 
peerClusterId,
-583 UUID 
clusterId, ReplicationEndpoint replicationEndpoint, MetricsSource metrics)
-584 throws IOException {
-585   throw new 
IOException("Failing deliberately");
-586 }
-587   }
-588
-589   static class 
DummyServer
 implements Server {
-590 String hostname;
-591
-592 DummyServer()
 {
-593   hostname = 
"hostname.example.org";
-594 }
-595
-596 DummyServer(String
 hostname) {
-597   this.hostname = hostname;
-598 }
-599
-600 @Override
-601 public Configuration getConfiguration() {
-602   return conf;
-603 }
-604
-605 @Override
-606 public ZooKeeperWatcher getZooKeeper() {
-607   return zkw;
-608 }
-609
-610 @Override
-611 public CoordinatedStateManager 
getCoordinatedStateManager() {
-612   return null;
-613 }
-614 @Override
-615 public ClusterConnection getConnection() {
-616   return null;
-617 }
-618
-619 @Override
-620 public MetaTableLocator getMetaTableLocator() {
-621   return null;
-622 }
-623
-624 @Override
-625 public ServerName getServerName() {
-626   return ServerName.valueOf(hostname, 1234, 1L);
-627 }
-628
-629 @Override
-630 public void 
abort(String why, Throwable e) {
-631   // To change body of implemented methods use File | 
Settings | File Templates.
-632 }
-633
-634 @Override
-635 public boolean isAborted() {
-636   return false;
-637 }
-638
-639 @Override
-640 public void 
stop(String why) {
-641   // To change body of implemented methods use File | 
Settings | File Templates.
-642 }
-643
-644 @Override

[22/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
index f9833df..58c6a9c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
@@ -259,145 +259,153 @@
 251  }
 252
 253  /**
-254   * It "atomically" copies all the wals 
queues from another region server and returns them all
-255   * sorted per peer cluster (appended 
with the dead server's znode).
+254   * It "atomically" copies one peer's 
wals queue from another dead region server and returns them
+255   * all sorted. The new peer id is equal 
to the old peer id appended with the dead server's znode.
 256   * @param znode pertaining to the 
region server to copy the queues from
-257   */
-258  private PairString, 
SortedSetString moveQueueUsingMulti(String znode, String peerId) 
{
-259try {
-260  // hbase/replication/rs/deadrs
-261  String deadRSZnodePath = 
ZKUtil.joinZNode(this.queuesZNode, znode);
-262  ListZKUtilOp listOfOps = 
new ArrayList();
-263  ReplicationQueueInfo 
replicationQueueInfo = new ReplicationQueueInfo(peerId);
-264  if 
(!peerExists(replicationQueueInfo.getPeerId())) {
-265// the orphaned queues must be 
moved, otherwise the delete op of dead rs will fail,
-266// this will cause the whole 
multi op fail.
-267// NodeFailoverWorker will skip 
the orphaned queues.
-268LOG.warn("Peer " + peerId +
-269" didn't exist, will move its 
queue to avoid the failure of multi op");
-270  }
-271  String newPeerId = peerId + "-" + 
znode;
-272  String newPeerZnode = 
ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
-273  // check the logs queue for the old 
peer cluster
-274  String oldClusterZnode = 
ZKUtil.joinZNode(deadRSZnodePath, peerId);
-275  ListString wals = 
ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
-276  SortedSetString logQueue = 
new TreeSet();
-277  if (wals == null || wals.size() == 
0) {
-278
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-279  } else {
-280// create the new cluster znode
-281ZKUtilOp op = 
ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
-282listOfOps.add(op);
-283// get the offset of the logs and 
set it to new znodes
-284for (String wal : wals) {
-285  String oldWalZnode = 
ZKUtil.joinZNode(oldClusterZnode, wal);
-286  byte[] logOffset = 
ZKUtil.getData(this.zookeeper, oldWalZnode);
-287  LOG.debug("Creating " + wal + " 
with data " + Bytes.toString(logOffset));
-288  String newLogZnode = 
ZKUtil.joinZNode(newPeerZnode, wal);
-289  
listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
-290  
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
-291  logQueue.add(wal);
-292}
-293// add delete op for peer
-294
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-295
-296if (LOG.isTraceEnabled())
-297  LOG.trace(" The multi list size 
is: " + listOfOps.size());
-298  }
-299  
ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false);
-300  if (LOG.isTraceEnabled())
-301LOG.trace("Atomically moved the 
dead regionserver logs. ");
-302  return new Pair(newPeerId, 
logQueue);
-303} catch (KeeperException e) {
-304  // Multi call failed; it looks like 
some other regionserver took away the logs.
-305  LOG.warn("Got exception in 
copyQueuesFromRSUsingMulti: ", e);
-306} catch (InterruptedException e) {
-307  LOG.warn("Got exception in 
copyQueuesFromRSUsingMulti: ", e);
-308  
Thread.currentThread().interrupt();
-309}
-310return null;
-311  }
-312
-313  @Override
-314  public void addHFileRefs(String peerId, 
ListString files) throws ReplicationException {
-315String peerZnode = 
ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
-316boolean debugEnabled = 
LOG.isDebugEnabled();
-317if (debugEnabled) {
-318  LOG.debug("Adding hfile references 
" + files + " in queue " + peerZnode);
-319}
-320ListZKUtilOp listOfOps = new 
ArrayListZKUtil.ZKUtilOp();
-321int size = files.size();
-322for (int i = 0; i  size; i++) {
-323  
listOfOps.add(ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(peerZnode, 
files.get(i)),
-324HConstants.EMPTY_BYTE_ARRAY));
-325}
-326if (debugEnabled) {
-327  LOG.debug(" The multi list size for 
adding hfile references in zk for node " + peerZnode
-328  + " is " + 

[07/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
--
diff --git 
a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html 
b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
index e0d658a..e348ec6 100644
--- a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
+++ b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
@@ -34,723 +34,730 @@
 24  import static 
org.junit.Assert.fail;
 25  
 26  import java.io.IOException;
-27  import java.util.ArrayList;
-28  import java.util.List;
-29  import java.util.Locale;
-30  import java.util.Map;
-31  import java.util.TreeMap;
-32
-33  import org.apache.hadoop.conf.Configuration;
-34  import org.apache.hadoop.fs.FSDataOutputStream;
-35  import org.apache.hadoop.fs.FileStatus;
-36  import org.apache.hadoop.fs.FileSystem;
-37  import org.apache.hadoop.fs.Path;
-38  import org.apache.hadoop.hbase.HBaseTestingUtility;
-39  import org.apache.hadoop.hbase.HColumnDescriptor;
-40  import org.apache.hadoop.hbase.HConstants;
-41  import org.apache.hadoop.hbase.HTableDescriptor;
-42  import org.apache.hadoop.hbase.NamespaceDescriptor;
-43  import org.apache.hadoop.hbase.TableName;
-44  import 
org.apache.hadoop.hbase.TableNotFoundException;
-45  import org.apache.hadoop.hbase.client.Table;
-46  import 
org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-47  import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-48  import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-49  import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-50  import org.apache.hadoop.hbase.io.hfile.HFile;
-51  import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-52  import 
org.apache.hadoop.hbase.regionserver.BloomType;
-53  import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-54  import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
-55  import org.apache.hadoop.hbase.util.Bytes;
-56  import org.apache.hadoop.hbase.util.FSUtils;
-57  import org.apache.hadoop.hbase.util.HFileTestUtil;
-58  import org.junit.AfterClass;
-59  import org.junit.BeforeClass;
-60  import org.junit.Rule;
-61  import org.junit.Test;
-62  import org.junit.experimental.categories.Category;
-63  import org.junit.rules.TestName;
-64
-65  /**
-66   * Test cases for the "load" half of the 
HFileOutputFormat bulk load
-67   * functionality. These tests run faster than the 
full MR cluster
-68   * tests in TestHFileOutputFormat
-69   */
-70  
@Category({MapReduceTests.class, 
LargeTests.class})
-71  public class 
TestLoadIncrementalHFiles
 {
-72@Rule
-73public TestName tn = new TestName();
-74
-75private static final 
byte[] QUALIFIER = Bytes.toBytes("myqual");
-76private static final 
byte[] FAMILY = Bytes.toBytes("myfam");
-77private static final 
String NAMESPACE = "bulkNS";
-78
-79static final 
String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
-80static final 
int MAX_FILES_PER_REGION_PER_FAMILY = 4;
+27  import java.nio.ByteBuffer;
+28  import java.util.ArrayList;
+29  import java.util.Deque;
+30  import java.util.List;
+31  import java.util.Locale;
+32  import java.util.Map;
+33  import java.util.TreeMap;
+34
+35  import org.apache.hadoop.conf.Configuration;
+36  import org.apache.hadoop.fs.FSDataOutputStream;
+37  import org.apache.hadoop.fs.FileStatus;
+38  import org.apache.hadoop.fs.FileSystem;
+39  import org.apache.hadoop.fs.Path;
+40  import org.apache.hadoop.hbase.HBaseTestingUtility;
+41  import org.apache.hadoop.hbase.HColumnDescriptor;
+42  import org.apache.hadoop.hbase.HConstants;
+43  import org.apache.hadoop.hbase.HTableDescriptor;
+44  import org.apache.hadoop.hbase.NamespaceDescriptor;
+45  import org.apache.hadoop.hbase.TableName;
+46  import 
org.apache.hadoop.hbase.TableNotFoundException;
+47  import org.apache.hadoop.hbase.client.Table;
+48  import 
org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+49  import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+50  import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+51  import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
+52  import org.apache.hadoop.hbase.io.hfile.HFile;
+53  import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
+54  import 
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+55  import 
org.apache.hadoop.hbase.regionserver.BloomType;
+56  import 
org.apache.hadoop.hbase.testclassification.LargeTests;
+57  import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
+58  import org.apache.hadoop.hbase.util.Bytes;
+59  import org.apache.hadoop.hbase.util.FSUtils;
+60  import org.apache.hadoop.hbase.util.HFileTestUtil;
+61  import org.junit.AfterClass;
+62  import org.junit.BeforeClass;
+63  import org.junit.Rule;
+64  import org.junit.Test;
+65  import 

[08/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
index 9da7348..d3a77ec 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
@@ -551,121 +551,123 @@
 543ListString queues = 
rq.getUnClaimedQueueIds(deadRsZnode);
 544for(String queue:queues){
 545  PairString, 
SortedSetString pair = rq.claimQueue(deadRsZnode, queue);
-546  
logZnodesMap.put(pair.getFirst(), pair.getSecond());
-547}
-548server.abort("Done with testing", 
null);
-549  } catch (Exception e) {
-550LOG.error("Got exception while 
running NodeFailoverWorker", e);
-551  } finally {
-552latch.countDown();
-553  }
-554}
-555
-556/**
-557 * @return 1 when the map is not 
empty.
-558 */
-559private int isLogZnodesMapPopulated() 
{
-560  CollectionSetString 
sets = logZnodesMap.values();
-561  if (sets.size()  1) {
-562throw new 
RuntimeException("unexpected size of logZnodesMap: " + sets.size());
-563  }
-564  if (sets.size() == 1) {
-565SetString s = 
sets.iterator().next();
-566for (String file : files) {
-567  // at least one file was 
missing
-568  if (!s.contains(file)) {
-569return 0;
-570  }
-571}
-572return 1; // we found all the 
files
-573  }
-574  return 0;
-575}
-576  }
-577
-578  static class 
FailInitializeDummyReplicationSource extends ReplicationSourceDummy {
+546  if (pair != null) {
+547
logZnodesMap.put(pair.getFirst(), pair.getSecond());
+548  }
+549}
+550server.abort("Done with testing", 
null);
+551  } catch (Exception e) {
+552LOG.error("Got exception while 
running NodeFailoverWorker", e);
+553  } finally {
+554latch.countDown();
+555  }
+556}
+557
+558/**
+559 * @return 1 when the map is not 
empty.
+560 */
+561private int isLogZnodesMapPopulated() 
{
+562  CollectionSetString 
sets = logZnodesMap.values();
+563  if (sets.size()  1) {
+564throw new 
RuntimeException("unexpected size of logZnodesMap: " + sets.size());
+565  }
+566  if (sets.size() == 1) {
+567SetString s = 
sets.iterator().next();
+568for (String file : files) {
+569  // at least one file was 
missing
+570  if (!s.contains(file)) {
+571return 0;
+572  }
+573}
+574return 1; // we found all the 
files
+575  }
+576  return 0;
+577}
+578  }
 579
-580@Override
-581public void init(Configuration conf, 
FileSystem fs, ReplicationSourceManager manager,
-582ReplicationQueues rq, 
ReplicationPeers rp, Stoppable stopper, String peerClusterId,
-583UUID clusterId, 
ReplicationEndpoint replicationEndpoint, MetricsSource metrics)
-584throws IOException {
-585  throw new IOException("Failing 
deliberately");
-586}
-587  }
-588
-589  static class DummyServer implements 
Server {
-590String hostname;
-591
-592DummyServer() {
-593  hostname = 
"hostname.example.org";
-594}
-595
-596DummyServer(String hostname) {
-597  this.hostname = hostname;
-598}
-599
-600@Override
-601public Configuration 
getConfiguration() {
-602  return conf;
-603}
-604
-605@Override
-606public ZooKeeperWatcher 
getZooKeeper() {
-607  return zkw;
-608}
-609
-610@Override
-611public CoordinatedStateManager 
getCoordinatedStateManager() {
-612  return null;
-613}
-614@Override
-615public ClusterConnection 
getConnection() {
-616  return null;
-617}
-618
-619@Override
-620public MetaTableLocator 
getMetaTableLocator() {
-621  return null;
-622}
-623
-624@Override
-625public ServerName getServerName() {
-626  return ServerName.valueOf(hostname, 
1234, 1L);
-627}
-628
-629@Override
-630public void abort(String why, 
Throwable e) {
-631  // To change body of implemented 
methods use File | Settings | File Templates.
-632}
-633
-634@Override
-635public boolean isAborted() {
-636  return false;
-637}
-638
-639@Override
-640public void stop(String why) {
-641  // To change body of implemented 
methods use 

[01/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dbfeb6d66 -> 36e5b7d69


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
--
diff --git a/xref/org/apache/hadoop/hbase/util/RegionSplitter.html 
b/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
index 5cae107..c705d7f 100644
--- a/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
+++ b/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
@@ -30,1103 +30,1096 @@
 20  
 21  import java.io.IOException;
 22  import java.math.BigInteger;
-23  import java.util.Arrays;
-24  import java.util.Collection;
-25  import java.util.Collections;
-26  import java.util.Comparator;
-27  import java.util.LinkedList;
-28  import java.util.List;
+23
+24  import java.util.Arrays;
+25  import java.util.Collection;
+26  import java.util.LinkedList;
+27  import java.util.List;
+28  import java.util.Map;
 29  import java.util.Set;
 30  import java.util.TreeMap;
-31  
-32  import org.apache.commons.cli.CommandLine;
-33  import org.apache.commons.cli.GnuParser;
-34  import org.apache.commons.cli.HelpFormatter;
-35  import org.apache.commons.cli.OptionBuilder;
-36  import org.apache.commons.cli.Options;
-37  import org.apache.commons.cli.ParseException;
-38  import org.apache.commons.lang.ArrayUtils;
-39  import org.apache.commons.lang.StringUtils;
-40  import org.apache.commons.logging.Log;
-41  import org.apache.commons.logging.LogFactory;
-42  import org.apache.hadoop.conf.Configuration;
-43  import org.apache.hadoop.fs.FSDataInputStream;
-44  import org.apache.hadoop.fs.FSDataOutputStream;
-45  import org.apache.hadoop.fs.FileSystem;
-46  import org.apache.hadoop.fs.Path;
-47  import org.apache.hadoop.hbase.ClusterStatus;
-48  import org.apache.hadoop.hbase.HBaseConfiguration;
-49  import org.apache.hadoop.hbase.HColumnDescriptor;
-50  import org.apache.hadoop.hbase.HRegionInfo;
-51  import org.apache.hadoop.hbase.HRegionLocation;
-52  import org.apache.hadoop.hbase.HTableDescriptor;
-53  import org.apache.hadoop.hbase.MetaTableAccessor;
-54  import org.apache.hadoop.hbase.ServerName;
-55  import org.apache.hadoop.hbase.TableName;
-56  import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-57  import org.apache.hadoop.hbase.client.Admin;
-58  import 
org.apache.hadoop.hbase.client.ClusterConnection;
-59  import org.apache.hadoop.hbase.client.Connection;
-60  import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-61  import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-62  import 
org.apache.hadoop.hbase.client.RegionLocator;
-63  import org.apache.hadoop.hbase.client.Table;
-64  import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-65  
-66  import com.google.common.base.Preconditions;
-67  import com.google.common.collect.Lists;
-68  import com.google.common.collect.Maps;
-69  import com.google.common.collect.Sets;
-70  
-71  /**
-72   * The {@link RegionSplitter} class provides several 
utilities to help in the
-73   * administration lifecycle for developers who 
choose to manually split regions
-74   * instead of having HBase handle that 
automatically. The most useful utilities
-75   * are:
-76   * p
-77   * ul
-78   * liCreate a table with a specified number 
of pre-split regions
-79   * liExecute a rolling split of all regions 
on an existing table
-80   * /ul
-81   * p
-82   * Both operations can be safely done on a live 
server.
-83   * p
-84   * bQuestion:/b How do I turn off 
automatic splitting? br
-85   * bAnswer:/b Automatic splitting is 
determined by the configuration value
-86   * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-87   * to Long.MAX_VALUE in case you forget about manual 
splits. A suggested setting
-88   * is 100GB, which would result in gt; 1hr 
major compactions if reached.
-89   * p
-90   * bQuestion:/b Why did the original 
authors decide to manually split? br
-91   * bAnswer:/b Specific workload 
characteristics of our use case allowed us
-92   * to benefit from a manual split system.
-93   * p
-94   * ul
-95   * liData (~1k) that would grow instead of 
being replaced
-96   * liData growth was roughly uniform across 
all regions
-97   * liOLTP workload. Data loss is a big 
deal.
-98   * /ul
-99   * p
-100  * bQuestion:/b Why is manual 
splitting good for this workload? br
-101  * bAnswer:/b Although automated 
splitting is not a bad option, there are
-102  * benefits to manual splitting.
-103  * p
-104  * ul
-105  * liWith growing amounts of data, splits 
will continually be needed. Since
-106  * you always know exactly what regions you have, 
long-term debugging and
-107  * profiling is much easier with manual splits. It 
is hard to trace the logs to
-108  * understand region level problems if it keeps 
splitting and getting renamed.
-109  * liData offlining bugs + unknown number of 
split regions == oh crap! If an

[26/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html 
b/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
index 8870c77..19815f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RegionSplitter.UniformSplit
+public static class RegionSplitter.UniformSplit
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionSplitter.SplitAlgorithm
 A SplitAlgorithm that divides the space of possible keys 
evenly. Useful
@@ -281,7 +281,7 @@ implements 
 
 xFF
-static finalbyte xFF
+static finalbyte xFF
 
 See Also:
 Constant
 Field Values
@@ -294,7 +294,7 @@ implements 
 
 firstRowBytes
-byte[] firstRowBytes
+byte[] firstRowBytes
 
 
 
@@ -303,7 +303,7 @@ implements 
 
 lastRowBytes
-byte[] lastRowBytes
+byte[] lastRowBytes
 
 
 
@@ -320,7 +320,7 @@ implements 
 
 UniformSplit
-publicUniformSplit()
+publicUniformSplit()
 
 
 
@@ -337,7 +337,7 @@ implements 
 
 split
-publicbyte[]split(byte[]start,
+publicbyte[]split(byte[]start,
 byte[]end)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Split a pre-existing region into 2 regions.
@@ -358,7 +358,7 @@ implements 
 
 split
-publicbyte[][]split(intnumRegions)
+publicbyte[][]split(intnumRegions)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Split an entire table.
 
@@ -378,7 +378,7 @@ implements 
 
 firstRow
-publicbyte[]firstRow()
+publicbyte[]firstRow()
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the first row is represented by an empty byte 
array. This might
  cause problems with your split algorithm or row printing. All your APIs
@@ -397,7 +397,7 @@ implements 
 
 lastRow
-publicbyte[]lastRow()
+publicbyte[]lastRow()
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the last row is represented by an empty byte 
array. This might
  cause problems with your split algorithm or row printing. All your APIs
@@ -416,7 +416,7 @@ implements 
 
 setFirstRow
-publicvoidsetFirstRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
+publicvoidsetFirstRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the last row is represented by an empty byte 
array. Set this
  value to help the split code understand how to evenly divide the first
@@ -435,7 +435,7 @@ implements 
 
 setLastRow
-publicvoidsetLastRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
+publicvoidsetLastRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the last row is represented by an empty byte 
array. Set this
  value to help the split code understand how to evenly divide the last
@@ -455,7 +455,7 @@ implements 
 
 setFirstRow
-publicvoidsetFirstRow(byte[]userInput)
+publicvoidsetFirstRow(byte[]userInput)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Set the first row
 
@@ -472,7 +472,7 @@ implements 
 
 setLastRow
-publicvoidsetLastRow(byte[]userInput)
+publicvoidsetLastRow(byte[]userInput)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Set the last row
 
@@ -489,7 +489,7 @@ implements 
 
 strToRow
-publicbyte[]strToRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringinput)
+publicbyte[]strToRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringinput)
 
 Specified by:
 strToRowin
 interfaceRegionSplitter.SplitAlgorithm
@@ -506,7 +506,7 @@ implements 
 
 rowToStr
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrowToStr(byte[]row)
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrowToStr(byte[]row)
 
 Specified by:
 rowToStrin
 interfaceRegionSplitter.SplitAlgorithm
@@ -523,7 +523,7 @@ implements 
 
 separator

[33/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 6d25806..55fa666 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -370,12 +370,12 @@
 362   * @param regionLocator region 
locator
 363   * @param silence true to ignore 
unmatched column families
 364   * @param copyFile always copy hfiles 
if true
-365   * @return List of filenames which were 
not found
+365   * @return Map of LoadQueueItem to 
region
 366   * @throws TableNotFoundException if 
table does not yet exist
 367   */
-368  public ListString 
doBulkLoad(Mapbyte[], ListPath map, final Admin admin, Table 
table,
-369  RegionLocator regionLocator, 
boolean silence, boolean copyFile)
-370  throws 
TableNotFoundException, IOException {
+368  public MapLoadQueueItem, 
ByteBuffer doBulkLoad(Mapbyte[], ListPath map, final Admin 
admin,
+369  Table table, RegionLocator 
regionLocator, boolean silence, boolean copyFile)
+370  throws TableNotFoundException, 
IOException {
 371if 
(!admin.isTableAvailable(regionLocator.getName())) {
 372  throw new 
TableNotFoundException("Table " + table.getName() + " is not currently 
available.");
 373}
@@ -457,8 +457,8 @@
 449}
 450  }
 451
-452  ListString 
performBulkLoad(final Admin admin, Table table, RegionLocator regionLocator,
-453  DequeLoadQueueItem queue, 
ExecutorService pool,
+452  MapLoadQueueItem, ByteBuffer 
performBulkLoad(final Admin admin, Table table,
+453  RegionLocator regionLocator, 
DequeLoadQueueItem queue, ExecutorService pool,
 454  SecureBulkLoadClient secureClient, 
boolean copyFile) throws IOException {
 455int count = 0;
 456
@@ -472,802 +472,815 @@
 464// fs is the source filesystem
 465
fsDelegationToken.acquireDelegationToken(fs);
 466bulkToken = 
secureClient.prepareBulkLoad(admin.getConnection());
-467PairMultimapByteBuffer, 
LoadQueueItem, ListString pair = null;
+467PairMultimapByteBuffer, 
LoadQueueItem, SetString pair = null;
 468
-469// Assumes that region splits can 
happen while this occurs.
-470while (!queue.isEmpty()) {
-471  // need to reload split keys each 
iteration.
-472  final Pairbyte[][], 
byte[][] startEndKeys = regionLocator.getStartEndKeys();
-473  if (count != 0) {
-474LOG.info("Split occured while 
grouping HFiles, retry attempt " +
-475+ count + " with " + 
queue.size() + " files remaining to group or split");
-476  }
-477
-478  int maxRetries = 
getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-479  maxRetries = Math.max(maxRetries, 
startEndKeys.getFirst().length + 1);
-480  if (maxRetries != 0  
count = maxRetries) {
-481throw new IOException("Retry 
attempted " + count +
-482" times without completing, 
bailing out");
-483  }
-484  count++;
-485
-486  // Using ByteBuffer for byte[] 
equality semantics
-487  pair = groupOrSplitPhase(table, 
pool, queue, startEndKeys);
-488  MultimapByteBuffer, 
LoadQueueItem regionGroups = pair.getFirst();
-489
-490  if 
(!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-491// Error is logged inside 
checkHFilesCountPerRegionPerFamily.
-492throw new IOException("Trying to 
load more than " + maxFilesPerRegionPerFamily
-493+ " hfiles to one family of 
one region");
-494  }
-495
-496  bulkLoadPhase(table, 
admin.getConnection(), pool, queue, regionGroups, copyFile);
-497
-498  // NOTE: The next iteration's split 
/ group could happen in parallel to
-499  // atomic bulkloads assuming that 
there are splits and no merges, and
-500  // that we can atomically pull out 
the groups we want to retry.
-501}
-502
-503if (!queue.isEmpty()) {
-504  throw new RuntimeException("Bulk 
load aborted with some files not yet loaded."
-505+ "Please check log for more 
details.");
-506}
-507if (pair == null) return null;
-508return pair.getSecond();
-509  }
-510
-511  /**
-512   * Prepare a collection of {@link 
LoadQueueItem} from list of source hfiles contained in the
-513   * passed directory and validates 
whether the prepared queue has all the valid table column
-514   * families in it.
-515   * @param hfilesDir directory 
containing list of hfiles to be loaded into the table
-516   * @param table table to which hfiles 
should be loaded
-517   * @param queue queue which needs to be 
loaded into the table
-518   * @param validateHFile if true hfiles 
will be validated for its format
-519   * @throws 

[15/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/distribution-management.html
--
diff --git a/distribution-management.html b/distribution-management.html
index dc8b719..51a60f3 100644
--- a/distribution-management.html
+++ b/distribution-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Distribution Management
 
@@ -302,7 +302,7 @@
 http://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-11-11
+  Last Published: 
2016-11-14
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 6b05628..42b52d4 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -342,7 +342,7 @@ for more details.
 http://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-11-11
+  Last Published: 
2016-11-14
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 949a4ca..6f5d677 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-11-11
+Last Published: 2016-11-14
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index 9f50ade..202e944 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-11-11
+Last Published: 2016-11-14
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 6e7480e..224f125 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-11-11
+Last Published: 2016-11-14
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index c8811e5..76aea12 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-11-11
+Last Published: 2016-11-14
   | Version: 
2.0.0-SNAPSHOT
 

[29/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 64414af..49afb32 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -273,12 +273,13 @@ implements org.apache.hadoop.util.Tool
 
 
 protected void
-bulkLoadPhase(Tabletable,
+bulkLoadPhase(Tabletable,
  Connectionconn,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool,
  http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeLoadIncrementalHFiles.LoadQueueItemqueue,
  com.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItemregionGroups,
- booleancopyFile)
+ booleancopyFile,
+ http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferitem2RegionMap)
 This takes the LQI's grouped by likely regions and attempts 
to bulk load
  them.
 
@@ -324,7 +325,7 @@ implements org.apache.hadoop.util.Tool
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
 doBulkLoad(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.fs.Pathmap,
   Adminadmin,
   Tabletable,
@@ -371,7 +372,7 @@ implements org.apache.hadoop.util.Tool
 
 
 
-private Paircom.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+private Paircom.google.common.collect.Multimaphttp://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer,LoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 groupOrSplitPhase(Tabletable,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool,
  http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeLoadIncrementalHFiles.LoadQueueItemqueue,
@@ -413,7 +414,7 @@ implements org.apache.hadoop.util.Tool
 main(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String[]args)
 
 
-(package private) http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapLoadIncrementalHFiles.LoadQueueItem,http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
 performBulkLoad(Adminadmin,

[16/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
index 89b0f0c..df6f4e7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
@@ -28,1103 +28,1096 @@
 020
 021import java.io.IOException;
 022import java.math.BigInteger;
-023import java.util.Arrays;
-024import java.util.Collection;
-025import java.util.Collections;
-026import java.util.Comparator;
-027import java.util.LinkedList;
-028import java.util.List;
+023
+024import java.util.Arrays;
+025import java.util.Collection;
+026import java.util.LinkedList;
+027import java.util.List;
+028import java.util.Map;
 029import java.util.Set;
 030import java.util.TreeMap;
-031
-032import 
org.apache.commons.cli.CommandLine;
-033import 
org.apache.commons.cli.GnuParser;
-034import 
org.apache.commons.cli.HelpFormatter;
-035import 
org.apache.commons.cli.OptionBuilder;
-036import org.apache.commons.cli.Options;
-037import 
org.apache.commons.cli.ParseException;
-038import 
org.apache.commons.lang.ArrayUtils;
-039import 
org.apache.commons.lang.StringUtils;
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.fs.FSDataInputStream;
-044import 
org.apache.hadoop.fs.FSDataOutputStream;
-045import org.apache.hadoop.fs.FileSystem;
-046import org.apache.hadoop.fs.Path;
-047import 
org.apache.hadoop.hbase.ClusterStatus;
-048import 
org.apache.hadoop.hbase.HBaseConfiguration;
-049import 
org.apache.hadoop.hbase.HColumnDescriptor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.HTableDescriptor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.TableName;
-056import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-057import 
org.apache.hadoop.hbase.client.Admin;
-058import 
org.apache.hadoop.hbase.client.ClusterConnection;
-059import 
org.apache.hadoop.hbase.client.Connection;
-060import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-061import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-062import 
org.apache.hadoop.hbase.client.RegionLocator;
-063import 
org.apache.hadoop.hbase.client.Table;
-064import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-065
-066import 
com.google.common.base.Preconditions;
-067import com.google.common.collect.Lists;
-068import com.google.common.collect.Maps;
-069import com.google.common.collect.Sets;
-070
-071/**
-072 * The {@link RegionSplitter} class 
provides several utilities to help in the
-073 * administration lifecycle for 
developers who choose to manually split regions
-074 * instead of having HBase handle that 
automatically. The most useful utilities
-075 * are:
-076 * p
-077 * ul
-078 * liCreate a table with a 
specified number of pre-split regions
-079 * liExecute a rolling split of 
all regions on an existing table
-080 * /ul
-081 * p
-082 * Both operations can be safely done on 
a live server.
-083 * p
-084 * bQuestion:/b How do I 
turn off automatic splitting? br
-085 * bAnswer:/b Automatic 
splitting is determined by the configuration value
-086 * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-087 * to Long.MAX_VALUE in case you forget 
about manual splits. A suggested setting
-088 * is 100GB, which would result in 
gt; 1hr major compactions if reached.
-089 * p
-090 * bQuestion:/b Why did 
the original authors decide to manually split? br
-091 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-092 * to benefit from a manual split 
system.
-093 * p
-094 * ul
-095 * liData (~1k) that would grow 
instead of being replaced
-096 * liData growth was roughly 
uniform across all regions
-097 * liOLTP workload. Data loss is 
a big deal.
-098 * /ul
-099 * p
-100 * bQuestion:/b Why is 
manual splitting good for this workload? br
-101 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-102 * benefits to manual splitting.
-103 * p
-104 * ul
-105 * liWith growing amounts of 
data, splits will continually be needed. Since
-106 * you always know exactly what regions 
you have, long-term debugging and
-107 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-108 * understand region level problems if it 
keeps splitting and getting renamed.
-109 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-110 * WAL or StoreFile was mistakenly 
unprocessed by HBase due to a weird bug 

[06/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
--
diff --git 
a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
 
b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
index 2e33eee..ff39921 100644
--- 
a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
+++ 
b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
@@ -37,586 +37,589 @@
 27  import java.util.Collection;
 28  import java.util.Deque;
 29  import java.util.List;
-30  import java.util.NavigableMap;
-31  import java.util.concurrent.ExecutorService;
-32  import java.util.concurrent.atomic.AtomicInteger;
-33
-34  import org.apache.commons.logging.Log;
-35  import org.apache.commons.logging.LogFactory;
-36  import org.apache.hadoop.conf.Configuration;
-37  import org.apache.hadoop.fs.FileSystem;
-38  import org.apache.hadoop.fs.Path;
-39  import org.apache.hadoop.hbase.HBaseTestingUtility;
-40  import org.apache.hadoop.hbase.HColumnDescriptor;
-41  import org.apache.hadoop.hbase.HConstants;
-42  import org.apache.hadoop.hbase.HRegionInfo;
-43  import org.apache.hadoop.hbase.HRegionLocation;
-44  import org.apache.hadoop.hbase.HTableDescriptor;
-45  import org.apache.hadoop.hbase.MetaTableAccessor;
-46  import org.apache.hadoop.hbase.ServerName;
-47  import 
org.apache.hadoop.hbase.TableExistsException;
-48  import org.apache.hadoop.hbase.TableName;
-49  import org.apache.hadoop.hbase.client.Admin;
-50  import 
org.apache.hadoop.hbase.client.ClusterConnection;
-51  import org.apache.hadoop.hbase.client.Connection;
-52  import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-53  import 
org.apache.hadoop.hbase.client.RegionLocator;
-54  import org.apache.hadoop.hbase.client.Result;
-55  import 
org.apache.hadoop.hbase.client.ResultScanner;
-56  import org.apache.hadoop.hbase.client.Scan;
-57  import org.apache.hadoop.hbase.client.Table;
-58  import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-59  import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-60  import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-61  import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-62  import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-63  import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-64  import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-65  import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
-66  import org.apache.hadoop.hbase.util.Bytes;
-67  import org.apache.hadoop.hbase.util.FSUtils;
-68  import org.apache.hadoop.hbase.util.Pair;
-69  import org.junit.AfterClass;
-70  import org.junit.BeforeClass;
-71  import org.junit.Test;
-72  import org.junit.experimental.categories.Category;
-73  import org.mockito.Mockito;
-74
-75  import com.google.common.collect.Multimap;
+30  import java.util.Map;
+31  import java.util.NavigableMap;
+32  import java.util.concurrent.ExecutorService;
+33  import java.util.concurrent.atomic.AtomicInteger;
+34
+35  import org.apache.commons.logging.Log;
+36  import org.apache.commons.logging.LogFactory;
+37  import org.apache.hadoop.conf.Configuration;
+38  import org.apache.hadoop.fs.FileSystem;
+39  import org.apache.hadoop.fs.Path;
+40  import org.apache.hadoop.hbase.HBaseTestingUtility;
+41  import org.apache.hadoop.hbase.HColumnDescriptor;
+42  import org.apache.hadoop.hbase.HConstants;
+43  import org.apache.hadoop.hbase.HRegionInfo;
+44  import org.apache.hadoop.hbase.HRegionLocation;
+45  import org.apache.hadoop.hbase.HTableDescriptor;
+46  import org.apache.hadoop.hbase.MetaTableAccessor;
+47  import org.apache.hadoop.hbase.ServerName;
+48  import 
org.apache.hadoop.hbase.TableExistsException;
+49  import org.apache.hadoop.hbase.TableName;
+50  import org.apache.hadoop.hbase.client.Admin;
+51  import 
org.apache.hadoop.hbase.client.ClusterConnection;
+52  import org.apache.hadoop.hbase.client.Connection;
+53  import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+54  import 
org.apache.hadoop.hbase.client.RegionLocator;
+55  import org.apache.hadoop.hbase.client.Result;
+56  import 
org.apache.hadoop.hbase.client.ResultScanner;
+57  import org.apache.hadoop.hbase.client.Scan;
+58  import org.apache.hadoop.hbase.client.Table;
+59  import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+60  import 
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+61  import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+62  import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+63  import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+64  import 

[34/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f17356a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f17356a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f17356a7

Branch: refs/heads/asf-site
Commit: f17356a79505371288cd8d82700110743eaa6227
Parents: dbfeb6d
Author: jenkins 
Authored: Mon Nov 14 14:45:49 2016 +
Committer: Michael Stack 
Committed: Mon Nov 14 21:44:52 2016 -0800

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/index-all.html  | 2 +-
 .../hadoop/hbase/class-use/TableName.html   | 2 +-
 .../hbase/class-use/TableNotFoundException.html | 2 +-
 .../hadoop/hbase/client/class-use/Admin.html| 2 +-
 .../hbase/client/class-use/Connection.html  | 5 +-
 .../hbase/client/class-use/RegionLocator.html   | 2 +-
 .../hadoop/hbase/client/class-use/Table.html| 7 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.html  |66 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.html  |  1603 +--
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 12556 -
 checkstyle.rss  |   154 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   | 6 +-
 .../hadoop/hbase/class-use/TableName.html   | 2 +-
 .../hbase/class-use/TableNotFoundException.html | 2 +-
 .../hadoop/hbase/client/class-use/Admin.html| 4 +-
 .../hbase/client/class-use/Connection.html  | 5 +-
 .../hbase/client/class-use/RegionLocator.html   | 4 +-
 .../client/class-use/SecureBulkLoadClient.html  | 2 +-
 .../hadoop/hbase/client/class-use/Table.html|11 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.html  |   116 +-
 .../LoadIncrementalHFiles.LoadQueueItem.html|87 +-
 .../replication/ReplicationQueuesZKImpl.html|18 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   240 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |96 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |48 +-
 .../regionserver/RSStatusTmpl.ImplData.html |90 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |36 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |18 +-
 .../util/RegionSplitter.HexStringSplit.html |52 +-
 .../util/RegionSplitter.SplitAlgorithm.html |24 +-
 .../hbase/util/RegionSplitter.UniformSplit.html |34 +-
 .../hadoop/hbase/util/RegionSplitter.html   |24 +-
 .../hadoop/hbase/util/class-use/Pair.html   | 8 +-
 .../org/apache/hadoop/hbase/Version.html| 6 +-
 .../LoadIncrementalHFiles.BulkHFileVisitor.html |  1603 +--
 .../LoadIncrementalHFiles.LoadQueueItem.html|  1603 +--
 .../hbase/mapreduce/LoadIncrementalHFiles.html  |  1603 +--
 .../replication/ReplicationQueuesZKImpl.html|   282 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   240 +-
 .../tmpl/master/MasterStatusTmpl.Intf.html  |   240 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |   240 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |64 +-
 .../regionserver/RSStatusTmpl.ImplData.html |90 +-
 .../tmpl/regionserver/RSStatusTmpl.Intf.html|90 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |90 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |24 +-
 .../util/RegionSplitter.HexStringSplit.html |  2167 ++-
 .../util/RegionSplitter.SplitAlgorithm.html |  2167 ++-
 .../hbase/util/RegionSplitter.UniformSplit.html |  2167 ++-
 .../hadoop/hbase/util/RegionSplitter.html   |  2167 ++-
 distribution-management.html| 4 +-
 export_control.html | 4 +-
 hbase-annotations/checkstyle.html   | 6 +-
 hbase-annotations/dependencies.html | 6 +-
 hbase-annotations/dependency-convergence.html   | 6 +-
 hbase-annotations/dependency-info.html  | 6 +-
 hbase-annotations/dependency-management.html| 6 +-
 hbase-annotations/distribution-management.html  | 6 +-
 hbase-annotations/index.html| 6 +-
 hbase-annotations/integration.html  | 6 +-
 hbase-annotations/issue-tracking.html

[09/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
index a5ea2a3..83f97ce 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.html
@@ -35,586 +35,589 @@
 027import java.util.Collection;
 028import java.util.Deque;
 029import java.util.List;
-030import java.util.NavigableMap;
-031import 
java.util.concurrent.ExecutorService;
-032import 
java.util.concurrent.atomic.AtomicInteger;
-033
-034import org.apache.commons.logging.Log;
-035import 
org.apache.commons.logging.LogFactory;
-036import 
org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.fs.FileSystem;
-038import org.apache.hadoop.fs.Path;
-039import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-040import 
org.apache.hadoop.hbase.HColumnDescriptor;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.HRegionInfo;
-043import 
org.apache.hadoop.hbase.HRegionLocation;
-044import 
org.apache.hadoop.hbase.HTableDescriptor;
-045import 
org.apache.hadoop.hbase.MetaTableAccessor;
-046import 
org.apache.hadoop.hbase.ServerName;
-047import 
org.apache.hadoop.hbase.TableExistsException;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.client.Admin;
-050import 
org.apache.hadoop.hbase.client.ClusterConnection;
-051import 
org.apache.hadoop.hbase.client.Connection;
-052import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-053import 
org.apache.hadoop.hbase.client.RegionLocator;
-054import 
org.apache.hadoop.hbase.client.Result;
-055import 
org.apache.hadoop.hbase.client.ResultScanner;
-056import 
org.apache.hadoop.hbase.client.Scan;
-057import 
org.apache.hadoop.hbase.client.Table;
-058import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-062import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-063import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-064import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-065import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import 
org.apache.hadoop.hbase.util.Pair;
-069import org.junit.AfterClass;
-070import org.junit.BeforeClass;
-071import org.junit.Test;
-072import 
org.junit.experimental.categories.Category;
-073import org.mockito.Mockito;
-074
-075import 
com.google.common.collect.Multimap;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import 
java.util.concurrent.ExecutorService;
+033import 
java.util.concurrent.atomic.AtomicInteger;
+034
+035import org.apache.commons.logging.Log;
+036import 
org.apache.commons.logging.LogFactory;
+037import 
org.apache.hadoop.conf.Configuration;
+038import org.apache.hadoop.fs.FileSystem;
+039import org.apache.hadoop.fs.Path;
+040import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+041import 
org.apache.hadoop.hbase.HColumnDescriptor;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.HRegionInfo;
+044import 
org.apache.hadoop.hbase.HRegionLocation;
+045import 
org.apache.hadoop.hbase.HTableDescriptor;
+046import 
org.apache.hadoop.hbase.MetaTableAccessor;
+047import 
org.apache.hadoop.hbase.ServerName;
+048import 
org.apache.hadoop.hbase.TableExistsException;
+049import 
org.apache.hadoop.hbase.TableName;
+050import 
org.apache.hadoop.hbase.client.Admin;
+051import 
org.apache.hadoop.hbase.client.ClusterConnection;
+052import 
org.apache.hadoop.hbase.client.Connection;
+053import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+054import 
org.apache.hadoop.hbase.client.RegionLocator;
+055import 
org.apache.hadoop.hbase.client.Result;
+056import 
org.apache.hadoop.hbase.client.ResultScanner;
+057import 
org.apache.hadoop.hbase.client.Scan;
+058import 
org.apache.hadoop.hbase.client.Table;
+059import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+060import 
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+064import 

[20/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
index 36f2731..23fce63 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
@@ -34,9 +34,9 @@
 026@org.jamon.annotations.Argument(name 
= "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
 028@org.jamon.annotations.Argument(name 
= "bcv", type = "String"),
-029@org.jamon.annotations.Argument(name 
= "bcn", type = "String"),
-030@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-031@org.jamon.annotations.Argument(name 
= "filter", type = "String")})
+029@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+030@org.jamon.annotations.Argument(name 
= "bcn", type = "String"),
+031@org.jamon.annotations.Argument(name 
= "format", type = "String")})
 032public class RSStatusTmpl
 033  extends 
org.jamon.AbstractTemplateProxy
 034{
@@ -94,57 +94,57 @@
 086  return m_bcv__IsNotDefault;
 087}
 088private boolean 
m_bcv__IsNotDefault;
-089// 23, 1
-090public void setBcn(String bcn)
+089// 21, 1
+090public void setFilter(String 
filter)
 091{
-092  // 23, 1
-093  m_bcn = bcn;
-094  m_bcn__IsNotDefault = true;
+092  // 21, 1
+093  m_filter = filter;
+094  m_filter__IsNotDefault = true;
 095}
-096public String getBcn()
+096public String getFilter()
 097{
-098  return m_bcn;
+098  return m_filter;
 099}
-100private String m_bcn;
-101public boolean 
getBcn__IsNotDefault()
+100private String m_filter;
+101public boolean 
getFilter__IsNotDefault()
 102{
-103  return m_bcn__IsNotDefault;
+103  return m_filter__IsNotDefault;
 104}
-105private boolean 
m_bcn__IsNotDefault;
-106// 22, 1
-107public void setFormat(String 
format)
+105private boolean 
m_filter__IsNotDefault;
+106// 23, 1
+107public void setBcn(String bcn)
 108{
-109  // 22, 1
-110  m_format = format;
-111  m_format__IsNotDefault = true;
+109  // 23, 1
+110  m_bcn = bcn;
+111  m_bcn__IsNotDefault = true;
 112}
-113public String getFormat()
+113public String getBcn()
 114{
-115  return m_format;
+115  return m_bcn;
 116}
-117private String m_format;
-118public boolean 
getFormat__IsNotDefault()
+117private String m_bcn;
+118public boolean 
getBcn__IsNotDefault()
 119{
-120  return m_format__IsNotDefault;
+120  return m_bcn__IsNotDefault;
 121}
-122private boolean 
m_format__IsNotDefault;
-123// 21, 1
-124public void setFilter(String 
filter)
+122private boolean 
m_bcn__IsNotDefault;
+123// 22, 1
+124public void setFormat(String 
format)
 125{
-126  // 21, 1
-127  m_filter = filter;
-128  m_filter__IsNotDefault = true;
+126  // 22, 1
+127  m_format = format;
+128  m_format__IsNotDefault = true;
 129}
-130public String getFilter()
+130public String getFormat()
 131{
-132  return m_filter;
+132  return m_format;
 133}
-134private String m_filter;
-135public boolean 
getFilter__IsNotDefault()
+134private String m_format;
+135public boolean 
getFormat__IsNotDefault()
 136{
-137  return m_filter__IsNotDefault;
+137  return m_format__IsNotDefault;
 138}
-139private boolean 
m_filter__IsNotDefault;
+139private boolean 
m_format__IsNotDefault;
 140  }
 141  @Override
 142  protected 
org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -163,24 +163,24 @@
 155return this;
 156  }
 157  
-158  protected String bcn;
-159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcn(String p_bcn)
+158  protected String filter;
+159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
 160  {
-161(getImplData()).setBcn(p_bcn);
+161
(getImplData()).setFilter(p_filter);
 162return this;
 163  }
 164  
-165  protected String format;
-166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String 
p_format)
+165  protected String bcn;
+166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcn(String p_bcn)
 167  {
-168
(getImplData()).setFormat(p_format);
+168(getImplData()).setBcn(p_bcn);
 169return this;
 170  }
 171  
-172  protected String filter;
-173  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
+172  

[24/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
index 6d25806..55fa666 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
@@ -370,12 +370,12 @@
 362   * @param regionLocator region 
locator
 363   * @param silence true to ignore 
unmatched column families
 364   * @param copyFile always copy hfiles 
if true
-365   * @return List of filenames which were 
not found
+365   * @return Map of LoadQueueItem to 
region
 366   * @throws TableNotFoundException if 
table does not yet exist
 367   */
-368  public ListString 
doBulkLoad(Mapbyte[], ListPath map, final Admin admin, Table 
table,
-369  RegionLocator regionLocator, 
boolean silence, boolean copyFile)
-370  throws 
TableNotFoundException, IOException {
+368  public MapLoadQueueItem, 
ByteBuffer doBulkLoad(Mapbyte[], ListPath map, final Admin 
admin,
+369  Table table, RegionLocator 
regionLocator, boolean silence, boolean copyFile)
+370  throws TableNotFoundException, 
IOException {
 371if 
(!admin.isTableAvailable(regionLocator.getName())) {
 372  throw new 
TableNotFoundException("Table " + table.getName() + " is not currently 
available.");
 373}
@@ -457,8 +457,8 @@
 449}
 450  }
 451
-452  ListString 
performBulkLoad(final Admin admin, Table table, RegionLocator regionLocator,
-453  DequeLoadQueueItem queue, 
ExecutorService pool,
+452  MapLoadQueueItem, ByteBuffer 
performBulkLoad(final Admin admin, Table table,
+453  RegionLocator regionLocator, 
DequeLoadQueueItem queue, ExecutorService pool,
 454  SecureBulkLoadClient secureClient, 
boolean copyFile) throws IOException {
 455int count = 0;
 456
@@ -472,802 +472,815 @@
 464// fs is the source filesystem
 465
fsDelegationToken.acquireDelegationToken(fs);
 466bulkToken = 
secureClient.prepareBulkLoad(admin.getConnection());
-467PairMultimapByteBuffer, 
LoadQueueItem, ListString pair = null;
+467PairMultimapByteBuffer, 
LoadQueueItem, SetString pair = null;
 468
-469// Assumes that region splits can 
happen while this occurs.
-470while (!queue.isEmpty()) {
-471  // need to reload split keys each 
iteration.
-472  final Pairbyte[][], 
byte[][] startEndKeys = regionLocator.getStartEndKeys();
-473  if (count != 0) {
-474LOG.info("Split occured while 
grouping HFiles, retry attempt " +
-475+ count + " with " + 
queue.size() + " files remaining to group or split");
-476  }
-477
-478  int maxRetries = 
getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-479  maxRetries = Math.max(maxRetries, 
startEndKeys.getFirst().length + 1);
-480  if (maxRetries != 0  
count = maxRetries) {
-481throw new IOException("Retry 
attempted " + count +
-482" times without completing, 
bailing out");
-483  }
-484  count++;
-485
-486  // Using ByteBuffer for byte[] 
equality semantics
-487  pair = groupOrSplitPhase(table, 
pool, queue, startEndKeys);
-488  MultimapByteBuffer, 
LoadQueueItem regionGroups = pair.getFirst();
-489
-490  if 
(!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-491// Error is logged inside 
checkHFilesCountPerRegionPerFamily.
-492throw new IOException("Trying to 
load more than " + maxFilesPerRegionPerFamily
-493+ " hfiles to one family of 
one region");
-494  }
-495
-496  bulkLoadPhase(table, 
admin.getConnection(), pool, queue, regionGroups, copyFile);
-497
-498  // NOTE: The next iteration's split 
/ group could happen in parallel to
-499  // atomic bulkloads assuming that 
there are splits and no merges, and
-500  // that we can atomically pull out 
the groups we want to retry.
-501}
-502
-503if (!queue.isEmpty()) {
-504  throw new RuntimeException("Bulk 
load aborted with some files not yet loaded."
-505+ "Please check log for more 
details.");
-506}
-507if (pair == null) return null;
-508return pair.getSecond();
-509  }
-510
-511  /**
-512   * Prepare a collection of {@link 
LoadQueueItem} from list of source hfiles contained in the
-513   * passed directory and validates 
whether the prepared queue has all the valid table column
-514   * families in it.
-515   * @param hfilesDir directory 
containing list of hfiles to be loaded into the table
-516   * @param table table to which hfiles 
should be loaded
-517   * @param queue queue which needs to be 
loaded into the table
-518   * 

[12/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/hbase-spark/scaladocs/org/apache/hadoop/hbase/spark/HBaseConnectionCacheStat.html
--
diff --git 
a/hbase-spark/scaladocs/org/apache/hadoop/hbase/spark/HBaseConnectionCacheStat.html
 
b/hbase-spark/scaladocs/org/apache/hadoop/hbase/spark/HBaseConnectionCacheStat.html
new file mode 100644
index 000..f323fea
--- /dev/null
+++ 
b/hbase-spark/scaladocs/org/apache/hadoop/hbase/spark/HBaseConnectionCacheStat.html
@@ -0,0 +1,450 @@
+
+http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd;>
+
+
+  HBaseConnectionCacheStat - Apache HBase - Spark 
2.0.0-SNAPSHOT API - 
org.apache.hadoop.hbase.spark.HBaseConnectionCacheStat
+  
+  
+  
+
+  
+  
+  
+ if(top === self) {
+var url = '../../../../../index.html';
+var hash = 
'org.apache.hadoop.hbase.spark.HBaseConnectionCacheStat';
+var anchor = window.location.hash;
+var anchor_opt = '';
+if (anchor.length >= 1)
+  anchor_opt = '@' + anchor.substring(1);
+window.location.href = url + '#' + hash + anchor_opt;
+ }
+ 
+
+
+
+  
+
+org.apache.hadoop.hbase.spark
+HBaseConnectionCacheStat
+  
+
+  
+  
+
+case class
+  
+  
+HBaseConnectionCacheStat(numTotalRequests: Long, numActualConnectionsCreated: Long, numActiveConnections: Long) extends Product with Serializable
+  
+  
+
+  To log the state of 'HBaseConnectionCache'
+numTotalRequestsnumber of total 
connection requests to the cachenumActualConnectionsCreatednumber of 
actual HBase connections the cache ever creatednumActiveConnectionsnumber of current 
alive HBase connections the cache is holding
+
+  Linear Supertypes
+  Serializable, Serializable, Product, Equals, AnyRef, Any
+
+
+
+  
+
+
+  Ordering
+  
+
+Alphabetic
+By inheritance
+  
+
+
+Inherited
+
+
+  HBaseConnectionCacheStatSerializableSerializableProductEqualsAnyRefAny
+
+  
+
+
+  Hide All
+  Show all
+
+http://docs.scala-lang.org/overviews/scaladoc/usage.html#members; 
target="_blank">Learn more about member selection
+  
+
+Visibility
+PublicAll
+  
+  
+
+  
+
+
+  Instance Constructors
+  
+  
+  
+  
+  
+
+new
+  
+  
+HBaseConnectionCacheStat(numTotalRequests: Long, numActualConnectionsCreated: Long, numActiveConnections: Long)
+  
+  
+  numTotalRequestsnumber of total 
connection requests to the cachenumActualConnectionsCreatednumber of 
actual HBase connections the cache ever creatednumActiveConnectionsnumber of current 
alive HBase connections the cache is holding
+
+
+
+
+
+
+
+
+
+  Value Members
+  
+  
+  
+  
+  
+final 
+def
+  
+  
+!=(arg0: AnyRef): Boolean
+  
+  
+   Definition 
ClassesAnyRef
+
+  
+  
+  
+  
+final 
+def
+  
+  
+!=(arg0: Any): Boolean
+  
+  
+   Definition 
ClassesAny
+
+  
+  
+  
+  
+final 
+def
+  
+  
+##(): Int
+  
+  
+   Definition 
ClassesAnyRef → Any
+
+  
+  
+  
+  
+final 
+def
+  
+  
+==(arg0: AnyRef): Boolean
+  
+  
+   Definition 
ClassesAnyRef
+
+  
+  
+  
+  
+final 
+def
+  
+  
+==(arg0: Any): Boolean
+  
+  
+   Definition 
ClassesAny
+
+  
+  
+  
+  
+final 
+def
+  
+  
+asInstanceOf[T0]: T0
+  
+  
+   Definition 
ClassesAny
+
+  
+  
+  
+  
+
+def
+  
+  
+clone(): AnyRef
+  
+  
+   
Attributesprotected[java.lang] Definition 
ClassesAnyRefAnnotations
+@throws(
+
+  ...
+)
+
+
+
+  
+  
+  
+  
+final 
+def
+  
+  
+eq(arg0: AnyRef): Boolean
+  
+  
+   Definition 
ClassesAnyRef
+
+  
+  
+  
+  
+
+def
+  
+  
+finalize(): Unit
+  
+  
+   
Attributesprotected[java.lang] Definition 
ClassesAnyRefAnnotations
+@throws(
+
+  classOf[java.lang.Throwable]
+)
+
+
+
+  
+  
+

[31/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HConstants.Modify.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HConstants.Modify.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HConstants.Modify.html
new file mode 100644
index 000..f06515f
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HConstants.Modify.html
@@ -0,0 +1,397 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HConstants.Modify (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":9,"i1":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Enum Constants|
+Field|
+Method
+
+
+Detail:
+Enum Constants|
+Field|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Enum HConstants.Modify
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">java.lang.EnumHConstants.Modify
+
+
+org.apache.hadoop.hbase.HConstants.Modify
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable, http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHConstants.Modify
+
+
+Enclosing class:
+HConstants
+
+
+
+@InterfaceAudience.Private
+public static enum HConstants.Modify
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumHConstants.Modify
+modifyTable op for replacing the table descriptor
+
+
+
+
+
+
+
+
+
+
+
+Enum Constant Summary
+
+Enum Constants
+
+Enum Constant and Description
+
+
+CLOSE_REGION
+
+
+TABLE_COMPACT
+
+
+TABLE_FLUSH
+
+
+TABLE_MAJOR_COMPACT
+
+
+TABLE_SET_HTD
+
+
+TABLE_SPLIT
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static HConstants.Modify
+valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
+Returns the enum constant of this type with the specified 
name.
+
+
+
+static HConstants.Modify[]
+values()
+Returns an array containing the constants of this enum 
type, in
+the order they are declared.
+
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">Enum
+http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#compareTo-E-;
 title="class or interface in java.lang">compareTo, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#getDeclaringClass--;
 title="class or interface in java.lang">getDeclaringClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/
 api/java/lang/Enum.html?is-external=true#name--" title="class or interface in 
java.lang">name, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#ordinal--;
 title="class or interface in java.lang">ordinal, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true#valueOf-java.lang.Class-java.lang.String-;
 title="class or interface in java.lang">valueOf
+
+
+

[17/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValueUtil.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/KeyValueUtil.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValueUtil.html
new file mode 100644
index 000..9484567
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValueUtil.html
@@ -0,0 +1,1175 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValueUtil (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":41,"i25":41,"i26":41,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class KeyValueUtil
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValueUtil
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class KeyValueUtil
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+static convenience methods for dealing with KeyValues and 
collections of KeyValues
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+KeyValueUtil()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+static int
+appendKeyTo(Cellcell,
+   byte[]output,
+   intoffset)
+
+
+static int
+appendToByteArray(Cellcell,
+ byte[]output,
+ intoffset)
+copy key and value
+
+
+
+static void
+appendToByteBuffer(http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbb,
+  KeyValuekv,
+  booleanincludeMvccVersion)
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
+copyKeyToNewByteBuffer(Cellcell)
+The position will be set to the beginning of the new 
ByteBuffer
+
+
+
+static byte[]
+copyToNewByteArray(Cellcell)
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
+copyToNewByteBuffer(Cellcell)
+The position will be set to the beginning of the new 
ByteBuffer
+
+
+
+static KeyValue
+copyToNewKeyValue(Cellcell)
+copy key only
+
+
+
+static KeyValue
+createFirstDeleteFamilyOnRow(byte[]row,
+byte[]family)
+Create a Delete Family KeyValue for the specified row and 
family that would
+ be smaller than all other possible Delete Family KeyValues that have the
+ same row and family.
+
+
+
+static KeyValue
+createFirstKeyInIncrementedRow(Cellin)
+Increment the row bytes and clear the other fields
+
+
+
+static KeyValue
+createFirstKeyInNextRow(Cellin)
+Append single byte 0x00 to the end of the input row 
key
+
+
+
+static KeyValue
+createFirstOnRow(byte[]row)
+Create a KeyValue that is smaller than all other possible 
KeyValues
+ for the given row.
+
+
+
+static KeyValue
+createFirstOnRow(byte[]row,
+byte[]family,
+byte[]qualifier)
+Create a KeyValue for the specified row, family and 
qualifier that would be
+ smaller than all other possible KeyValues that have the same 
row,family,qualifier.
+
+
+
+static KeyValue
+createFirstOnRow(byte[]buffer,
+byte[]row,
+byte[]family,
+byte[]qualifier)
+Create a KeyValue for the specified row, family and 
qualifier that would be
+ smaller than all other possible KeyValues that have the same row,
+ family, qualifier.
+
+
+
+static KeyValue

[26/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
new file mode 100644
index 000..3d2ca78
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -0,0 +1,2618 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HTableDescriptor (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":42,"i22":10,"i23":10,"i24":41,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":9,"i42":42,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":42,"i57":42,"i58":10,"i59":42,"i60":42,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":42};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HTableDescriptor
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HTableDescriptor
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHTableDescriptor, 
org.apache.hadoop.io.Writable, org.apache.hadoop.io.WritableComparableHTableDescriptor
+
+
+Direct Known Subclasses:
+UnmodifyableHTableDescriptor
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class HTableDescriptor
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements org.apache.hadoop.io.WritableComparableHTableDescriptor
+HTableDescriptor contains the details about an HBase table  
such as the descriptors of
+ all the column families, is the table a catalog table,  -ROOT-  
or
+  hbase:meta , if the table is read only, the maximum size of the 
memstore,
+ when the region split should occur, coprocessors associated with it 
etc...
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPACTION_ENABLED
+INTERNAL Used by HBase Shell interface to access 
this metadata
+ attribute which denotes if the table is compaction enabled
+
+
+
+static boolean
+DEFAULT_COMPACTION_ENABLED
+Constant that denotes whether the table is compaction 
enabled by default
+
+
+
+static long
+DEFAULT_MEMSTORE_FLUSH_SIZE
+Constant that denotes the maximum default size of the 
memstore after which
+ the contents are flushed to the store files
+
+
+
+static boolean
+DEFAULT_NORMALIZATION_ENABLED
+Constant that denotes whether the table is normalized by 
default.
+
+
+
+static boolean
+DEFAULT_READONLY
+Constant that denotes whether the table is READONLY by 
default and is false
+
+
+
+static boolean
+DEFAULT_REGION_MEMSTORE_REPLICATION
+
+
+static int
+DEFAULT_REGION_REPLICATION
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+DEFERRED_LOG_FLUSH
+Deprecated.
+Use DURABILITY
 instead.
+
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+DURABILITY
+INTERNAL Durability setting for the 
table.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 

[30/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HConstants.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HConstants.html
new file mode 100644
index 000..a554b91
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HConstants.html
@@ -0,0 +1,5912 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HConstants (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HConstants
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HConstants
+
+
+
+
+
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Stable
+public final class HConstants
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+HConstants holds a bunch of HBase-related constants
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static int
+ADMIN_QOS
+
+
+static int
+ALL_VERSIONS
+Define for 'return-all-versions'.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BASE_NAMESPACE_DIR
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BUCKET_CACHE_IOENGINE_KEY
+Current ioengine options in include: heap, offheap and 
file:PATH (where PATH is the path
+ to the file that will host the file-based cache.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BUCKET_CACHE_SIZE_KEY
+When using bucket cache, this is a float that EITHER 
represents a percentage of total heap
+ memory size to give to the cache (if  1.0) OR, it is the capacity in
+ megabytes of the cache.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BYTES_PER_CHECKSUM
+The name of the configuration parameter that specifies
+ the number of bytes in a newly created checksum chunk.
+
+
+
+static byte[]
+CATALOG_FAMILY
+The catalog family
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CATALOG_FAMILY_STR
+The catalog family as a string
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CHECKSUM_TYPE_NAME
+The name of the configuration parameter that specifies
+ the name of an algorithm that is used to compute checksums
+ for newly created blocks.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CIPHER_AES
+Default cipher for encryption
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CLIENT_PORT_STR
+The ZK client port key in the ZK properties map.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CLUSTER_DISTRIBUTED
+Cluster is in distributed mode or not
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CLUSTER_ID_DEFAULT
+Default value for cluster ID
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CLUSTER_ID_FILE_NAME
+name of the file for unique cluster ID
+
+
+
+static boolean
+CLUSTER_IS_DISTRIBUTED
+Cluster is fully-distributed
+
+
+
+static boolean
+CLUSTER_IS_LOCAL
+Cluster is standalone or pseudo-distributed
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPACTION_KV_MAX
+Parameter name for the 

[44/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/help-doc.html
--
diff --git a/1.2/apidocs/help-doc.html b/1.2/apidocs/help-doc.html
new file mode 100644
index 000..1461e78
--- /dev/null
+++ b/1.2/apidocs/help-doc.html
@@ -0,0 +1,230 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+API Help (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+
+How This API Document Is Organized
+This API (Application Programming Interface) document 
has pages corresponding to the items in the navigation bar, described as 
follows.
+
+
+
+
+Overview
+The Overview page is the front page of 
this API document and provides a list of all packages with a summary for each.  
This page can also contain an overall description of the set of packages.
+
+
+Package
+Each package has a page that contains a list of its classes and interfaces, 
with a summary for each. This page can contain six categories:
+
+Interfaces (italic)
+Classes
+Enums
+Exceptions
+Errors
+Annotation Types
+
+
+
+Class/Interface
+Each class, interface, nested class and nested interface has its own 
separate page. Each of these pages has three sections consisting of a 
class/interface description, summary tables, and detailed member 
descriptions:
+
+Class inheritance diagram
+Direct Subclasses
+All Known Subinterfaces
+All Known Implementing Classes
+Class/interface declaration
+Class/interface description
+
+
+Nested Class Summary
+Field Summary
+Constructor Summary
+Method Summary
+
+
+Field Detail
+Constructor Detail
+Method Detail
+
+Each summary entry contains the first sentence from the detailed 
description for that item. The summary entries are alphabetical, while the 
detailed descriptions are in the order they appear in the source code. This 
preserves the logical groupings established by the programmer.
+
+
+Annotation Type
+Each annotation type has its own separate page with the following 
sections:
+
+Annotation Type declaration
+Annotation Type description
+Required Element Summary
+Optional Element Summary
+Element Detail
+
+
+
+Enum
+Each enum has its own separate page with the following sections:
+
+Enum declaration
+Enum description
+Enum Constant Summary
+Enum Constant Detail
+
+
+
+Use
+Each documented package, class and interface has its own Use page.  This 
page describes what packages, classes, methods, constructors and fields use any 
part of the given class or package. Given a class or interface A, its Use page 
includes subclasses of A, fields declared as A, methods that return A, and 
methods and constructors with parameters of type A.  You can access this page 
by first going to the package, class or interface, then clicking on the "Use" 
link in the navigation bar.
+
+
+Tree (Class Hierarchy)
+There is a Class Hierarchy page for all 
packages, plus a hierarchy for each package. Each hierarchy page contains a 
list of classes and a list of interfaces. The classes are organized by 
inheritance structure starting with java.lang.Object. The 
interfaces do not inherit from java.lang.Object.
+
+When viewing the Overview page, clicking on "Tree" displays the hierarchy 
for all packages.
+When viewing a particular package, class or interface page, clicking 
"Tree" displays the hierarchy for only that package.
+
+
+
+Deprecated API
+The Deprecated API page lists all of the 
API that have been deprecated. A deprecated API is not recommended for use, 
generally due to improvements, and a replacement API is usually given. 
Deprecated APIs may be removed in future implementations.
+
+
+Index
+The Index contains an alphabetic list of all 
classes, interfaces, constructors, methods, and fields.
+
+
+Prev/Next
+These links take you to the next or previous class, interface, package, or 
related page.
+
+
+Frames/No Frames
+These links show and hide the HTML frames.  All pages are available with or 
without frames.
+
+
+All Classes
+The All Classes link shows all 
classes and interfaces except non-static nested types.
+
+
+Serialized Form
+Each serializable or externalizable class has a description of its 
serialization fields and methods. This information is of interest to 
re-implementors, not to developers using the API. While there is no link in the 
navigation bar, you can get to this information by going to any serialized 
class and 

[32/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
new file mode 100644
index 000..38e05b0
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -0,0 +1,3045 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HColumnDescriptor (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":42,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":9,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":9,"i41":42,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":10,"i59":10,"i60":10,"i61":10,"i62":42,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":42,"i73":42,"i74":42,"i75":42,"i76":42,"i77":42,"i78":42,"i79":10,"i80":10,"i81":10,"i82":42};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HColumnDescriptor
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HColumnDescriptor
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHColumnDescriptor, 
org.apache.hadoop.io.Writable, org.apache.hadoop.io.WritableComparableHColumnDescriptor
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class HColumnDescriptor
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements org.apache.hadoop.io.WritableComparableHColumnDescriptor
+An HColumnDescriptor contains information about a column 
family such as the
+ number of versions, compression settings, etc.
+
+ It is used as input when creating a table or adding a column.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOCKCACHE
+Key for the BLOCKCACHE attribute.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOCKSIZE
+Size of storefile/hfile 'blocks'.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOOMFILTER
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_BLOOMS_ON_WRITE
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_DATA_IN_L1
+Key for cache data into L1 if cache is set up with more 
than one tier.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_DATA_ON_WRITE
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_INDEX_ON_WRITE
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPRESS_TAGS
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or 

[20/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.RawBytesComparator.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.RawBytesComparator.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.RawBytesComparator.html
new file mode 100644
index 000..2bedfbb
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.RawBytesComparator.html
@@ -0,0 +1,415 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValue.RawBytesComparator (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":42,"i3":10,"i4":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
KeyValue.RawBytesComparator
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValue.KVComparator
+
+
+org.apache.hadoop.hbase.KeyValue.RawBytesComparator
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorCell, KeyValue.SamePrefixComparatorbyte[], 
org.apache.hadoop.io.RawComparatorCell
+
+
+Enclosing class:
+KeyValue
+
+
+
+public static class KeyValue.RawBytesComparator
+extends KeyValue.KVComparator
+This is a TEST only Comparator used in TestSeekTo and 
TestReseekTo.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+RawBytesComparator()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+byte[]
+calcIndexKey(byte[]lastKeyOfPreviousBlock,
+byte[]firstKeyInBlock)
+
+
+int
+compare(Cellleft,
+   Cellright)
+Compares the Key of a cell -- with fields being more 
significant in this order:
+ rowkey, colfam/qual, timestamp, type, mvcc
+
+
+
+int
+compareFlatKey(byte[]left,
+  intloffset,
+  intllength,
+  byte[]right,
+  introffset,
+  intrlength)
+Deprecated.
+Since 0.99.2.
+
+
+
+
+int
+compareOnlyKeyPortion(Cellleft,
+ Cellright)
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getLegacyKeyComparatorName()
+The HFileV2 file format's trailer contains this class 
name.
+
+
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.KeyValue.KVComparator
+clone,
 compare,
 compareColumns,
 compareColumns,
 compareFamilies,
 compareFlatKey,
 compareIgnoringPrefix,
 compareKey,
 compareRowKey,
 compareRows,
 compareRows,
 compareTimestamps,
 getShortMidpointKey,
 matchingRowColumn, matchingRows,
 matchingRows
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notifyAll--;
 title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/j
 ava/lang/Object.html?is-external=true#toString--" 

[24/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/JMXListener.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/JMXListener.html 
b/1.2/apidocs/org/apache/hadoop/hbase/JMXListener.html
new file mode 100644
index 000..096e674
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/JMXListener.html
@@ -0,0 +1,481 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+JMXListener (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class JMXListener
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.JMXListener
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Coprocessor
+
+
+
+public class JMXListener
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements Coprocessor
+Pluggable JMX Agent for HBase(to fix the 2 random TCP ports 
issue
+ of the out-of-the-box JMX Agent):
+ 1)connector port can share with the registry port if SSL is OFF
+ 2)support password authentication
+ 3)support subset of SSL (with default configuration)
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.Coprocessor
+Coprocessor.State
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static int
+defMasterRMIRegistryPort
+
+
+static int
+defRegionserverRMIRegistryPort
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+RMI_CONNECTOR_PORT_CONF_KEY
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+RMI_REGISTRY_PORT_CONF_KEY
+
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.Coprocessor
+PRIORITY_HIGHEST,
 PRIORITY_LOWEST,
 PRIORITY_SYSTEM,
 PRIORITY_USER,
 VERSION
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+JMXListener()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static http://docs.oracle.com/javase/7/docs/api/javax/management/remote/JMXServiceURL.html?is-external=true;
 title="class or interface in 
javax.management.remote">JMXServiceURL
+buildJMXServiceURL(intrmiRegistryPort,
+  intrmiConnectorPort)
+
+
+void
+start(CoprocessorEnvironmentenv)
+
+
+void
+startConnectorServer(intrmiRegistryPort,
+intrmiConnectorPort)
+
+
+void
+stop(CoprocessorEnvironmentenv)
+
+
+void
+stopConnectorServer()
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 

[12/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/NamespaceNotFoundException.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/NamespaceNotFoundException.html 
b/1.2/apidocs/org/apache/hadoop/hbase/NamespaceNotFoundException.html
new file mode 100644
index 000..de26e08
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/NamespaceNotFoundException.html
@@ -0,0 +1,293 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+NamespaceNotFoundException (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
NamespaceNotFoundException
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+org.apache.hadoop.hbase.DoNotRetryIOException
+
+
+org.apache.hadoop.hbase.NamespaceNotFoundException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Stable
+public class NamespaceNotFoundException
+extends DoNotRetryIOException
+Thrown when a namespace can not be located
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+NamespaceNotFoundException()
+
+
+NamespaceNotFoundException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespace)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface in java.lang">initCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lan
 

[25/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HealthCheckChore.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HealthCheckChore.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HealthCheckChore.html
new file mode 100644
index 000..49ad553
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HealthCheckChore.html
@@ -0,0 +1,302 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HealthCheckChore (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HealthCheckChore
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.ScheduledChore
+
+
+org.apache.hadoop.hbase.HealthCheckChore
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable
+
+
+
+public class HealthCheckChore
+extends ScheduledChore
+The Class HealthCheckChore for running health checker 
regularly.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+HealthCheckChore(intsleepTime,
+Stoppablestopper,
+
org.apache.hadoop.conf.Configurationconf)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected void
+chore()
+The task to execute on each scheduled execution of the 
Chore
+
+
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.ScheduledChore
+cancel,
 cancel,
 choreForTesting,
 cleanup,
 getInitialDelay,
 getName,
 getPeriod,
 getStopper,
 getTimeUnit,
 initialChore,
 isInitialChoreComplete,
 isScheduled,
 run, 
toString,
 triggerNow
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+
+HealthCheckChore
+publicHealthCheckChore(intsleepTime,
+Stoppablestopper,
+org.apache.hadoop.conf.Configurationconf)
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+chore
+protectedvoidchore()
+Description copied from 
class:ScheduledChore
+The task to execute on each scheduled execution of the 
Chore
+
+Specified by:
+chorein
 classScheduledChore
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+

[16/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/LocalHBaseCluster.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/LocalHBaseCluster.html 
b/1.2/apidocs/org/apache/hadoop/hbase/LocalHBaseCluster.html
new file mode 100644
index 000..773c8dd
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/LocalHBaseCluster.html
@@ -0,0 +1,901 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+LocalHBaseCluster (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":9,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class LocalHBaseCluster
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.LocalHBaseCluster
+
+
+
+
+
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class LocalHBaseCluster
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+This class creates a single process HBase cluster. One 
thread is created for
+ a master and one per region server.
+
+ Call startup()
 to start the cluster running and shutdown()
+ to close it all down. join()
 the cluster is you want to wait on
+ shutdown completion.
+
+ Runs master on port 16000 by default.  Because we can't just kill the
+ process -- not till HADOOP-1700 gets fixed and even then -- we need to
+ be able to find the master with a remote client to run shutdown.  To use a
+ port other than 16000, set the hbase.master to a value of 'local:PORT':
+ that is 'local', not 'localhost', and the port number the master should use
+ instead of 16000.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+LOCAL
+local mode
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+LOCAL_COLON
+'local:'
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+LocalHBaseCluster(org.apache.hadoop.conf.Configurationconf)
+Constructor.
+
+
+
+LocalHBaseCluster(org.apache.hadoop.conf.Configurationconf,
+ intnoRegionServers)
+Constructor.
+
+
+
+LocalHBaseCluster(org.apache.hadoop.conf.Configurationconf,
+ intnoMasters,
+ intnoRegionServers)
+Constructor.
+
+
+
+LocalHBaseCluster(org.apache.hadoop.conf.Configurationconf,
+ intnoMasters,
+ intnoRegionServers,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class? extends 
org.apache.hadoop.hbase.master.HMastermasterClass,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class? extends 
org.apache.hadoop.hbase.regionserver.HRegionServerregionServerClass)
+Constructor.
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
+addMaster()
+
+
+org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
+addMaster(org.apache.hadoop.conf.Configurationc,
+ intindex)
+
+
+org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
+addMaster(org.apache.hadoop.conf.Configurationc,
+ intindex,
+ Useruser)
+
+
+org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread
+addRegionServer()
+
+

[06/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Done.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Done.html 
b/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Done.html
new file mode 100644
index 000..13fab05
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Done.html
@@ -0,0 +1,272 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SplitLogTask.Done (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class SplitLogTask.Done
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.SplitLogTask
+
+
+org.apache.hadoop.hbase.SplitLogTask.Done
+
+
+
+
+
+
+
+
+
+Enclosing class:
+SplitLogTask
+
+
+
+public static class SplitLogTask.Done
+extends SplitLogTask
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.SplitLogTask
+SplitLogTask.Done, SplitLogTask.Err, SplitLogTask.Owned, SplitLogTask.Resigned, SplitLogTask.Unassigned
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+Done(ServerNameoriginServer,
+ZooKeeperProtos.SplitLogTask.RecoveryModemode)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.SplitLogTask
+equals,
 getMode,
 getServerName,
 hashCode,
 isDone,
 isDone,
 isErr, 
isErr,
 isOwned,
 isOwned,
 isResigned,
 isResigned,
 isUnassigned,
 isUnassigned,
 parseFrom,
 toByteArray,
 toString
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notifyAll--;
 title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-ex
 ternal=true#wait-long-" title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+
+Done
+publicDone(ServerNameoriginServer,
+ZooKeeperProtos.SplitLogTask.RecoveryModemode)
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+Copyright  20072016 http://www.apache.org/;>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Err.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Err.html 
b/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Err.html
new file mode 100644
index 000..29678ca
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/SplitLogTask.Err.html
@@ -0,0 +1,272 @@

[10/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/RegionLocations.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/RegionLocations.html 
b/1.2/apidocs/org/apache/hadoop/hbase/RegionLocations.html
new file mode 100644
index 000..6e7474e
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/RegionLocations.html
@@ -0,0 +1,576 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+RegionLocations (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class RegionLocations
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.RegionLocations
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class RegionLocations
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+Container for holding a list of HRegionLocation's that correspond to 
the
+ same range. The list is indexed by the replicaId. This is an immutable list,
+ however mutation operations are provided which returns a new List via 
copy-on-write
+ (assuming small number of locations)
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+RegionLocations(http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHRegionLocationlocations)
+
+
+RegionLocations(HRegionLocation...locations)
+Constructs the region location list.
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+HRegionLocation
+getDefaultRegionLocation()
+
+
+HRegionLocation
+getRegionLocation()
+Returns the first not-null region location in the list
+
+
+
+HRegionLocation
+getRegionLocation(intreplicaId)
+
+
+HRegionLocation
+getRegionLocationByRegionName(byte[]regionName)
+Returns the region location from the list for matching 
regionName, which can
+ be regionName or encodedRegionName
+
+
+
+HRegionLocation[]
+getRegionLocations()
+
+
+boolean
+isEmpty()
+Returns whether there are non-null elements in the 
list
+
+
+
+RegionLocations
+mergeLocations(RegionLocationsother)
+Merges this RegionLocations list with the given list 
assuming
+ same range, and keeping the most up to date version of the
+ HRegionLocation entries from either list according to seqNum.
+
+
+
+int
+numNonNullElements()
+Returns the size of not-null locations
+
+
+
+RegionLocations
+remove(HRegionLocationlocation)
+Removes the given location from the list
+
+
+
+RegionLocations
+remove(intreplicaId)
+Removes location of the given replicaId from the list
+
+
+
+RegionLocations
+removeByServer(ServerNameserverName)
+Returns a new RegionLocations with the locations removed 
(set to null)
+ which have the destination server as given.
+
+
+
+int
+size()
+Returns the size of the list even if some of the elements
+ might be null.
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+toString()
+
+
+RegionLocations
+updateLocation(HRegionLocationlocation,
+  booleancheckForEquals,
+  booleanforce)
+Updates the location with new only if the new location has 
a higher
+ seqNum than the old one or force is true.
+
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, 

[07/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/SettableSequenceId.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/SettableSequenceId.html 
b/1.2/apidocs/org/apache/hadoop/hbase/SettableSequenceId.html
new file mode 100644
index 000..c6f9ca8
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/SettableSequenceId.html
@@ -0,0 +1,240 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SettableSequenceId (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Interface 
SettableSequenceId
+
+
+
+
+
+
+All Known Implementing Classes:
+KeyValue, KeyValue.KeyOnlyKeyValue, NoTagsKeyValue, PrefixTreeArrayReversibleScanner,
 PrefixTreeArrayScanner, PrefixTreeArraySearcher, 
PrefixTreeCell, TagRewriteCell
+
+
+
+@InterfaceAudience.LimitedPrivate(value="Coprocesssor")
+public interface SettableSequenceId
+Using this Interface one can mark a Cell as Sequence 
stampable. 
+ Note : Make sure to make Cell implementation of this type in server 
side.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsAbstract Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+setSequenceId(longseqId)
+Sets with the given seqId.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+setSequenceId
+voidsetSequenceId(longseqId)
+throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+Sets with the given seqId.
+
+Parameters:
+seqId - 
+Throws:
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+Copyright  20072016 http://www.apache.org/;>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/SettableTimestamp.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/SettableTimestamp.html 
b/1.2/apidocs/org/apache/hadoop/hbase/SettableTimestamp.html
new file mode 100644
index 000..dd7419e
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/SettableTimestamp.html
@@ -0,0 +1,266 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SettableTimestamp (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":6,"i1":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Interface 
SettableTimestamp
+

[36/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/CompatibilitySingletonFactory.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/CompatibilitySingletonFactory.html 
b/1.2/apidocs/org/apache/hadoop/hbase/CompatibilitySingletonFactory.html
new file mode 100644
index 000..1b0c61c
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/CompatibilitySingletonFactory.html
@@ -0,0 +1,333 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+CompatibilitySingletonFactory (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
CompatibilitySingletonFactory
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.CompatibilityFactory
+
+
+org.apache.hadoop.hbase.CompatibilitySingletonFactory
+
+
+
+
+
+
+
+
+
+
+public class CompatibilitySingletonFactory
+extends CompatibilityFactory
+Factory for classes supplied by hadoop compatibility 
modules.  Only one of each class will be
+  created.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static class
+CompatibilitySingletonFactory.SingletonStorage
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.CompatibilityFactory
+EXCEPTION_END,
 EXCEPTION_START
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Modifier
+Constructor and Description
+
+
+protected 
+CompatibilitySingletonFactory()
+This is a static only class don't let anyone create an 
instance.
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static TT
+getInstance(http://docs.oracle.com/javase/7/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTklass)
+Get the singleton instance of Any classes defined by 
compatibiliy jar's
+
+
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.CompatibilityFactory
+createExceptionString
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+

[34/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/DoNotRetryIOException.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/DoNotRetryIOException.html 
b/1.2/apidocs/org/apache/hadoop/hbase/DoNotRetryIOException.html
new file mode 100644
index 000..dd6b165
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/DoNotRetryIOException.html
@@ -0,0 +1,331 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+DoNotRetryIOException (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
DoNotRetryIOException
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+org.apache.hadoop.hbase.DoNotRetryIOException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+Direct Known Subclasses:
+AccessDeniedException, CoprocessorException, 
DoNotRetryRegionException, 
FailedSanityCheckException, FatalConnectionException, HBaseSnapshotException, 
InvalidFamilyOperationException, LeaseException, LockTimeoutException, 
NamespaceExistException, NamespaceNotFoundException, NoSuchColumnFamilyException, NotAllMetaRegionsOnlineException, OperationConflictException, QuotaExceededException, ScannerResetException, 
ScannerTimeoutException, TableExistsException, TableNotDisabledException, TableNotEnabledException, TableNotFoundException, UnknownProtocolException, UnknownScannerException
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Stable
+public class DoNotRetryIOException
+extends HBaseIOException
+Subclass if exception is not meant to be retried: e.g.
+ UnknownScannerException
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+DoNotRetryIOException()
+default constructor
+
+
+
+DoNotRetryIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringmessage)
+
+
+DoNotRetryIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmessage,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+DoNotRetryIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, 

[29/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HDFSBlocksDistribution.HostAndWeight.WeightComparator.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/HDFSBlocksDistribution.HostAndWeight.WeightComparator.html
 
b/1.2/apidocs/org/apache/hadoop/hbase/HDFSBlocksDistribution.HostAndWeight.WeightComparator.html
new file mode 100644
index 000..a665151
--- /dev/null
+++ 
b/1.2/apidocs/org/apache/hadoop/hbase/HDFSBlocksDistribution.HostAndWeight.WeightComparator.html
@@ -0,0 +1,296 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HDFSBlocksDistribution.HostAndWeight.WeightComparator (Apache HBase 
1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HDFSBlocksDistribution.HostAndWeight.WeightComparator
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight.WeightComparator
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorHDFSBlocksDistribution.HostAndWeight
+
+
+Enclosing class:
+HDFSBlocksDistribution.HostAndWeight
+
+
+
+public static class HDFSBlocksDistribution.HostAndWeight.WeightComparator
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorHDFSBlocksDistribution.HostAndWeight
+comparator used to sort hosts based on weight
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+WeightComparator()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+int
+compare(HDFSBlocksDistribution.HostAndWeightl,
+   HDFSBlocksDistribution.HostAndWeightr)
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+Methods inherited from 

[23/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.html
 
b/1.2/apidocs/org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.html
new file mode 100644
index 000..90eb190
--- /dev/null
+++ 
b/1.2/apidocs/org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.html
@@ -0,0 +1,388 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+JitterScheduledThreadPoolExecutorImpl (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":10,"i1":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
JitterScheduledThreadPoolExecutorImpl
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/AbstractExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">java.util.concurrent.AbstractExecutorService
+
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">java.util.concurrent.ThreadPoolExecutor
+
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">java.util.concurrent.ScheduledThreadPoolExecutor
+
+
+org.apache.hadoop.hbase.JitterScheduledThreadPoolExecutorImpl
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executor.html?is-external=true;
 title="class or interface in java.util.concurrent">Executor, http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutorService, http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledExecutorService
+
+
+
+@InterfaceAudience.Private
+public class JitterScheduledThreadPoolExecutorImpl
+extends http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledThreadPoolExecutor
+ScheduledThreadPoolExecutor that will add some jitter to 
the RunnableScheduledFuture.getDelay.
+
+ This will spread out things on a distributed cluster.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+protected class
+JitterScheduledThreadPoolExecutorImpl.JitteredRunnableScheduledFutureV
+Class that basically just defers to the wrapped 
future.
+
+
+
+
+
+
+
+Nested classes/interfaces inherited from 
classjava.util.concurrent.http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true;
 title="class or interface in java.util.concurrent">ThreadPoolExecutor
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.AbortPolicy.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor.AbortPolicy, http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.CallerRunsPolicy.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor.CallerRunsPolicy, http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.DiscardOldestPolicy.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor.DiscardOldestPolicy, http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.DiscardPolicy.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor.DiscardPolicy
+
+
+
+
+

[13/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/MultiActionResultTooLarge.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/MultiActionResultTooLarge.html 
b/1.2/apidocs/org/apache/hadoop/hbase/MultiActionResultTooLarge.html
new file mode 100644
index 000..920cdc5
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/MultiActionResultTooLarge.html
@@ -0,0 +1,278 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+MultiActionResultTooLarge (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
MultiActionResultTooLarge
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.RetryImmediatelyException
+
+
+org.apache.hadoop.hbase.MultiActionResultTooLarge
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class MultiActionResultTooLarge
+extends RetryImmediatelyException
+Exception thrown when the result needs to be chunked on the 
server side.
+ It signals that retries should happen right away and not count against the 
number of
+ retries because some of the multi was a success.
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+MultiActionResultTooLarge(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface in java.lang">initCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-;
 title="class or interface in java.lang">printStackTrace, 

[42/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/index.html
--
diff --git a/1.2/apidocs/index.html b/1.2/apidocs/index.html
new file mode 100644
index 000..aeb0ff5
--- /dev/null
+++ b/1.2/apidocs/index.html
@@ -0,0 +1,75 @@
+http://www.w3.org/TR/html4/frameset.dtd;>
+
+
+
+
+
+Apache HBase 1.2.4 API
+
+targetPage = "" + window.location.search;
+if (targetPage != "" && targetPage != "undefined")
+targetPage = targetPage.substring(1);
+if (targetPage.indexOf(":") != -1 || (targetPage != "" && 
!validURL(targetPage)))
+targetPage = "undefined";
+function validURL(url) {
+try {
+url = decodeURIComponent(url);
+}
+catch (error) {
+return false;
+}
+var pos = url.indexOf(".html");
+if (pos == -1 || pos != url.length - 5)
+return false;
+var allowNumber = false;
+var allowSep = false;
+var seenDot = false;
+for (var i = 0; i < url.length - 5; i++) {
+var ch = url.charAt(i);
+if ('a' <= ch && ch <= 'z' ||
+'A' <= ch && ch <= 'Z' ||
+ch == '$' ||
+ch == '_' ||
+ch.charCodeAt(0) > 127) {
+allowNumber = true;
+allowSep = true;
+} else if ('0' <= ch && ch <= '9'
+|| ch == '-') {
+if (!allowNumber)
+ return false;
+} else if (ch == '/' || ch == '.') {
+if (!allowSep)
+return false;
+allowNumber = false;
+allowSep = false;
+if (ch == '.')
+ seenDot = true;
+if (ch == '/' && seenDot)
+ return false;
+} else {
+return false;
+}
+}
+return true;
+}
+function loadFrames() {
+if (targetPage != "" && targetPage != "undefined")
+ top.classFrame.location = top.targetPage;
+}
+
+
+
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+Frame Alert
+This document is designed to be viewed using the frames feature. If you see 
this message, you are using a non-frame-capable web client. Link to Non-frame version.
+
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/Abortable.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/Abortable.html 
b/1.2/apidocs/org/apache/hadoop/hbase/Abortable.html
new file mode 100644
index 000..266addc
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/Abortable.html
@@ -0,0 +1,269 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Abortable (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":6,"i1":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Interface Abortable
+
+
+
+
+
+
+All Known Subinterfaces:
+Admin, ClusterConnection, Connection, HConnection, MasterServices, OnlineRegions, RegionServerServices, Server
+
+
+All Known Implementing Classes:
+CoprocessorHConnection, HBaseAdmin, HBaseInterClusterReplicationEndpoint,
 HBaseReplicationEndpoint, HMaster, HMasterCommandLine.LocalHMaster, HRegionServer, RegionReplicaReplicationEndpoint,
 ReplicationPeerZKImpl, ZooKeeperWatcher
+
+
+
+@InterfaceAudience.LimitedPrivate(value={"Coprocesssor","Phoenix"})
+ @InterfaceStability.Evolving
+public interface Abortable
+Interface to support the aborting of a given server or 
client.
+ 
+ This is used primarily for ZooKeeper usage when we could get an unexpected
+ and fatal exception, requiring an abort.
+ 
+ Implemented by the Master, RegionServer, and TableServers (client).
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsAbstract Methods
+
+Modifier and Type
+Method and Description
+
+
+void

[09/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/RemoteExceptionHandler.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/RemoteExceptionHandler.html 
b/1.2/apidocs/org/apache/hadoop/hbase/RemoteExceptionHandler.html
new file mode 100644
index 000..8db1c62
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/RemoteExceptionHandler.html
@@ -0,0 +1,314 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+RemoteExceptionHandler (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":9,"i1":9,"i2":41};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
RemoteExceptionHandler
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.RemoteExceptionHandler
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class RemoteExceptionHandler
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+An immutable class which contains a static method for 
handling
+ org.apache.hadoop.ipc.RemoteException exceptions.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+checkIOException(http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOExceptione)
+Examine passed IOException.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+checkThrowable(http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwablet)
+Examine passed Throwable.
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+decodeRemoteException(org.apache.hadoop.ipc.RemoteExceptionre)
+Deprecated.
+Use 
RemoteException.unwrapRemoteException() instead.
+ In fact we should look into deprecating this whole class - St.Ack 
2010929
+
+
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in 

[14/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/1.2/apidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
new file mode 100644
index 000..3ac7ed6
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -0,0 +1,1882 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+MetaTableAccessor (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":41,"i26":41,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":9,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class MetaTableAccessor
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.MetaTableAccessor
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class MetaTableAccessor
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+Read/write operations on region and assignment information 
store in
+ hbase:meta.
+
+ Some of the methods of this class take ZooKeeperWatcher as a param. The only 
reason
+ for this is because when used on client-side (like from HBaseAdmin), we want 
to use
+ short-living connection (opened before each operation, closed right after), 
while
+ when used on HM or HRS (like in AssignmentManager) we want permanent 
connection.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static interface
+MetaTableAccessor.Visitor
+Implementations 'visit' a catalog table row.
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+protected static char
+META_REPLICA_ID_DELIMITER
+The delimiter for meta columns for replicaIds  0
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+MetaTableAccessor()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+static void
+addDaughter(Connectionconnection,
+   HRegionInforegionInfo,
+   ServerNamesn,
+   longopenSeqNum)
+Adds a daughter region entry to meta.
+
+
+
+static Put
+addDaughtersToPut(Putput,
+ HRegionInfosplitA,
+ HRegionInfosplitB)
+Adds split daughters to the Put
+
+
+
+static Put
+addEmptyLocation(Putp,
+intreplicaId)
+
+
+static Put
+addLocation(Putp,
+   ServerNamesn,
+   longopenSeqNum,
+   longtime,
+   intreplicaId)
+
+
+static void
+addRegionsToMeta(Connectionconnection,
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegionInfos,
+intregionReplication)
+Adds a hbase:meta row for each of the specified new 
regions.
+
+
+
+static void
+addRegionsToMeta(Connectionconnection,
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegionInfos,
+intregionReplication,
+longts)
+Adds a hbase:meta row for each of the specified new 
regions.
+
+
+
+static void
+addRegionToMeta(Connectionconnection,
+   HRegionInforegionInfo)
+Adds a hbase:meta row for the specified new region.
+
+
+
+static void

[27/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HRegionLocation.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
new file mode 100644
index 000..d1368b2
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
@@ -0,0 +1,455 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HRegionLocation (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HRegionLocation
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HRegionLocation
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionLocation
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class HRegionLocation
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionLocation
+Data structure to hold HRegionInfo and the address for the 
hosting
+ HRegionServer.  Immutable.  Comparable, but we compare the 'location' only:
+ i.e. the hostname and port, and *not* the regioninfo.  This means two
+ instances are the same if they refer to the same 'location' (the same
+ hostname and port), though they may be carrying different regions.
+
+ On a big cluster, each client will have thousands of instances of this 
object, often
+  100 000 of them if not million. It's important to keep the object size as 
small
+  as possible.
+
+ This interface has been marked InterfaceAudience.Public in 0.96 and 
0.98.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+HRegionLocation(HRegionInforegionInfo,
+   ServerNameserverName)
+
+
+HRegionLocation(HRegionInforegionInfo,
+   ServerNameserverName,
+   longseqNum)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+int
+compareTo(HRegionLocationo)
+
+
+boolean
+equals(http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objecto)
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getHostname()
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getHostnamePort()
+
+
+int
+getPort()
+
+
+HRegionInfo
+getRegionInfo()
+
+
+long
+getSeqNum()
+
+
+ServerName
+getServerName()
+
+
+int
+hashCode()
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+toString()
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in 

[02/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/TableNotFoundException.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/TableNotFoundException.html 
b/1.2/apidocs/org/apache/hadoop/hbase/TableNotFoundException.html
new file mode 100644
index 000..f01ed3b
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/TableNotFoundException.html
@@ -0,0 +1,324 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+TableNotFoundException (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
TableNotFoundException
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+org.apache.hadoop.hbase.DoNotRetryIOException
+
+
+org.apache.hadoop.hbase.TableNotFoundException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Stable
+public class TableNotFoundException
+extends DoNotRetryIOException
+Thrown when a table can not be located
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TableNotFoundException()
+default constructor
+
+
+
+TableNotFoundException(byte[]tableName)
+
+
+TableNotFoundException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+
+
+TableNotFoundException(TableNametableName)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface in java.lang">initCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-;
 title="class or interface in java.lang">printStackTrace, 

[50/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/architecture.gif
--
diff --git a/1.2/_chapters/images/architecture.gif 
b/1.2/_chapters/images/architecture.gif
new file mode 100644
index 000..8d84a23
Binary files /dev/null and b/1.2/_chapters/images/architecture.gif differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/bc_basic.png
--
diff --git a/1.2/_chapters/images/bc_basic.png 
b/1.2/_chapters/images/bc_basic.png
new file mode 100644
index 000..231de93
Binary files /dev/null and b/1.2/_chapters/images/bc_basic.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/bc_config.png
--
diff --git a/1.2/_chapters/images/bc_config.png 
b/1.2/_chapters/images/bc_config.png
new file mode 100644
index 000..53250cf
Binary files /dev/null and b/1.2/_chapters/images/bc_config.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/bc_l1.png
--
diff --git a/1.2/_chapters/images/bc_l1.png b/1.2/_chapters/images/bc_l1.png
new file mode 100644
index 000..36d7e55
Binary files /dev/null and b/1.2/_chapters/images/bc_l1.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/bc_l2_buckets.png
--
diff --git a/1.2/_chapters/images/bc_l2_buckets.png 
b/1.2/_chapters/images/bc_l2_buckets.png
new file mode 100644
index 000..5163928
Binary files /dev/null and b/1.2/_chapters/images/bc_l2_buckets.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/bc_stats.png
--
diff --git a/1.2/_chapters/images/bc_stats.png 
b/1.2/_chapters/images/bc_stats.png
new file mode 100644
index 000..d8c6384
Binary files /dev/null and b/1.2/_chapters/images/bc_stats.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/big_h_logo.png
--
diff --git a/1.2/_chapters/images/big_h_logo.png 
b/1.2/_chapters/images/big_h_logo.png
new file mode 100644
index 000..5256094
Binary files /dev/null and b/1.2/_chapters/images/big_h_logo.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/big_h_logo.svg
--
diff --git a/1.2/_chapters/images/big_h_logo.svg 
b/1.2/_chapters/images/big_h_logo.svg
new file mode 100644
index 000..ab24198
--- /dev/null
+++ b/1.2/_chapters/images/big_h_logo.svg
@@ -0,0 +1,139 @@
+
+
+
+http://purl.org/dc/elements/1.1/;
+   xmlns:cc="http://creativecommons.org/ns#;
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#;
+   xmlns:svg="http://www.w3.org/2000/svg;
+   xmlns="http://www.w3.org/2000/svg;
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd;
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape;
+   version="1.1"
+   id="Layer_1"
+   x="0px"
+   y="0px"
+   width="792px"
+   height="612px"
+   viewBox="0 0 792 612"
+   enable-background="new 0 0 792 612"
+   xml:space="preserve"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="big_h_same_font_hbase3_logo.png"
+   inkscape:export-filename="big_h_bitmap.png"
+   inkscape:export-xdpi="90"
+   inkscape:export-ydpi="90">image/svg+xmlhttp://purl.org/dc/dcmitype/StillImage; 
/>
+
+
+
+
+
+
+
+
+APACHE
+
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/coprocessor_stats.png
--
diff --git a/1.2/_chapters/images/coprocessor_stats.png 
b/1.2/_chapters/images/coprocessor_stats.png
new file mode 100644
index 000..2fc8703
Binary files /dev/null and b/1.2/_chapters/images/coprocessor_stats.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/data_block_diff_encoding.png
--
diff --git a/1.2/_chapters/images/data_block_diff_encoding.png 
b/1.2/_chapters/images/data_block_diff_encoding.png
new file mode 100644
index 000..0bd03a4
Binary files /dev/null and b/1.2/_chapters/images/data_block_diff_encoding.png 
differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/_chapters/images/data_block_no_encoding.png
--
diff --git a/1.2/_chapters/images/data_block_no_encoding.png 
b/1.2/_chapters/images/data_block_no_encoding.png
new file mode 

[37/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/ClusterStatus.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/ClusterStatus.html 
b/1.2/apidocs/org/apache/hadoop/hbase/ClusterStatus.html
new file mode 100644
index 000..93b0f9a
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/ClusterStatus.html
@@ -0,0 +1,764 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ClusterStatus (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class ClusterStatus
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.io.VersionedWritable
+
+
+org.apache.hadoop.hbase.ClusterStatus
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+org.apache.hadoop.io.Writable
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class ClusterStatus
+extends org.apache.hadoop.io.VersionedWritable
+Status information on the HBase cluster.
+ 
+ ClusterStatus provides clients with information such as:
+ 
+ The count and names of region servers in the cluster.
+ The count and names of dead region servers in the cluster.
+ The name of the active master for the cluster.
+ The name(s) of the backup master(s) for the cluster, if they exist.
+ The average cluster load.
+ The number of regions deployed on the cluster.
+ The number of requests since last report.
+ Detailed region server loading and resource usage information,
+  per server and per region.
+ Regions in transition at master
+ The unique cluster ID
+ 
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ClusterStatus()
+Deprecated.
+As of release 0.96
+ (https://issues.apache.org/jira/browse/HBASE-6038;>HBASE-6038).
+ This will be removed in HBase 2.0.0.
+ Used by Writables and Writables are going away.
+
+
+
+
+ClusterStatus(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringhbaseVersion,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringclusterid,
+ http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,ServerLoadservers,
+ http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionServerNamedeadServers,
+ ServerNamemaster,
+ http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionServerNamebackupMasters,
+ http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.master.RegionStaterit,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]masterCoprocessors,
+ http://docs.oracle.com/javase/7/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in 
java.lang">BooleanbalancerOn)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+

[22/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KVComparator.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KVComparator.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KVComparator.html
new file mode 100644
index 000..d401c4c
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KVComparator.html
@@ -0,0 +1,825 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValue.KVComparator (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":42,"i18":10,"i19":10,"i20":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
KeyValue.KVComparator
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValue.KVComparator
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorCell, KeyValue.SamePrefixComparatorbyte[], 
org.apache.hadoop.io.RawComparatorCell
+
+
+Direct Known Subclasses:
+KeyValue.MetaComparator, KeyValue.RawBytesComparator
+
+
+Enclosing class:
+KeyValue
+
+
+
+public static class KeyValue.KVComparator
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements org.apache.hadoop.io.RawComparatorCell, KeyValue.SamePrefixComparatorbyte[]
+Compare KeyValues.  When we compare KeyValues, we only 
compare the Key
+ portion.  This means two KeyValues with same Key but different Values are
+ considered the same as far as this Comparator is concerned.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+KVComparator()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+byte[]
+calcIndexKey(byte[]lastKeyOfPreviousBlock,
+byte[]firstKeyInBlock)
+
+
+protected http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+clone()
+
+
+int
+compare(byte[]l,
+   intloff,
+   intllen,
+   byte[]r,
+   introff,
+   intrlen)
+
+
+int
+compare(Cellleft,
+   Cellright)
+Compares the Key of a cell -- with fields being more 
significant in this order:
+ rowkey, colfam/qual, timestamp, type, mvcc
+
+
+
+protected int
+compareColumns(byte[]left,
+  intloffset,
+  intlquallength,
+  byte[]right,
+  introffset,
+  intrquallength)
+
+
+protected int
+compareColumns(byte[]left,
+  intloffset,
+  intllength,
+  intlfamilylength,
+  byte[]right,
+  introffset,
+  intrlength,
+  intrfamilylength)
+
+
+protected int
+compareFamilies(byte[]left,
+   intloffset,
+   intlfamilylength,
+   byte[]right,
+   introffset,
+   intrfamilylength)
+
+
+int
+compareFlatKey(byte[]left,
+  byte[]right)
+
+
+int
+compareFlatKey(byte[]left,
+  intloffset,
+  intllength,
+  byte[]right,
+  introffset,
+  intrlength)
+Compares left to right assuming that left,loffset,llength 
and right,roffset,rlength are
+ full KVs laid out in a flat byte[]s.
+
+
+
+int
+compareIgnoringPrefix(intcommonPrefix,
+ byte[]left,
+ intloffset,
+ intllength,
+ byte[]right,
+ introffset,
+ intrlength)
+Overridden
+
+
+
+int

[08/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/ServerLoad.html 
b/1.2/apidocs/org/apache/hadoop/hbase/ServerLoad.html
new file mode 100644
index 000..144ef42
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/ServerLoad.html
@@ -0,0 +1,786 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ServerLoad (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class ServerLoad
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.ServerLoad
+
+
+
+
+
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class ServerLoad
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+This class is used for exporting current state of load on a 
RegionServer.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static ServerLoad
+EMPTY_SERVERLOAD
+
+
+protected 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad
+serverLoad
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ServerLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadserverLoad)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+long
+getCurrentCompactedKVs()
+
+
+int
+getInfoServerPort()
+
+
+int
+getLoad()
+Originally, this method factored in the effect of requests 
going to the
+ server as well.
+
+
+
+int
+getMaxHeapMB()
+
+
+int
+getMemstoreSizeInMB()
+
+
+int
+getNumberOfRegions()
+
+
+long
+getNumberOfRequests()
+
+
+long
+getReadRequestsCount()
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]
+getRegionServerCoprocessors()
+Return the RegionServer-level coprocessors
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],RegionLoad
+getRegionsLoad()
+
+
+org.apache.hadoop.hbase.replication.ReplicationLoadSink
+getReplicationLoadSink()
+Call directly from client such as hbase shell
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.replication.ReplicationLoadSource
+getReplicationLoadSourceList()
+Call directly from client such as hbase shell
+
+
+
+double
+getRequestsPerSecond()
+
+
+int
+getRootIndexSizeKB()
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]
+getRsCoprocessors()
+Return the RegionServer-level and Region-level 
coprocessors
+
+
+
+int
+getStorefileIndexSizeInMB()
+
+
+int
+getStorefiles()
+
+
+int
+getStorefileSizeInMB()
+
+
+int
+getStores()
+
+
+int
+getStoreUncompressedSizeMB()
+
+
+long
+getTotalCompactingKVs()
+
+
+long
+getTotalNumberOfRequests()
+
+
+int
+getTotalStaticBloomSizeKB()
+
+
+int
+getTotalStaticIndexSizeKB()
+
+
+int
+getUsedHeapMB()
+
+
+long
+getWriteRequestsCount()
+
+
+boolean
+hasMaxHeapMB()
+
+
+boolean
+hasNumberOfRequests()
+
+
+boolean
+hasTotalNumberOfRequests()
+
+
+boolean
+hasUsedHeapMB()
+
+
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad
+obtainServerLoadPB()
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 

[38/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/ChoreService.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/ChoreService.html 
b/1.2/apidocs/org/apache/hadoop/hbase/ChoreService.html
new file mode 100644
index 000..c609f95
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/ChoreService.html
@@ -0,0 +1,558 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ChoreService (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class ChoreService
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.ChoreService
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class ChoreService
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+ChoreService is a service that can be used to schedule 
instances of ScheduledChore to run
+ periodically while sharing threads. The ChoreService is backed by a
+ http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledThreadPoolExecutor whose core 
pool size changes dynamically depending on the
+ number of ScheduledChore 
scheduled. All of the threads in the core thread pool of the
+ underlying http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ScheduledThreadPoolExecutor are set to 
be daemon threads.
+ 
+ The ChoreService provides the ability to schedule, cancel, and trigger 
instances of
+ ScheduledChore. The 
ChoreService also provides the ability to check on the status of
+ scheduled chores. The number of threads used by the ChoreService changes 
based on the scheduling
+ load and whether or not the scheduled chores are executing on time. As more 
chores are scheduled,
+ there may be a need to increase the number of threads if it is noticed that 
chores are no longer
+ meeting their scheduled start times. On the other hand, as chores are 
cancelled, an attempt is
+ made to reduce the number of running threads to see if chores can still meet 
their start times
+ with a smaller thread pool.
+ 
+ When finished with a ChoreService it is good practice to call shutdown().
+ Calling this method ensures that all scheduled chores are cancelled and 
cleaned up properly.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static int
+MIN_CORE_POOL_SIZE
+The minimum number of threads in the core pool of the 
underlying ScheduledThreadPoolExecutor
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ChoreService(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringcoreThreadPoolPrefix)
+
+
+ChoreService(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringcoreThreadPoolPrefix,
+booleanjitter)
+
+
+ChoreService(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringcoreThreadPoolPrefix,
+intcorePoolSize,
+booleanjitter)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+cancelChore(ScheduledChorechore)
+Cancel any ongoing schedules that this chore has with the 
implementer of this interface.
+
+
+
+void
+cancelChore(ScheduledChorechore,
+   booleanmayInterruptIfRunning)
+
+
+static ChoreService

[35/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/CoordinatedStateManagerFactory.html
--
diff --git 
a/1.2/apidocs/org/apache/hadoop/hbase/CoordinatedStateManagerFactory.html 
b/1.2/apidocs/org/apache/hadoop/hbase/CoordinatedStateManagerFactory.html
new file mode 100644
index 000..d9a333d
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/CoordinatedStateManagerFactory.html
@@ -0,0 +1,285 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+CoordinatedStateManagerFactory (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = {"i0":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
CoordinatedStateManagerFactory
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.CoordinatedStateManagerFactory
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class CoordinatedStateManagerFactory
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+Creates instance of CoordinatedStateManager
+ based on configuration.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+CoordinatedStateManagerFactory()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static CoordinatedStateManager
+getCoordinatedStateManager(org.apache.hadoop.conf.Configurationconf)
+Creates consensus provider from the given 
configuration.
+
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/7/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+
+CoordinatedStateManagerFactory
+publicCoordinatedStateManagerFactory()
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+getCoordinatedStateManager
+public staticCoordinatedStateManagergetCoordinatedStateManager(org.apache.hadoop.conf.Configurationconf)
+Creates consensus provider from the given 
configuration.
+
+Parameters:
+conf - Configuration
+Returns:
+Implementation of  CoordinatedStateManager
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+

[48/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/allclasses-noframe.html
--
diff --git a/1.2/apidocs/allclasses-noframe.html 
b/1.2/apidocs/allclasses-noframe.html
new file mode 100644
index 000..c60b8e8
--- /dev/null
+++ b/1.2/apidocs/allclasses-noframe.html
@@ -0,0 +1,353 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+All Classes (Apache HBase 1.2.4 API)
+
+
+
+
+AllClasses
+
+
+AccessDeniedException
+Admin
+Append
+Attributes
+BadAuthException
+Base64
+Base64.Base64InputStream
+Base64.Base64OutputStream
+BinaryComparator
+BinaryPrefixComparator
+BitComparator
+BitComparator.BitwiseOp
+BloomType
+BufferedMutator
+BufferedMutator.ExceptionListener
+BufferedMutatorParams
+ByteArrayComparable
+ByteBufferOutputStream
+ByteBufferUtils
+ByteRange
+ByteRangeUtils
+Bytes
+Bytes.ByteArrayComparator
+Bytes.RowEndKeyComparator
+CallerDisconnectedException
+CallQueueTooBigException
+CallTimeoutException
+Cell
+CellCounter
+CellCreator
+CellUtil
+Cipher
+CipherProvider
+Client
+ClientBackoffPolicy
+ClockOutOfSyncException
+Cluster
+ClusterStatus
+ColumnCountGetFilter
+ColumnPaginationFilter
+ColumnPrefixFilter
+ColumnRangeFilter
+CompareFilter
+CompareFilter.CompareOp
+Compression.Algorithm
+ConfigurationUtil
+Connection
+ConnectionClosingException
+ConnectionFactory
+Consistency
+Constants
+Context
+CoprocessorException
+CoprocessorRpcChannel
+CopyTable
+CorruptedSnapshotException
+Counter
+DataBlockEncoding
+DataType
+Decryptor
+DefaultCipherProvider
+Delete
+DependentColumnFilter
+DoNotRetryIOException
+DoNotRetryRegionException
+DroppedSnapshotException
+Durability
+Encryption
+Encryption.Context
+EncryptionTest
+Encryptor
+ExponentialClientBackoffPolicy
+Export
+ExportSnapshot
+ExportSnapshotException
+FailedLogCloseException
+FailedSanityCheckException
+FailedServerException
+FailedSyncBeforeLogCloseException
+FamilyFilter
+FatalConnectionException
+FileSystemVersionException
+Filter
+Filter.ReturnCode
+FilterList
+FilterList.Operator
+FirstKeyOnlyFilter
+FirstKeyValueMatchingQualifiersFilter
+FixedLengthWrapper
+ForeignException
+FuzzyRowFilter
+Get
+GroupingTableMap
+GroupingTableMapper
+HBaseConfiguration
+HBaseInterfaceAudience
+HBaseIOException
+HBaseSnapshotException
+HColumnDescriptor
+HConnection
+HConnectionManager
+HConstants
+HFileOutputFormat
+HFileOutputFormat2
+HLogInputFormat
+HRegionInfo
+HRegionLocation
+HRegionPartitioner
+HRegionPartitioner
+HTableDescriptor
+HTableFactory
+HTableInterfaceFactory
+HTableMultiplexer
+HTableMultiplexer.HTableMultiplexerStatus
+IdentityTableMap
+IdentityTableMapper
+IdentityTableReduce
+IdentityTableReducer
+ImmutableBytesWritable
+ImmutableBytesWritable.Comparator
+Import
+ImportTsv
+InclusiveStopFilter
+IncompatibleFilterException
+Increment
+InterfaceAudience
+InterfaceStability
+InvalidFamilyOperationException
+InvalidRowFilterException
+IsolationLevel
+JsonMapper
+KeepDeletedCells
+KeyOnlyFilter
+KeyProvider
+KeyStoreKeyProvider
+KeyValueSortReducer
+LeaseException
+LeaseNotRecoveredException
+LoadIncrementalHFiles
+LocalHBaseCluster
+LockTimeoutException
+LongComparator
+MasterNotRunningException
+MD5Hash
+MergeRegionException
+MiniZooKeeperCluster
+MultiActionResultTooLarge
+MultipleColumnPrefixFilter
+MultiRowRangeFilter
+MultiRowRangeFilter.RowRange
+MultiTableInputFormat
+MultiTableInputFormatBase
+MultiTableOutputFormat
+MultiTableSnapshotInputFormat
+MultiTableSnapshotInputFormat
+Mutation
+NamespaceDescriptor
+NamespaceDescriptor.Builder
+NamespaceExistException
+NamespaceNotFoundException
+NoServerForRegionException
+NoSuchColumnFamilyException
+NotAllMetaRegionsOnlineException
+NotServingRegionException
+NullComparator
+Operation
+OperationConflictException
+OperationWithAttributes
+Order
+OrderedBlob
+OrderedBlobVar
+OrderedBytes
+OrderedBytesBase
+OrderedFloat32
+OrderedFloat64
+OrderedInt16
+OrderedInt32
+OrderedInt64
+OrderedInt8
+OrderedNumeric
+OrderedString
+PageFilter
+Pair
+PairOfSameType
+ParseConstants
+ParseFilter
+PBType
+PleaseHoldException
+PositionedByteRange
+PreemptiveFastFailException
+PrefixFilter
+ProcedureInfo
+Put
+PutCombiner
+PutSortReducer
+QualifierFilter
+Query
+QuotaExceededException
+QuotaFilter
+QuotaRetriever
+QuotaScope
+QuotaSettings
+QuotaSettingsFactory
+QuotaType
+RandomRowFilter
+RawByte
+RawBytes
+RawBytesFixedLength
+RawBytesTerminated
+RawDouble
+RawFloat
+RawInteger
+RawLong
+RawShort
+RawString
+RawStringFixedLength
+RawStringTerminated
+ReadOnlyByteRangeException
+RegexStringComparator
+RegexStringComparator.EngineType
+RegionAlreadyInTransitionException
+RegionException
+RegionInRecoveryException
+RegionLoad
+RegionLocator
+RegionOfflineException
+RegionServerAbortedException
+RegionServerRunningException
+RegionServerStoppedException
+RegionTooBusyException
+RemoteAdmin
+RemoteHTable
+RemoteWithExtrasException
+ReplicationAdmin
+ReplicationException
+ReplicationPeerConfig
+Response

[33/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HBaseIOException.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HBaseIOException.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HBaseIOException.html
new file mode 100644
index 000..1e45c72
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HBaseIOException.html
@@ -0,0 +1,313 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HBaseIOException (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HBaseIOException
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+Direct Known Subclasses:
+DoNotRetryIOException, FailedServerException, LeaseNotRecoveredException, 
PleaseHoldException, RegionException, StoppedRpcClientException, T
 ableInfoMissingException, WrongRowIOException
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class HBaseIOException
+extends http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+All hbase specific IOExceptions should be subclasses of 
HBaseIOException
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+HBaseIOException()
+
+
+HBaseIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringmessage)
+
+
+HBaseIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmessage,
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+HBaseIOException(http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface 

[43/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/index-all.html
--
diff --git a/1.2/apidocs/index-all.html b/1.2/apidocs/index-all.html
new file mode 100644
index 000..17417a7
--- /dev/null
+++ b/1.2/apidocs/index-all.html
@@ -0,0 +1,14687 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Index (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+ABCDEFGHIJKLMNOPQRSTUVWZ
+
+
+A
+
+A - 
Static variable in class org.apache.hadoop.hbase.filter.ParseConstants
+
+ASCII code for 'A'
+
+abort(String,
 Throwable) - Method in interface org.apache.hadoop.hbase.client.Admin
+
+abortProcedure(long,
 boolean) - Method in interface org.apache.hadoop.hbase.client.Admin
+
+abort a procedure
+
+abortProcedureAsync(long,
 boolean) - Method in interface org.apache.hadoop.hbase.client.Admin
+
+Abort a procedure but does not block and wait for it be 
completely removed.
+
+abortTask(TaskAttemptContext)
 - Method in class org.apache.hadoop.hbase.mapreduce.TableOutputCommitter
+
+AccessDeniedException - Exception in org.apache.hadoop.hbase.security
+
+Exception thrown by access-related methods.
+
+AccessDeniedException()
 - Constructor for exception org.apache.hadoop.hbase.security.AccessDeniedException
+
+AccessDeniedException(Class?,
 String) - Constructor for exception 
org.apache.hadoop.hbase.security.AccessDeniedException
+
+AccessDeniedException(String)
 - Constructor for exception org.apache.hadoop.hbase.security.AccessDeniedException
+
+AccessDeniedException(Throwable)
 - Constructor for exception org.apache.hadoop.hbase.security.AccessDeniedException
+
+add(byte[],
 byte[], byte[]) - Method in class org.apache.hadoop.hbase.client.Append
+
+Add the specified column and value to this Append 
operation.
+
+add(Cell)
 - Method in class org.apache.hadoop.hbase.client.Append
+
+Add column and value to this Append operation.
+
+add(Cell)
 - Method in class org.apache.hadoop.hbase.client.Increment
+
+Add the specified KeyValue to this operation.
+
+add(byte[],
 byte[], byte[]) - Method in class org.apache.hadoop.hbase.client.Put
+
+Deprecated.
+Since 1.0.0. Use Put.addColumn(byte[],
 byte[], byte[])
+
+
+add(byte[],
 byte[], long, byte[]) - Method in class 
org.apache.hadoop.hbase.client.Put
+
+Deprecated.
+Since 1.0.0. Use Put.addColumn(byte[],
 byte[], long, byte[])
+
+
+add(byte[],
 ByteBuffer, long, ByteBuffer) - Method in class 
org.apache.hadoop.hbase.client.Put
+
+Deprecated.
+Since 1.0.0. Use Put.addColumn(byte[],
 ByteBuffer, long, ByteBuffer)
+
+
+add(Cell)
 - Method in class org.apache.hadoop.hbase.client.Put
+
+Add the specified KeyValue to this Put operation.
+
+add(Put)
 - Method in class org.apache.hadoop.hbase.client.RowMutations
+
+Add a Put operation 
to the list of mutations
+
+add(Delete)
 - Method in class org.apache.hadoop.hbase.client.RowMutations
+
+Add a Delete 
operation to the list of mutations
+
+add(String)
 - Method in class org.apache.hadoop.hbase.rest.client.Cluster
+
+Add a node to the cluster
+
+add(String,
 int) - Method in class org.apache.hadoop.hbase.rest.client.Cluster
+
+Add a node to the cluster
+
+add(DataType?)
 - Method in class org.apache.hadoop.hbase.types.StructBuilder
+
+Append field to the sequence of accumulated 
fields.
+
+add(byte[], 
byte[]) - Static method in class org.apache.hadoop.hbase.util.Bytes
+
+add(byte[],
 byte[], byte[]) - Static method in class 
org.apache.hadoop.hbase.util.Bytes
+
+add(byte[][])
 - Static method in class org.apache.hadoop.hbase.util.Bytes
+
+add(long) 
- Method in class org.apache.hadoop.hbase.util.Counter
+
+addClientPort(int)
 - Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
+
+Add a client port to the list.
+
+addColumn(TableName,
 HColumnDescriptor) - Method in interface 
org.apache.hadoop.hbase.client.Admin
+
+Add a column to an existing table.
+
+addColumn(byte[],
 byte[]) - Method in class org.apache.hadoop.hbase.client.Delete
+
+Delete the latest version of the specified column.
+
+addColumn(byte[],
 byte[], long) - Method in class org.apache.hadoop.hbase.client.Delete
+
+Delete the specified version of the specified column.
+
+addColumn(byte[],
 byte[]) - Method in class org.apache.hadoop.hbase.client.Get
+
+Get the column from the specific family with the specified 
qualifier.
+
+addColumn(byte[],
 

[41/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/Cell.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/Cell.html 
b/1.2/apidocs/org/apache/hadoop/hbase/Cell.html
new file mode 100644
index 000..4982cca
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/Cell.html
@@ -0,0 +1,702 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Cell (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":38,"i1":6,"i2":6,"i3":6,"i4":38,"i5":38,"i6":6,"i7":6,"i8":6,"i9":38,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":38,"i20":6,"i21":6,"i22":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Interface Cell
+
+
+
+
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public interface Cell
+The unit of storage in HBase consisting of the following 
fields:
+ 
+ 
+ 1) row
+ 2) column family
+ 3) column qualifier
+ 4) timestamp
+ 5) type
+ 6) MVCC version
+ 7) value
+ 
+ 
+ Uniqueness is determined by the combination of row, column family, column 
qualifier,
+ timestamp, and type.
+ 
+ 
+ The natural comparator will perform a bitwise comparison on row, column 
family, and column
+ qualifier. Less intuitively, it will then treat the greater timestamp as the 
lesser value with
+ the goal of sorting newer cells first.
+ 
+ 
+ This interface should not include methods that allocate new byte[]'s such as 
those used in client
+ or debugging code. These users should use the methods found in the CellUtil class.
+ Currently for to minimize the impact of existing applications moving between 
0.94 and 0.96, we
+ include the costly helper methods marked as deprecated.   
+ 
+ 
+ Cell implements ComparableCell which is only meaningful when
+ comparing to other keys in the
+ same table. It uses CellComparator which does not work on the -ROOT- and 
hbase:meta tables.
+ 
+ 
+ In the future, we may consider adding a boolean isOnHeap() method and a 
getValueBuffer() method
+ that can be used to pass a value directly from an off-heap ByteBuffer to the 
network without
+ copying into an on-heap byte[].
+ 
+ 
+ Historic note: the original Cell implementation (KeyValue) requires that all 
fields be encoded as
+ consecutive bytes in the same byte[], whereas this interface allows fields to 
reside in separate
+ byte[]'s.
+ 
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsAbstract MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+byte[]
+getFamily()
+Deprecated.
+as of 0.96, use CellUtil.cloneFamily(Cell)
+
+
+
+
+byte[]
+getFamilyArray()
+Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
+ containing array.
+
+
+
+byte
+getFamilyLength()
+
+
+int
+getFamilyOffset()
+
+
+long
+getMvccVersion()
+Deprecated.
+as of 1.0, use getSequenceId()
+ 
+ Internal use only. A region-specific sequence ID given to each operation. It 
always exists for
+ cells in the memstore but is not retained forever. It may survive several 
flushes, but
+ generally becomes irrelevant after the cell's row is no longer involved in 
any operations that
+ require strict consistency.
+
+
+
+
+byte[]
+getQualifier()
+Deprecated.
+as of 0.96, use CellUtil.cloneQualifier(Cell)
+
+
+
+
+byte[]
+getQualifierArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+int
+getQualifierLength()
+
+
+int
+getQualifierOffset()
+
+
+byte[]
+getRow()
+Deprecated.
+as of 0.96, use CellUtil.getRowByte(Cell,
 int)
+
+
+
+
+byte[]
+getRowArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+short
+getRowLength()
+
+
+int
+getRowOffset()
+
+
+long
+getSequenceId()
+A region-specific unique monotonically increasing sequence 
ID given to each Cell.
+
+
+
+byte[]
+getTagsArray()
+
+
+int
+getTagsLength()
+
+
+int
+getTagsOffset()
+
+
+long
+getTimestamp()
+
+
+byte
+getTypeByte()
+
+
+byte[]
+getValue()

[46/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/constant-values.html
--
diff --git a/1.2/apidocs/constant-values.html b/1.2/apidocs/constant-values.html
new file mode 100644
index 000..31f01d1
--- /dev/null
+++ b/1.2/apidocs/constant-values.html
@@ -0,0 +1,4295 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Constant Field Values (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+
+Constant Field Values
+Contents
+
+org.apache.*
+
+
+
+
+
+org.apache.*
+
+
+
+org.apache.hadoop.hbase.HBaseInterfaceAudience
+
+Modifier and Type
+Constant Field
+Value
+
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CONFIG
+"Configuration"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COPROC
+"Coprocesssor"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+PHOENIX
+"Phoenix"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+REPLICATION
+"Replication"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+TOOLS
+"Tools"
+
+
+
+
+
+
+org.apache.hadoop.hbase.HColumnDescriptor
+
+Modifier and Type
+Constant Field
+Value
+
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOCKCACHE
+"BLOCKCACHE"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOCKSIZE
+"BLOCKSIZE"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+BLOOMFILTER
+"BLOOMFILTER"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_BLOOMS_ON_WRITE
+"CACHE_BLOOMS_ON_WRITE"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_DATA_IN_L1
+"CACHE_DATA_IN_L1"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_DATA_ON_WRITE
+"CACHE_DATA_ON_WRITE"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CACHE_INDEX_ON_WRITE
+"CACHE_INDEX_ON_WRITE"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPRESS_TAGS
+"COMPRESS_TAGS"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPRESSION
+"COMPRESSION"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPRESSION_COMPACT
+"COMPRESSION_COMPACT"
+
+
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+DATA_BLOCK_ENCODING
+"DATA_BLOCK_ENCODING"
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_BLOCKCACHE
+true
+
+
+
+
+publicstaticfinalint
+DEFAULT_BLOCKSIZE
+65536
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_CACHE_BLOOMS_ON_WRITE
+false
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_CACHE_DATA_IN_L1
+false
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_CACHE_DATA_ON_WRITE
+false
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_CACHE_INDEX_ON_WRITE
+false
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_COMPRESS_TAGS
+true
+
+
+
+
+publicstaticfinalshort
+DEFAULT_DFS_REPLICATION
+0
+
+
+
+
+publicstaticfinalboolean
+DEFAULT_ENCODE_ON_DISK
+true
+
+
+
+

[21/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
new file mode 100644
index 000..dd93026
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
@@ -0,0 +1,897 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValue.KeyOnlyKeyValue (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
KeyValue.KeyOnlyKeyValue
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValue
+
+
+org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, Cell, HeapSize, SettableSequenceId, SettableTimestamp
+
+
+Enclosing class:
+KeyValue
+
+
+
+public static class KeyValue.KeyOnlyKeyValue
+extends KeyValue
+A simple form of KeyValue that creates a keyvalue with only 
the key part of the byte[]
+ Mainly used in places where we need to compare two cells.  Avoids copying of 
bytes
+ In places like block index keys, we need to compare the key byte[] with a 
cell.
+ Hence create a Keyvalue(aka Cell) that would help in comparing as two 
cells
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.KeyValue
+KeyValue.KeyOnlyKeyValue, KeyValue.KVComparator, KeyValue.MetaComparator, KeyValue.RawBytesComparator, KeyValue.RowOnlyComparator, KeyValue.SamePrefixComparatorT, KeyValue.Type
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.KeyValue
+bytes, COLUMN_FAMILY_DELIM_ARRAY,
 COLUMN_FAMILY_DELIMITER,
 COMPARATOR,
 FAMILY_LENGTH_SIZE,
 KEY_INFRASTRUCTURE_SIZE,
 KEY_LENGTH_SIZE,
 KEYVALUE_INFRASTRUCTURE_SIZE,
 KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE,
 length, LOWESTKEY,
 META_COMPARATOR,
 offset, 
RAW_COMPARATOR,
 ROW_LENGTH_SIZE,
 ROW_OFFSET,
 TAGS_LENGTH_SIZE,
 TIMESTAMP_SIZE,
 TIMESTAMP_TYPE_SIZE,
 TYPE_SIZE
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+KeyOnlyKeyValue()
+
+
+KeyOnlyKeyValue(byte[]b)
+
+
+KeyOnlyKeyValue(byte[]b,
+   intoffset,
+   intlength)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+boolean
+equals(http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectother)
+Needed doing 'contains' on List.
+
+
+
+byte[]
+getFamilyArray()
+Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
+ containing array.
+
+
+
+byte
+getFamilyLength()
+
+
+int
+getFamilyOffset()
+
+
+byte[]
+getKey()
+Do not use unless you have to.
+
+
+
+int
+getKeyLength()
+
+
+int
+getKeyOffset()
+
+
+byte[]
+getQualifierArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+int
+getQualifierLength()
+
+
+int
+getQualifierOffset()
+
+
+byte[]
+getRowArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+short
+getRowLength()
+
+
+int
+getRowOffset()
+
+
+byte[]
+getTagsArray()
+
+
+int
+getTagsLength()
+This returns the total length of the tag bytes
+
+
+
+int
+getTagsOffset()
+This returns the offset where the tag actually starts.
+
+
+
+long
+getTimestamp()
+
+
+int

[18/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValueTestUtil.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/KeyValueTestUtil.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValueTestUtil.html
new file mode 100644
index 000..ae5c906
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValueTestUtil.html
@@ -0,0 +1,483 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValueTestUtil (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class KeyValueTestUtil
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValueTestUtil
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class KeyValueTestUtil
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+KeyValueTestUtil()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static boolean
+containsIgnoreMvccVersion(http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection? extends CellkvCollection1,
+ http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection? extends CellkvCollection2)
+Checks whether KeyValues from kvCollection2 are contained 
in kvCollection1.
+
+
+
+static KeyValue
+create(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrow,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringfamily,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringqualifier,
+  longtimestamp,
+  KeyValue.Typetype,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+
+
+static KeyValue
+create(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrow,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringfamily,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringqualifier,
+  longtimestamp,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+
+
+protected static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getFamilyString(KeyValuekv)
+
+
+protected static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getQualifierString(KeyValuekv)
+
+
+protected static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getRowString(KeyValuekv)
+
+
+protected static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getTimestampString(KeyValuekv)
+
+
+protected static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getTypeString(KeyValuekv)
+
+

[40/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/CellComparator.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/CellComparator.html 
b/1.2/apidocs/org/apache/hadoop/hbase/CellComparator.html
new file mode 100644
index 000..80d038a
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/CellComparator.html
@@ -0,0 +1,825 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+CellComparator (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":10,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class CellComparator
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.CellComparator
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable, http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorCell
+
+
+Direct Known Subclasses:
+CellComparator.RowComparator
+
+
+
+@InterfaceAudience.Private
+ @InterfaceStability.Evolving
+public class CellComparator
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements http://docs.oracle.com/javase/7/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorCell, http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+Compare two HBase cells.  Do not use this method comparing 
-ROOT- or
+ hbase:meta cells.  Cells from these tables need a specialized 
comparator, one that
+ takes account of the special formatting of the row where we have commas to 
delimit table from
+ regionname, from row.  See KeyValue for how it has a special comparator to do 
hbase:meta cells
+ and yet another for -ROOT-.
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static class
+CellComparator.RowComparator
+Counter part for the KeyValue.RowOnlyComparator
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+CellComparator()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static boolean
+areKeyLengthsEqual(Cella,
+  Cellb)
+lengths
+
+
+
+static boolean
+areRowLengthsEqual(Cella,
+  Cellb)
+
+
+int
+compare(Cella,
+   Cellb)
+
+
+static int
+compare(Cella,
+   Cellb,
+   booleanignoreSequenceid)
+Compare cells.
+
+
+
+static int
+compareColumns(Cellleft,
+  Cellright)
+
+
+static int
+compareCommonFamilyPrefix(Cellleft,
+ Cellright,
+ intfamilyCommonPrefix)
+
+
+static int
+compareCommonQualifierPrefix(Cellleft,
+Cellright,
+intqualCommonPrefix)
+
+
+static int
+compareCommonRowPrefix(Cellleft,
+  Cellright,
+  introwCommonPrefix)
+
+
+static int
+compareFamilies(Cellleft,
+   Cellright)
+
+
+int
+compareFlatKey(Cellleft,
+  Cellright)
+
+
+static int
+compareQualifiers(Cellleft,
+ Cellright)
+
+
+static int
+compareRows(byte[]left,
+   intloffset,
+   intllength,
+   byte[]right,
+   introffset,
+   intrlength)
+Do not use comparing rows from hbase:meta.
+
+
+

[49/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/allclasses-frame.html
--
diff --git a/1.2/apidocs/allclasses-frame.html 
b/1.2/apidocs/allclasses-frame.html
new file mode 100644
index 000..861a2b0
--- /dev/null
+++ b/1.2/apidocs/allclasses-frame.html
@@ -0,0 +1,353 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+All Classes (Apache HBase 1.2.4 API)
+
+
+
+
+AllClasses
+
+
+AccessDeniedException
+Admin
+Append
+Attributes
+BadAuthException
+Base64
+Base64.Base64InputStream
+Base64.Base64OutputStream
+BinaryComparator
+BinaryPrefixComparator
+BitComparator
+BitComparator.BitwiseOp
+BloomType
+BufferedMutator
+BufferedMutator.ExceptionListener
+BufferedMutatorParams
+ByteArrayComparable
+ByteBufferOutputStream
+ByteBufferUtils
+ByteRange
+ByteRangeUtils
+Bytes
+Bytes.ByteArrayComparator
+Bytes.RowEndKeyComparator
+CallerDisconnectedException
+CallQueueTooBigException
+CallTimeoutException
+Cell
+CellCounter
+CellCreator
+CellUtil
+Cipher
+CipherProvider
+Client
+ClientBackoffPolicy
+ClockOutOfSyncException
+Cluster
+ClusterStatus
+ColumnCountGetFilter
+ColumnPaginationFilter
+ColumnPrefixFilter
+ColumnRangeFilter
+CompareFilter
+CompareFilter.CompareOp
+Compression.Algorithm
+ConfigurationUtil
+Connection
+ConnectionClosingException
+ConnectionFactory
+Consistency
+Constants
+Context
+CoprocessorException
+CoprocessorRpcChannel
+CopyTable
+CorruptedSnapshotException
+Counter
+DataBlockEncoding
+DataType
+Decryptor
+DefaultCipherProvider
+Delete
+DependentColumnFilter
+DoNotRetryIOException
+DoNotRetryRegionException
+DroppedSnapshotException
+Durability
+Encryption
+Encryption.Context
+EncryptionTest
+Encryptor
+ExponentialClientBackoffPolicy
+Export
+ExportSnapshot
+ExportSnapshotException
+FailedLogCloseException
+FailedSanityCheckException
+FailedServerException
+FailedSyncBeforeLogCloseException
+FamilyFilter
+FatalConnectionException
+FileSystemVersionException
+Filter
+Filter.ReturnCode
+FilterList
+FilterList.Operator
+FirstKeyOnlyFilter
+FirstKeyValueMatchingQualifiersFilter
+FixedLengthWrapper
+ForeignException
+FuzzyRowFilter
+Get
+GroupingTableMap
+GroupingTableMapper
+HBaseConfiguration
+HBaseInterfaceAudience
+HBaseIOException
+HBaseSnapshotException
+HColumnDescriptor
+HConnection
+HConnectionManager
+HConstants
+HFileOutputFormat
+HFileOutputFormat2
+HLogInputFormat
+HRegionInfo
+HRegionLocation
+HRegionPartitioner
+HRegionPartitioner
+HTableDescriptor
+HTableFactory
+HTableInterfaceFactory
+HTableMultiplexer
+HTableMultiplexer.HTableMultiplexerStatus
+IdentityTableMap
+IdentityTableMapper
+IdentityTableReduce
+IdentityTableReducer
+ImmutableBytesWritable
+ImmutableBytesWritable.Comparator
+Import
+ImportTsv
+InclusiveStopFilter
+IncompatibleFilterException
+Increment
+InterfaceAudience
+InterfaceStability
+InvalidFamilyOperationException
+InvalidRowFilterException
+IsolationLevel
+JsonMapper
+KeepDeletedCells
+KeyOnlyFilter
+KeyProvider
+KeyStoreKeyProvider
+KeyValueSortReducer
+LeaseException
+LeaseNotRecoveredException
+LoadIncrementalHFiles
+LocalHBaseCluster
+LockTimeoutException
+LongComparator
+MasterNotRunningException
+MD5Hash
+MergeRegionException
+MiniZooKeeperCluster
+MultiActionResultTooLarge
+MultipleColumnPrefixFilter
+MultiRowRangeFilter
+MultiRowRangeFilter.RowRange
+MultiTableInputFormat
+MultiTableInputFormatBase
+MultiTableOutputFormat
+MultiTableSnapshotInputFormat
+MultiTableSnapshotInputFormat
+Mutation
+NamespaceDescriptor
+NamespaceDescriptor.Builder
+NamespaceExistException
+NamespaceNotFoundException
+NoServerForRegionException
+NoSuchColumnFamilyException
+NotAllMetaRegionsOnlineException
+NotServingRegionException
+NullComparator
+Operation
+OperationConflictException
+OperationWithAttributes
+Order
+OrderedBlob
+OrderedBlobVar
+OrderedBytes
+OrderedBytesBase
+OrderedFloat32
+OrderedFloat64
+OrderedInt16
+OrderedInt32
+OrderedInt64
+OrderedInt8
+OrderedNumeric
+OrderedString
+PageFilter
+Pair
+PairOfSameType
+ParseConstants
+ParseFilter
+PBType
+PleaseHoldException
+PositionedByteRange
+PreemptiveFastFailException
+PrefixFilter
+ProcedureInfo
+Put
+PutCombiner
+PutSortReducer
+QualifierFilter
+Query
+QuotaExceededException
+QuotaFilter
+QuotaRetriever
+QuotaScope
+QuotaSettings
+QuotaSettingsFactory
+QuotaType
+RandomRowFilter
+RawByte
+RawBytes
+RawBytesFixedLength
+RawBytesTerminated
+RawDouble
+RawFloat
+RawInteger
+RawLong
+RawShort
+RawString
+RawStringFixedLength
+RawStringTerminated
+ReadOnlyByteRangeException
+RegexStringComparator
+RegexStringComparator.EngineType
+RegionAlreadyInTransitionException
+RegionException
+RegionInRecoveryException
+RegionLoad
+RegionLocator
+RegionOfflineException
+RegionServerAbortedException
+RegionServerRunningException
+RegionServerStoppedException
+RegionTooBusyException
+RemoteAdmin
+RemoteHTable
+RemoteWithExtrasException
+ReplicationAdmin
+ReplicationException
+ReplicationPeerConfig
+Response

[11/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/PleaseHoldException.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/PleaseHoldException.html 
b/1.2/apidocs/org/apache/hadoop/hbase/PleaseHoldException.html
new file mode 100644
index 000..9470795
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/PleaseHoldException.html
@@ -0,0 +1,305 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+PleaseHoldException (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
PleaseHoldException
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+
+
+http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+org.apache.hadoop.hbase.PleaseHoldException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Stable
+public class PleaseHoldException
+extends HBaseIOException
+This exception is thrown by the master when a region server 
was shut down and
+ restarted so fast that the master still hasn't processed the server shutdown
+ of the first instance, or when master is initializing and client call admin
+ operations, or when an operation is performed on a region server that is 
still starting.
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+PleaseHoldException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringmessage)
+
+
+PleaseHoldException(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmessage,
+   http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+PleaseHoldException(http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
+http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface in 

[28/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/HRegionInfo.html 
b/1.2/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
new file mode 100644
index 000..43c1811
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
@@ -0,0 +1,2040 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+HRegionInfo (Apache HBase 1.2.4 API)
+
+
+
+
+
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":10,"i13":10,"i14":41,"i15":10,"i16":10,"i17":10,"i18":41,"i19":41,"i20":41,"i21":41,"i22":10,"i23":10,"i24":10,"i25":10,"i26":41,"i27":41,"i28":10,"i29":10,"i30":9,"i31":10,"i32":9,"i33":42,"i34":41,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":42,"i52":10,"i53":10,"i54":10,"i55":10,"i56":9,"i57":10,"i58":42};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class HRegionInfo
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.HRegionInfo
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionInfo
+
+
+
+@InterfaceAudience.Public
+ @InterfaceStability.Evolving
+public class HRegionInfo
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionInfo
+Information about a region. A region is a range of keys in 
the whole keyspace of a table, an
+ identifier (a timestamp) for differentiating between subset ranges (after 
region split)
+ and a replicaId for differentiating the instance for the same range and some 
status information
+ about the region.
+
+ The region has a unique name which consists of the following fields:
+ 
+  tableName   : The name of the table 
+  startKey: The startKey for the region. 
+  regionId: A timestamp when the region is created. 
+  replicaId   : An id starting from 0 to differentiate replicas of the 
same region range
+ but hosted in separated servers. The same region range can be hosted in 
multiple locations.
+  encodedName : An MD5 encoded string for the region name.
+ 
+
+  Other than the fields in the region name, region info contains:
+ 
+  endKey  : the endKey for the region (exclusive) 
+  split   : Whether the region is split 
+  offline : Whether the region is offline 
+ 
+
+ In 0.98 or before, a list of table's regions would fully cover the total 
keyspace, and at any
+ point in time, a row key always belongs to a single region, which is hosted 
in a single server.
+ In 0.99+, a region can have multiple instances (called replicas), and thus a 
range (or row) can
+ correspond to multiple HRegionInfo's. These HRI's share the same fields 
however except the
+ replicaId field. If the replicaId is not set, it defaults to 0, which is 
compatible with the
+ previous behavior of a range corresponding to 1 region.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static int
+DEFAULT_REPLICA_ID
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ENCODED_REGION_NAME_REGEX
+A non-capture group so that this can be embedded.
+
+
+
+static HRegionInfo
+FIRST_META_REGIONINFO
+HRegionInfo for first meta region
+
+
+
+static http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface 

[01/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ca662680d -> dbfeb6d66


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/TagRewriteCell.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/TagRewriteCell.html 
b/1.2/apidocs/org/apache/hadoop/hbase/TagRewriteCell.html
new file mode 100644
index 000..b4ec4b6
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/TagRewriteCell.html
@@ -0,0 +1,866 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+TagRewriteCell (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":42,"i1":10,"i2":10,"i3":10,"i4":42,"i5":42,"i6":10,"i7":10,"i8":10,"i9":42,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class TagRewriteCell
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.TagRewriteCell
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Cell, HeapSize, SettableSequenceId, SettableTimestamp
+
+
+
+@InterfaceAudience.Private
+public class TagRewriteCell
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements Cell, SettableSequenceId, SettableTimestamp, HeapSize
+This can be used when a Cell has to change with 
addition/removal of one or more tags. This is an
+ efficient way to do so in which only the tags bytes part need to recreated 
and copied. All other
+ parts, refer to the original Cell.
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TagRewriteCell(Cellcell,
+  byte[]tags)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete MethodsDeprecated Methods
+
+Modifier and Type
+Method and Description
+
+
+byte[]
+getFamily()
+Deprecated.
+
+
+
+byte[]
+getFamilyArray()
+Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
+ containing array.
+
+
+
+byte
+getFamilyLength()
+
+
+int
+getFamilyOffset()
+
+
+long
+getMvccVersion()
+Deprecated.
+
+
+
+byte[]
+getQualifier()
+Deprecated.
+
+
+
+byte[]
+getQualifierArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+int
+getQualifierLength()
+
+
+int
+getQualifierOffset()
+
+
+byte[]
+getRow()
+Deprecated.
+
+
+
+byte[]
+getRowArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+short
+getRowLength()
+
+
+int
+getRowOffset()
+
+
+long
+getSequenceId()
+A region-specific unique monotonically increasing sequence 
ID given to each Cell.
+
+
+
+byte[]
+getTagsArray()
+
+
+int
+getTagsLength()
+
+
+int
+getTagsOffset()
+
+
+long
+getTimestamp()
+
+
+byte
+getTypeByte()
+
+
+byte[]
+getValue()
+Deprecated.
+
+
+
+byte[]
+getValueArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+int
+getValueLength()
+
+
+int
+getValueOffset()
+
+
+long
+heapSize()
+
+
+void
+setSequenceId(longseqId)
+Sets with the given seqId.
+
+
+
+void
+setTimestamp(byte[]ts,
+inttsOffset)
+Sets with the given timestamp.
+
+
+
+void
+setTimestamp(longts)
+Sets with the given timestamp.
+
+
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--;
 

[19/51] [partial] hbase-site git commit: Adding 1.2 site

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbfeb6d6/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.html
--
diff --git a/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.html 
b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.html
new file mode 100644
index 000..ba17d78
--- /dev/null
+++ b/1.2/apidocs/org/apache/hadoop/hbase/KeyValue.html
@@ -0,0 +1,3487 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+KeyValue (Apache HBase 1.2.4 API)
+
+
+
+
+
+
+var methods = 
{"i0":10,"i1":9,"i2":9,"i3":9,"i4":41,"i5":41,"i6":41,"i7":10,"i8":9,"i9":9,"i10":9,"i11":10,"i12":42,"i13":9,"i14":9,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":10,"i25":9,"i26":9,"i27":9,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":10,"i34":10,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":42,"i47":10,"i48":42,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":42,"i55":9,"i56":41,"i57":42,"i58":9,"i59":10,"i60":9,"i61":9,"i62":9,"i63":41,"i64":9,"i65":9,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":9,"i74":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class KeyValue
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.KeyValue
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/7/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, Cell, HeapSize, SettableSequenceId, SettableTimestamp
+
+
+Direct Known Subclasses:
+KeyValue.KeyOnlyKeyValue, NoTagsKeyValue
+
+
+
+@InterfaceAudience.Private
+public class KeyValue
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements Cell, HeapSize, http://docs.oracle.com/javase/7/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, SettableSequenceId, SettableTimestamp
+An HBase Key/Value. This is the fundamental HBase Type.
+ 
+ HBase applications and users should use the Cell interface and avoid directly 
using KeyValue
+ and member functions not defined in Cell.
+ 
+ If being used client-side, the primary methods to access individual fields 
are getRow(),
+ getFamily(),
 getQualifier(),
 getTimestamp(),
 and getValue().
+ These methods allocate new byte arrays and return copies. Avoid their use 
server-side.
+ 
+ Instances of this class are immutable. They do not implement Comparable but 
Comparators are
+ provided. Comparators change with context, whether user table or a catalog 
table comparison. Its
+ critical you use the appropriate comparator. There are Comparators for normal 
HFiles, Meta's
+ Hfiles, and bloom filter keys.
+ 
+ KeyValue wraps a byte array and takes offsets and lengths into passed array 
at where to start
+ interpreting the content as KeyValue. The KeyValue format inside a byte array 
is:
+ keylength valuelength key value
+ Key is further decomposed as:
+ rowlength row columnfamilylength
+ columnfamily columnqualifier
+ timestamp keytype
+ The rowlength maximum is Short.MAX_SIZE, column 
family length maximum
+ is Byte.MAX_SIZE, and column qualifier + key length must be 
+ Integer.MAX_SIZE. The column does not contain the 
family/qualifier delimiter,
+ COLUMN_FAMILY_DELIMITER
+ KeyValue can optionally contain Tags. When it contains tags, it is added in 
the byte array after
+ the value part. The format for this part is: 
tagslengthtagsbytes.
+ tagslength maximum is Short.MAX_SIZE. The 
tagsbytes
+ contain one or more tags where as each tag is of the form
+ taglengthtagtypetagbytes.
+ tagtype is one byte and
+ taglength maximum is Short.MAX_SIZE and it includes 
1 byte type length
+ and actual tag bytes length.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+

hbase git commit: HBASE-17087 Enable Aliasing for CodedInputStream created by ByteInputByteString#newCodedInput.

2016-11-14 Thread anoopsamjohn
Repository: hbase
Updated Branches:
  refs/heads/master 86df89b01 -> 3f1f58726


HBASE-17087 Enable Aliasing for CodedInputStream created by 
ByteInputByteString#newCodedInput.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3f1f5872
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3f1f5872
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3f1f5872

Branch: refs/heads/master
Commit: 3f1f587266d999eba3adcf7986e68c4fa0f42a82
Parents: 86df89b
Author: anoopsamjohn 
Authored: Tue Nov 15 10:57:41 2016 +0530
Committer: anoopsamjohn 
Committed: Tue Nov 15 10:57:41 2016 +0530

--
 .../com/google/protobuf/ByteInputByteString.java  |  4 +++-
 .../src/main/patches/HBASE-17087.patch| 14 ++
 2 files changed, 17 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3f1f5872/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
index 1949602..30de4ec 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
@@ -244,6 +244,8 @@ final class ByteInputByteString extends 
ByteString.LeafByteString {
   public CodedInputStream newCodedInput() {
 // We trust CodedInputStream not to modify the bytes, or to give anyone
 // else access to them.
-return CodedInputStream.newInstance(buffer, offset, length, true);
+CodedInputStream cis = CodedInputStream.newInstance(buffer, offset, 
length, true);
+cis.enableAliasing(true);
+return cis;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3f1f5872/hbase-protocol-shaded/src/main/patches/HBASE-17087.patch
--
diff --git a/hbase-protocol-shaded/src/main/patches/HBASE-17087.patch 
b/hbase-protocol-shaded/src/main/patches/HBASE-17087.patch
new file mode 100644
index 000..3826efe
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/patches/HBASE-17087.patch
@@ -0,0 +1,14 @@
+diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
+index 1949602..30de4ec 100644
+--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteInputByteString.java
+@@ -244,6 +244,8 @@ final class ByteInputByteString extends 
ByteString.LeafByteString {
+   public CodedInputStream newCodedInput() {
+ // We trust CodedInputStream not to modify the bytes, or to give anyone
+ // else access to them.
+-return CodedInputStream.newInstance(buffer, offset, length, true);
++CodedInputStream cis = CodedInputStream.newInstance(buffer, offset, 
length, true);
++cis.enableAliasing(true);
++return cis;
+   }
+ }



hbase git commit: HBASE-17017 Remove the current per-region latency histogram metrics

2016-11-14 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 ca11c5870 -> 323768148


HBASE-17017 Remove the current per-region latency histogram metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/32376814
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/32376814
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/32376814

Branch: refs/heads/branch-1.3
Commit: 323768148ab062ae50ae22f4a60fe2c58e05ec2d
Parents: ca11c58
Author: Enis Soztutar 
Authored: Tue Nov 8 15:43:41 2016 -0800
Committer: Enis Soztutar 
Committed: Mon Nov 14 17:27:21 2016 -0800

--
 .../regionserver/MetricsRegionServerSource.java |  5 +-
 .../hbase/regionserver/MetricsRegionSource.java | 12 -
 .../regionserver/MetricsRegionSourceImpl.java   | 52 ++--
 .../hadoop/hbase/regionserver/HRegion.java  |  7 +--
 .../hbase/regionserver/MetricsRegion.java   |  8 ---
 .../hbase/regionserver/RSRpcServices.java   |  1 -
 .../regionserver/TestRegionServerMetrics.java   | 12 ++---
 7 files changed, 25 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/32376814/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 182122a..1991105 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -316,7 +316,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT = 
"blockCacheGeneralBloomMetaHitCount";
   String BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT = 
"blockCacheDeleteFamilyBloomHitCount";
   String BLOCK_CACHE_TRAILER_HIT_COUNT = "blockCacheTrailerHitCount";
-  
+
   String RS_START_TIME_NAME = "regionServerStartTime";
   String ZOOKEEPER_QUORUM_NAME = "zookeeperQuorum";
   String SERVER_NAME_NAME = "serverName";
@@ -335,6 +335,7 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String MUTATE_KEY = "mutate";
   String APPEND_KEY = "append";
   String REPLAY_KEY = "replay";
+  String SCAN_KEY = "scan";
   String SCAN_SIZE_KEY = "scanSize";
   String SCAN_TIME_KEY = "scanTime";
 
@@ -446,6 +447,6 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String RPC_MUTATE_REQUEST_COUNT_DESC =
   "Number of rpc mutation requests this region server has answered.";
   String AVERAGE_REGION_SIZE = "averageRegionSize";
-  String AVERAGE_REGION_SIZE_DESC = 
+  String AVERAGE_REGION_SIZE_DESC =
   "Average region size over the region server including memstore and 
storefile sizes.";
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/32376814/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index 12ef07c..decf841 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -60,24 +60,12 @@ public interface MetricsRegionSource extends 
Comparable {
   void updateDelete();
 
   /**
-   * Update count and sizes of gets.
-   * @param getSize size in bytes of the resulting key values for a get
-   */
-  void updateGetSize(long getSize);
-
-  /**
* Update time of gets
* @param mills time for this get operation.
*/
   void updateGet(long mills);
 
   /**
-   * Update the count and sizes of resultScanner.next()
-   * @param scanSize Size in bytes of the resulting key values for a next()
-   */
-  void updateScanSize(long scanSize);
-
-  /**
* Update time used of resultScanner.next().
* */
   void updateScanTime(long mills);

http://git-wip-us.apache.org/repos/asf/hbase/blob/32376814/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 

hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/master 6c1ceaf11 -> dcf03b32f


HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dcf03b32
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dcf03b32
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dcf03b32

Branch: refs/heads/master
Commit: dcf03b32f48b3b0a21ccea55a8044b88693b29c5
Parents: 6c1ceaf
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:23:27 2016 -0800

--
 .../org/apache/hadoop/hbase/master/AssignmentManager.java   | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dcf03b32/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index c8c25f1..3540b19 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -2700,9 +2700,16 @@ public class AssignmentManager {
 + ", a=" + rs_a + ", b=" + rs_b;
 }
 
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
 regionOnline(a, serverName);
 regionOnline(b, serverName);
-regionOffline(hri);
+
+// Only offline the merging region if it is known to exist.
+RegionState rs_p = regionStates.getRegionState(hri);
+if (rs_p != null) {
+  regionOffline(hri);
+}
 
 if (getTableStateManager().isTableState(hri.getTable(),
 TableState.State.DISABLED, TableState.State.DISABLING)) {



[5/5] hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca11c587
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca11c587
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca11c587

Branch: refs/heads/branch-1.3
Commit: ca11c5870fa589aa51cd126168807c7e0f103f1a
Parents: 66fbe99
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:14:52 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 38 ++--
 1 file changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca11c587/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 739ebff..ba3e427 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3868,18 +3868,32 @@ public class AssignmentManager extends 
ZooKeeperListener {
 LOG.info("Failed to record merged region " + p.getShortNameToLog());
 return "Failed to record the merging in meta";
   }
-} else {
-  mergingRegions.remove(encodedName);
-  regionOnline(a, sn);
-  regionOnline(b, sn);
+}
+return null;
+  }
+
+  private String onRegionMergeReverted(ServerName sn, TransitionCode code,
+ final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
+RegionState rs_p = regionStates.getRegionState(p);
+String encodedName = p.getEncodedName();
+mergingRegions.remove(encodedName);
+
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
+regionOnline(a, sn);
+regionOnline(b, sn);
+
+// Only offline the merging region if it is known to exist.
+if (rs_p != null) {
   regionOffline(p);
+}
 
-  if (getTableStateManager().isTableState(p.getTable(),
-  ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
-invokeUnAssign(a);
-invokeUnAssign(b);
-  }
+if (getTableStateManager().isTableState(p.getTable(),
+ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
+  invokeUnAssign(a);
+  invokeUnAssign(b);
 }
+
 return null;
   }
 
@@ -4416,7 +4430,6 @@ public class AssignmentManager extends ZooKeeperListener {
   break;
 case MERGE_PONR:
 case MERGED:
-case MERGE_REVERTED:
   errorMsg = onRegionMerge(serverName, code, hri,
 HRegionInfo.convert(transition.getRegionInfo(1)),
 HRegionInfo.convert(transition.getRegionInfo(2)));
@@ -4428,6 +4441,11 @@ public class AssignmentManager extends ZooKeeperListener 
{
 }
   }
   break;
+case MERGE_REVERTED:
+errorMsg = onRegionMergeReverted(serverName, code, hri,
+HRegionInfo.convert(transition.getRegionInfo(1)),
+HRegionInfo.convert(transition.getRegionInfo(2)));
+  break;
 
 default:
   errorMsg = "Unexpected transition code " + code;



[1/5] hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 ef9116336 -> 1ed735458
  refs/heads/branch-1 e01ee2fd1 -> e54826434
  refs/heads/branch-1.1 8012383d3 -> a268d4cf5
  refs/heads/branch-1.2 d6626eb6f -> c1701571d
  refs/heads/branch-1.3 66fbe9945 -> ca11c5870


HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ed73545
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ed73545
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ed73545

Branch: refs/heads/0.98
Commit: 1ed735458e5e951cf62067c008219c1fdc784f0a
Parents: ef91163
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:14:24 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 36 +++-
 1 file changed, 27 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ed73545/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 475d9ae..6100eda 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3861,17 +3861,31 @@ public class AssignmentManager extends 
ZooKeeperListener {
 LOG.info("Failed to record merged region " + p.getShortNameToLog());
 return "Failed to record the merging in meta";
   }
-} else {
-  mergingRegions.remove(encodedName);
-  regionOnline(a, sn);
-  regionOnline(b, sn);
+}
+return null;
+  }
+
+  private String onRegionMergeReverted(ServerName sn, TransitionCode code,
+ final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
+RegionState rs_p = regionStates.getRegionState(p);
+String encodedName = p.getEncodedName();
+mergingRegions.remove(encodedName);
+
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
+regionOnline(a, sn);
+regionOnline(b, sn);
+
+// Only offline the merging region if it is known to exist.
+if (rs_p != null) {
   regionOffline(p);
+}
 
-  if (isTableDisabledOrDisabling(p.getTable())) {
-invokeUnAssign(a);
-invokeUnAssign(b);
-  }
+if (isTableDisabledOrDisabling(p.getTable())) {
+  invokeUnAssign(a);
+  invokeUnAssign(b);
 }
+
 return null;
   }
 
@@ -4249,11 +4263,15 @@ public class AssignmentManager extends 
ZooKeeperListener {
 case READY_TO_MERGE:
 case MERGE_PONR:
 case MERGED:
-case MERGE_REVERTED:
   errorMsg = onRegionMerge(serverName, code, hri,
 HRegionInfo.convert(transition.getRegionInfo(1)),
 HRegionInfo.convert(transition.getRegionInfo(2)));
   break;
+case MERGE_REVERTED:
+errorMsg = onRegionMergeReverted(serverName, code, hri,
+HRegionInfo.convert(transition.getRegionInfo(1)),
+HRegionInfo.convert(transition.getRegionInfo(2)));
+  break;
 
 default:
   errorMsg = "Unexpected transition code " + code;



[3/5] hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a268d4cf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a268d4cf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a268d4cf

Branch: refs/heads/branch-1.1
Commit: a268d4cf5a5ed5ea298645d564e2ad96f82c2953
Parents: 8012383
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:14:49 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 38 ++--
 1 file changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a268d4cf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 0d68382..d2e6d02 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3846,18 +3846,32 @@ public class AssignmentManager extends 
ZooKeeperListener {
 LOG.info("Failed to record merged region " + p.getShortNameToLog());
 return "Failed to record the merging in meta";
   }
-} else {
-  mergingRegions.remove(encodedName);
-  regionOnline(a, sn);
-  regionOnline(b, sn);
+}
+return null;
+  }
+
+  private String onRegionMergeReverted(ServerName sn, TransitionCode code,
+ final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
+RegionState rs_p = regionStates.getRegionState(p);
+String encodedName = p.getEncodedName();
+mergingRegions.remove(encodedName);
+
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
+regionOnline(a, sn);
+regionOnline(b, sn);
+
+// Only offline the merging region if it is known to exist.
+if (rs_p != null) {
   regionOffline(p);
+}
 
-  if (getTableStateManager().isTableState(p.getTable(),
-  ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
-invokeUnAssign(a);
-invokeUnAssign(b);
-  }
+if (getTableStateManager().isTableState(p.getTable(),
+ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
+  invokeUnAssign(a);
+  invokeUnAssign(b);
 }
+
 return null;
   }
 
@@ -4378,7 +4392,6 @@ public class AssignmentManager extends ZooKeeperListener {
 case READY_TO_MERGE:
 case MERGE_PONR:
 case MERGED:
-case MERGE_REVERTED:
   errorMsg = onRegionMerge(serverName, code, hri,
 HRegionInfo.convert(transition.getRegionInfo(1)),
 HRegionInfo.convert(transition.getRegionInfo(2)));
@@ -4390,6 +4403,11 @@ public class AssignmentManager extends ZooKeeperListener 
{
 }
   }
   break;
+case MERGE_REVERTED:
+errorMsg = onRegionMergeReverted(serverName, code, hri,
+HRegionInfo.convert(transition.getRegionInfo(1)),
+HRegionInfo.convert(transition.getRegionInfo(2)));
+  break;
 
 default:
   errorMsg = "Unexpected transition code " + code;



[4/5] hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1701571
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1701571
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1701571

Branch: refs/heads/branch-1.2
Commit: c1701571d89b9f74413e6a397cfa0da4cf5edf76
Parents: d6626eb
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:14:51 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 38 ++--
 1 file changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c1701571/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 4a04962..dc9c585 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3869,18 +3869,32 @@ public class AssignmentManager extends 
ZooKeeperListener {
 LOG.info("Failed to record merged region " + p.getShortNameToLog());
 return "Failed to record the merging in meta";
   }
-} else {
-  mergingRegions.remove(encodedName);
-  regionOnline(a, sn);
-  regionOnline(b, sn);
+}
+return null;
+  }
+
+  private String onRegionMergeReverted(ServerName sn, TransitionCode code,
+ final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
+RegionState rs_p = regionStates.getRegionState(p);
+String encodedName = p.getEncodedName();
+mergingRegions.remove(encodedName);
+
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
+regionOnline(a, sn);
+regionOnline(b, sn);
+
+// Only offline the merging region if it is known to exist.
+if (rs_p != null) {
   regionOffline(p);
+}
 
-  if (getTableStateManager().isTableState(p.getTable(),
-  ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
-invokeUnAssign(a);
-invokeUnAssign(b);
-  }
+if (getTableStateManager().isTableState(p.getTable(),
+ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
+  invokeUnAssign(a);
+  invokeUnAssign(b);
 }
+
 return null;
   }
 
@@ -4408,7 +4422,6 @@ public class AssignmentManager extends ZooKeeperListener {
 case READY_TO_MERGE:
 case MERGE_PONR:
 case MERGED:
-case MERGE_REVERTED:
   errorMsg = onRegionMerge(serverName, code, hri,
 HRegionInfo.convert(transition.getRegionInfo(1)),
 HRegionInfo.convert(transition.getRegionInfo(2)));
@@ -4420,6 +4433,11 @@ public class AssignmentManager extends ZooKeeperListener 
{
 }
   }
   break;
+case MERGE_REVERTED:
+errorMsg = onRegionMergeReverted(serverName, code, hri,
+HRegionInfo.convert(transition.getRegionInfo(1)),
+HRegionInfo.convert(transition.getRegionInfo(2)));
+  break;
 
 default:
   errorMsg = "Unexpected transition code " + code;



[2/5] hbase git commit: HBASE-17044 Fix merge failed before creating merged region leaves meta inconsistent

2016-11-14 Thread apurtell
HBASE-17044 Fix merge failed before creating merged region leaves meta 
inconsistent


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5482643
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5482643
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5482643

Branch: refs/heads/branch-1
Commit: e54826434e77637e10cac921bb7bf0d8cd8009f5
Parents: e01ee2f
Author: Andrew Purtell 
Authored: Mon Nov 14 12:28:19 2016 -0800
Committer: Andrew Purtell 
Committed: Mon Nov 14 17:14:48 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 38 ++--
 1 file changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5482643/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 73b8a5e..72ca98c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -3908,18 +3908,32 @@ public class AssignmentManager extends 
ZooKeeperListener {
 LOG.info("Failed to record merged region " + p.getShortNameToLog());
 return "Failed to record the merging in meta";
   }
-} else {
-  mergingRegions.remove(encodedName);
-  regionOnline(a, sn);
-  regionOnline(b, sn);
+}
+return null;
+  }
+
+  private String onRegionMergeReverted(ServerName sn, TransitionCode code,
+ final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
+RegionState rs_p = regionStates.getRegionState(p);
+String encodedName = p.getEncodedName();
+mergingRegions.remove(encodedName);
+
+// Always bring the children back online. Even if they are not offline
+// there's no harm in making them online again.
+regionOnline(a, sn);
+regionOnline(b, sn);
+
+// Only offline the merging region if it is known to exist.
+if (rs_p != null) {
   regionOffline(p);
+}
 
-  if (getTableStateManager().isTableState(p.getTable(),
-  ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
-invokeUnAssign(a);
-invokeUnAssign(b);
-  }
+if (getTableStateManager().isTableState(p.getTable(),
+ZooKeeperProtos.Table.State.DISABLED, 
ZooKeeperProtos.Table.State.DISABLING)) {
+  invokeUnAssign(a);
+  invokeUnAssign(b);
 }
+
 return null;
   }
 
@@ -4458,7 +4472,6 @@ public class AssignmentManager extends ZooKeeperListener {
   break;
 case MERGE_PONR:
 case MERGED:
-case MERGE_REVERTED:
   errorMsg = onRegionMerge(serverName, code, hri,
 HRegionInfo.convert(transition.getRegionInfo(1)),
 HRegionInfo.convert(transition.getRegionInfo(2)));
@@ -4470,6 +4483,11 @@ public class AssignmentManager extends ZooKeeperListener 
{
 }
   }
   break;
+case MERGE_REVERTED:
+errorMsg = onRegionMergeReverted(serverName, code, hri,
+HRegionInfo.convert(transition.getRegionInfo(1)),
+HRegionInfo.convert(transition.getRegionInfo(2)));
+  break;
 
 default:
   errorMsg = "Unexpected transition code " + code;



hbase git commit: HBASE-17046 Add 1.1 doc to hbase.apache.org -- add a 1.2 link while I'm at it

2016-11-14 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 8d52d23c7 -> 6c1ceaf11


HBASE-17046 Add 1.1 doc to hbase.apache.org -- add a 1.2 link while I'm at it


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c1ceaf1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c1ceaf1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c1ceaf1

Branch: refs/heads/master
Commit: 6c1ceaf11a137f1f74ad3bf52470eee19dad1724
Parents: 8d52d23
Author: Michael Stack 
Authored: Mon Nov 14 17:19:06 2016 -0800
Committer: Michael Stack 
Committed: Mon Nov 14 17:19:22 2016 -0800

--
 src/main/site/site.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c1ceaf1/src/main/site/site.xml
--
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index a11e7e9..f423374 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -108,6 +108,11 @@
   
   
   
+  
+
+
+
+  
   
 
 



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/0.98 99798411e -> ef9116336


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ef911633
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ef911633
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ef911633

Branch: refs/heads/0.98
Commit: ef9116336236fdc212b1039c8b66c6dfc1b21d39
Parents: 9979841
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:31:13 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ef911633/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 2f7ce09..ee4cba4 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -134,7 +134,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 23e168d0b -> 8012383d3


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8012383d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8012383d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8012383d

Branch: refs/heads/branch-1.1
Commit: 8012383d37aaea773c3963ee9b7bef842eb908ad
Parents: 23e168d
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:30:38 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8012383d/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index aa33a62..93e09a5 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -134,7 +134,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 bf9614f72 -> d6626eb6f


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d6626eb6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d6626eb6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d6626eb6

Branch: refs/heads/branch-1.2
Commit: d6626eb6f46d17b7b5664a5bbd67c1600200633e
Parents: bf9614f
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:30:20 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d6626eb6/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index aa33a62..93e09a5 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -134,7 +134,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 996b4847f -> 66fbe9945


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66fbe994
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66fbe994
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66fbe994

Branch: refs/heads/branch-1.3
Commit: 66fbe99456c84dd575cc72787176e20585c937fd
Parents: 996b484
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:30:06 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66fbe994/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index aa33a62..93e09a5 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -134,7 +134,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 6ee2dcf48 -> 8d52d23c7


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d52d23c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d52d23c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d52d23c

Branch: refs/heads/master
Commit: 8d52d23c77f26465334aa6fa5f421ab8745b804e
Parents: 6ee2dcf
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:29:24 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d52d23c/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 44e2a58..6add8d5 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -130,7 +130,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-17074 PreCommit job always fails because of OOM

2016-11-14 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b77bfe9d3 -> e01ee2fd1


HBASE-17074 PreCommit job always fails because of OOM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e01ee2fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e01ee2fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e01ee2fd

Branch: refs/heads/branch-1
Commit: e01ee2fd1decacd19e58ad4e72dc69cff9cf757e
Parents: b77bfe9
Author: zhangduo 
Authored: Mon Nov 14 16:31:51 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 15 08:29:44 2016 +0800

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e01ee2fd/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index aa33a62..93e09a5 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -134,7 +134,7 @@ RUN pip install python-dateutil
 ###
 # Avoid out of memory errors in builds
 ###
-ENV MAVEN_OPTS -Xms256m -Xmx512m
+ENV MAVEN_OPTS -Xmx3g
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.



hbase git commit: HBASE-16870 Add the metrics of replication sources which were transformed from other dead rs to ReplicationLoad

2016-11-14 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 dba43b628 -> 99798411e


HBASE-16870 Add the metrics of replication sources which were transformed from 
other dead rs to ReplicationLoad

Signed-off-by: zhangduo 
Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99798411
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99798411
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99798411

Branch: refs/heads/0.98
Commit: 99798411e537007a3a12795f945c431081bc1c34
Parents: dba43b6
Author: Guanghao Zhang 
Authored: Thu Oct 20 09:33:03 2016 +0800
Committer: Andrew Purtell 
Committed: Sat Nov 12 09:41:06 2016 -0800

--
 .../replication/regionserver/MetricsSource.java |   2 +-
 .../replication/regionserver/Replication.java   |  13 ++-
 .../regionserver/ReplicationLoad.java   |  26 -
 .../regionserver/ReplicationSourceManager.java  |  24 -
 .../hbase/replication/TestReplicationBase.java  |   5 +-
 .../replication/TestReplicationSmallTests.java  |  45 -
 .../replication/TestReplicationStatus.java  | 100 +++
 7 files changed, 159 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/99798411/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
index 134477d..c5e5ff3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
@@ -95,7 +95,7 @@ public class MetricsSource implements BaseSource {
   public void setAgeOfLastShippedOp(long timestamp) {
 long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp;
 singleSourceSource.setLastShippedAge(age);
-globalSourceSource.setLastShippedAge(age);
+globalSourceSource.setLastShippedAge(Math.max(age, 
globalSourceSource.getLastShippedAge()));
 this.lastTimestamp = timestamp;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/99798411/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 04c6f24..bd5c58a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -351,15 +351,24 @@ public class Replication implements WALActionsListener,
   }
 
   private void buildReplicationLoad() {
-// get source
-List sources = 
this.replicationManager.getSources();
 List sourceMetricsList = new ArrayList();
 
+// get source
+List sources = 
this.replicationManager.getSources();
 for (ReplicationSourceInterface source : sources) {
   if (source instanceof ReplicationSource) {
 sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics());
   }
 }
+
+// get old source
+List oldSources = 
this.replicationManager.getOldSources();
+for (ReplicationSourceInterface source : oldSources) {
+  if (source instanceof ReplicationSource) {
+sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics());
+  }
+}
+
 // get sink
 MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics();
 this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics);

http://git-wip-us.apache.org/repos/asf/hbase/blob/99798411/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
index a89da82..3e2b077 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.util.Date;

hbase git commit: HBASE-17089 Add doc on experience running with hedged reads

2016-11-14 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c3685760f -> 6ee2dcf48


HBASE-17089 Add doc on experience running with hedged reads


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ee2dcf4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ee2dcf4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ee2dcf4

Branch: refs/heads/master
Commit: 6ee2dcf480dd95877a20e33086a020eb1a19e41f
Parents: c368576
Author: Michael Stack 
Authored: Mon Nov 14 10:27:58 2016 -0800
Committer: Michael Stack 
Committed: Mon Nov 14 10:28:09 2016 -0800

--
 src/main/asciidoc/_chapters/performance.adoc | 12 +++-
 src/main/asciidoc/_chapters/protobuf.adoc|  5 +++--
 2 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6ee2dcf4/src/main/asciidoc/_chapters/performance.adoc
--
diff --git a/src/main/asciidoc/_chapters/performance.adoc 
b/src/main/asciidoc/_chapters/performance.adoc
index 5f27640..bc8b48a 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -156,6 +156,10 @@ See <>
 
 See <>.
 
+[[perf.99th.percentile]]
+=== Improving the 99th Percentile
+Try link:[hedged_reads].
+
 [[perf.compactions.and.splits]]
 === Managing Compactions
 
@@ -751,9 +755,10 @@ Most people should leave this alone.
 Default = 7, or can collapse to at least 1/128th of original size.
 See the _Development Process_ section of the document 
link:https://issues.apache.org/jira/secure/attachment/12444007/Bloom_Filters_in_HBase.pdf[BloomFilters
 in HBase] for more on what this option means.
 
+[[hedged.reads]]
 === Hedged Reads
 
-Hedged reads are a feature of HDFS, introduced in 
link:https://issues.apache.org/jira/browse/HDFS-5776[HDFS-5776].
+Hedged reads are a feature of HDFS, introduced in Hadoop 2.4.0 with 
link:https://issues.apache.org/jira/browse/HDFS-5776[HDFS-5776].
 Normally, a single thread is spawned for each read request.
 However, if hedged reads are enabled, the client waits some configurable 
amount of time, and if the read does not return, the client spawns a second 
read request, against a different block replica of the same data.
 Whichever read returns first is used, and the other read request is discarded.
@@ -790,6 +795,11 @@ See <>  for more information.
 * hedgeReadOpsWin - the number of times the hedged read thread was faster than 
the original thread.
   This could indicate that a given RegionServer is having trouble servicing 
requests.
 
+Concerns while running with hedged reads enabled include:
+
+* They may lead to network congestion. See 
link:https://issues.apache.org/jira/browse/HBASE-17083[HBASE-17083]
+* Make sure you set the thread pool large enough so as blocking on the pool 
does not become a bottleneck (Again see 
link:https://issues.apache.org/jira/browse/HBASE-17083[HBASE-17083])
+
 [[perf.deleting]]
 == Deleting from HBase
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6ee2dcf4/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc 
b/src/main/asciidoc/_chapters/protobuf.adoc
index fa63127..1c2cc47 100644
--- a/src/main/asciidoc/_chapters/protobuf.adoc
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -27,6 +27,8 @@
 :icons: font
 :experimental:
 
+
+== Protobuf
 HBase uses Google's link:http://protobuf.protobufs[protobufs] wherever
 it persists metadata -- in the tail of hfiles or Cells written by
 HBase into the system hbase;meta table or when HBase writes znodes
@@ -36,12 +38,11 @@ Interfaces (Services) we expose to clients, for example the 
`Admin` and `Client`
 Interfaces that the RegionServer fields,
 or specifying the arbitrary extensions added by developers via our
 xref:cp[Coprocessor Endpoint] mechanism.
+
 In this chapter we go into detail for  developers who are looking to
 understand better how it all works. This chapter is of particular
 use to those who would amend or extend HBase functionality.
 
-== Protobuf
-
 With protobuf, you describe serializations and services in a `.protos` file.
 You then feed these descriptors to a protobuf tool, the `protoc` binary,
 to generate classes that can marshall and unmarshall the described 
serializations



[2/2] hbase git commit: HBASE-15788 Use Offheap ByteBuffers from BufferPool to read RPC requests.

2016-11-14 Thread anoopsamjohn
HBASE-15788 Use Offheap ByteBuffers from BufferPool to read RPC requests.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c3685760
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c3685760
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c3685760

Branch: refs/heads/master
Commit: c3685760f004450667920144f926383eb307de53
Parents: 9250bf8
Author: anoopsamjohn 
Authored: Mon Nov 14 23:05:05 2016 +0530
Committer: anoopsamjohn 
Committed: Mon Nov 14 23:05:05 2016 +0530

--
 .../org/apache/hadoop/hbase/client/Put.java |   5 +-
 .../hadoop/hbase/ipc/CellBlockBuilder.java  |  38 ++-
 .../hadoop/hbase/ipc/TestCellBlockBuilder.java  |   4 +-
 .../apache/hadoop/hbase/OffheapKeyValue.java|  29 ++-
 .../apache/hadoop/hbase/codec/CellCodec.java|   8 +-
 .../hadoop/hbase/codec/CellCodecWithTags.java   |   8 +-
 .../org/apache/hadoop/hbase/codec/Codec.java|   4 +-
 .../hadoop/hbase/codec/KeyValueCodec.java   |  56 -
 .../hbase/codec/KeyValueCodecWithTags.java  |  16 +-
 .../hadoop/hbase/io/ByteArrayOutputStream.java  |   2 +-
 .../hadoop/hbase/io/ByteBufferOutputStream.java |   2 +-
 .../apache/hadoop/hbase/io/ByteBufferPool.java  |   4 +-
 .../io/ByteBufferSupportDataOutputStream.java   |  44 
 .../hbase/io/ByteBufferSupportOutputStream.java |  51 
 .../hadoop/hbase/io/ByteBufferWriter.java   |  53 
 .../io/ByteBufferWriterDataOutputStream.java|  44 
 .../hbase/io/ByteBufferWriterOutputStream.java  |  90 +++
 .../org/apache/hadoop/hbase/nio/ByteBuff.java   |  45 
 .../apache/hadoop/hbase/nio/MultiByteBuff.java  |  34 +++
 .../apache/hadoop/hbase/nio/SingleByteBuff.java |  12 +-
 .../hadoop/hbase/util/ByteBufferUtils.java  |  10 +-
 .../hbase/io/TestTagCompressionContext.java |   4 +-
 .../apache/hadoop/hbase/codec/MessageCodec.java |   8 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |   4 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 239 ---
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../wal/AsyncProtobufLogWriter.java |   4 +-
 .../hbase/regionserver/wal/WALCellCodec.java|  18 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java   |   4 +-
 .../apache/hadoop/hbase/ipc/TestRpcServer.java  | 140 +++
 31 files changed, 782 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c3685760/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index dbaf3a7..61a71f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -29,6 +29,7 @@ import java.util.TreeMap;
 import java.util.UUID;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -319,9 +320,7 @@ public class Put extends Mutation implements HeapSize, 
Comparable {
 byte [] family = CellUtil.cloneFamily(kv);
 List list = getCellList(family);
 //Checking that the row of the kv is the same as the put
-int res = Bytes.compareTo(this.row, 0, row.length,
-kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
-if (res != 0) {
+if (!CellUtil.matchingRow(kv, this.row)) {
   throw new WrongRowIOException("The row in " + kv.toString() +
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c3685760/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index fb2cafa..d00490b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -21,7 +21,9 @@ import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufAllocator;
 import io.netty.buffer.ByteBufOutputStream;
 
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
@@ -35,10 +37,13 @@ import 

[1/2] hbase git commit: HBASE-15788 Use Offheap ByteBuffers from BufferPool to read RPC requests.

2016-11-14 Thread anoopsamjohn
Repository: hbase
Updated Branches:
  refs/heads/master 9250bf809 -> c3685760f


http://git-wip-us.apache.org/repos/asf/hbase/blob/c3685760/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServer.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServer.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServer.java
new file mode 100644
index 000..9f3bd94
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcServer.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.io.ByteBufferPool;
+import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.testclassification.RPCTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RPCTests.class, SmallTests.class })
+public class TestRpcServer {
+
+  @Test
+  public void testAllocateByteBuffToReadInto() throws Exception {
+System.out.println(Long.MAX_VALUE);
+int maxBuffersInPool = 10;
+ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
+initPoolWithAllBuffers(pool, maxBuffersInPool);
+ByteBuff buff = null;
+Pair pair;
+// When the request size is less than 1/6th of the pool buffer size. We 
should use on demand
+// created on heap Buffer
+pair = RpcServer.allocateByteBuffToReadInto(pool, 
RpcServer.getMinSizeForReservoirUse(pool),
+200);
+buff = pair.getFirst();
+assertTrue(buff.hasArray());
+assertEquals(maxBuffersInPool, pool.getQueueSize());
+assertNull(pair.getSecond());
+// When the request size is > 1/6th of the pool buffer size.
+pair = RpcServer.allocateByteBuffToReadInto(pool, 
RpcServer.getMinSizeForReservoirUse(pool),
+1024);
+buff = pair.getFirst();
+assertFalse(buff.hasArray());
+assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
+assertNotNull(pair.getSecond());
+pair.getSecond().run();// CallCleanup#run should put back the BB to pool.
+assertEquals(maxBuffersInPool, pool.getQueueSize());
+// Request size> pool buffer size
+pair = RpcServer.allocateByteBuffToReadInto(pool, 
RpcServer.getMinSizeForReservoirUse(pool),
+7 * 1024);
+buff = pair.getFirst();
+assertFalse(buff.hasArray());
+assertTrue(buff instanceof MultiByteBuff);
+ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
+assertEquals(2, bbs.length);
+assertTrue(bbs[0].isDirect());
+assertTrue(bbs[1].isDirect());
+assertEquals(6 * 1024, bbs[0].limit());
+assertEquals(1024, bbs[1].limit());
+assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
+assertNotNull(pair.getSecond());
+pair.getSecond().run();// CallCleanup#run should put back the BB to pool.
+assertEquals(maxBuffersInPool, pool.getQueueSize());
+
+pair = RpcServer.allocateByteBuffToReadInto(pool, 
RpcServer.getMinSizeForReservoirUse(pool),
+6 * 1024 + 200);
+buff = pair.getFirst();
+assertFalse(buff.hasArray());
+assertTrue(buff instanceof MultiByteBuff);
+bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
+assertEquals(2, bbs.length);
+assertTrue(bbs[0].isDirect());
+assertFalse(bbs[1].isDirect());
+assertEquals(6 * 1024, bbs[0].limit());
+assertEquals(200, bbs[1].limit());
+assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
+assertNotNull(pair.getSecond());
+pair.getSecond().run();// CallCleanup#run should put back the BB to pool.
+

[02/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
new file mode 100644
index 000..77ade03
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackupSetRestoreSet extends TestBackupBase {
+
+  private static final Log LOG = 
LogFactory.getLog(TestFullBackupSetRestoreSet.class);
+
+  @Test
+  public void testFullRestoreSetToOtherTable() throws Exception {
+
+LOG.info("Test full restore set");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  assertTrue(backups.size() == 1);
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+  assertTrue(backupId.startsWith(name));
+
+  LOG.info("backup complete");
+
+  // Restore from set into other table
+  args =
+  new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m", 
table1_restore.getNameAsString(),
+  "-o" };
+  // Run backup
+  ret = ToolRunner.run(conf1, new RestoreDriver(), args);
+  assertTrue(ret == 0);
+  HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+  assertTrue(hba.tableExists(table1_restore));
+  // Verify number of rows in both tables
+  assertEquals(TEST_UTIL.countRows(table1), 
TEST_UTIL.countRows(table1_restore));
+  TEST_UTIL.deleteTable(table1_restore);
+  LOG.info("restore into other table is complete");
+  hba.close();
+}
+  }
+
+  @Test
+  public void testFullRestoreSetToSameTable() throws Exception {
+
+LOG.info("Test full restore set to same table");
+
+// Create set
+try (BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection())) {
+  String name = "name1";
+  table.addToBackupSet(name, new String[] { table1.getNameAsString() });
+  List names = table.describeBackupSet(name);
+
+  assertNotNull(names);
+  assertTrue(names.size() == 1);
+  assertTrue(names.get(0).equals(table1));
+
+  String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", 
name };
+  // Run backup
+  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+  assertTrue(ret == 0);
+  List backups = table.getBackupHistory();
+  String backupId = backups.get(0).getBackupId();
+  assertTrue(checkSucceeded(backupId));
+
+  LOG.info("backup complete");
+  int count = TEST_UTIL.countRows(table1);
+  TEST_UTIL.deleteTable(table1);
+
+  // Restore from set into other table
+  args = new String[] { BACKUP_ROOT_DIR, backupId, 

[11/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2725fb25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2725fb25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2725fb25

Branch: refs/heads/14123
Commit: 2725fb25b1f21bd6533409ebd4768627d74f2130
Parents: 9250bf8
Author: tedyu 
Authored: Mon Nov 14 09:21:25 2016 -0800
Committer: tedyu 
Committed: Mon Nov 14 09:21:25 2016 -0800

--
 bin/hbase   |6 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   48 +-
 .../ClientSnapshotDescriptionUtils.java |8 +-
 .../java/org/apache/hadoop/hbase/TableName.java |6 +-
 .../apache/hadoop/hbase/backup/BackupType.java  |   25 +
 .../hadoop/hbase/util/AbstractHBaseTool.java|   33 +-
 .../hbase/coprocessor/TestClassLoading.java |   53 +-
 .../hbase/IntegrationTestBackupRestore.java |  298 +
 .../shaded/protobuf/generated/BackupProtos.java | 7592 ++
 .../src/main/protobuf/Backup.proto  |   96 +
 hbase-server/pom.xml|   10 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|2 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  159 +
 .../hadoop/hbase/backup/BackupCopyTask.java |   53 +
 .../hadoop/hbase/backup/BackupDriver.java   |  205 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  562 ++
 .../hadoop/hbase/backup/BackupRequest.java  |   90 +
 .../hbase/backup/BackupRestoreConstants.java|   89 +
 .../backup/BackupRestoreServerFactory.java  |   65 +
 .../hadoop/hbase/backup/BackupStatus.java   |  104 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  166 +
 .../hadoop/hbase/backup/RestoreDriver.java  |  248 +
 .../hadoop/hbase/backup/RestoreRequest.java |   94 +
 .../apache/hadoop/hbase/backup/RestoreTask.java |   50 +
 .../hbase/backup/impl/BackupCommands.java   |  754 ++
 .../hbase/backup/impl/BackupException.java  |   86 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 ++
 .../hbase/backup/impl/BackupManifest.java   |  711 ++
 .../hbase/backup/impl/BackupSnapshotCopy.java   |   36 +
 .../hbase/backup/impl/BackupSystemTable.java|  937 +++
 .../backup/impl/BackupSystemTableHelper.java|  437 +
 .../backup/impl/FullTableBackupClient.java  |  538 ++
 .../hbase/backup/impl/HBaseBackupAdmin.java |  555 ++
 .../backup/impl/IncrementalBackupManager.java   |  356 +
 .../impl/IncrementalTableBackupClient.java  |  237 +
 .../hbase/backup/impl/RestoreTablesClient.java  |  236 +
 .../hbase/backup/mapreduce/HFileSplitter.java   |  191 +
 .../mapreduce/MapReduceBackupCopyTask.java  |  351 +
 .../backup/mapreduce/MapReduceRestoreTask.java  |  172 +
 .../hbase/backup/master/BackupController.java   |   63 +
 .../hbase/backup/master/BackupLogCleaner.java   |  144 +
 .../master/LogRollMasterProcedureManager.java   |  148 +
 .../regionserver/LogRollBackupSubprocedure.java |  167 +
 .../LogRollBackupSubprocedurePool.java  |  137 +
 .../LogRollRegionServerProcedureManager.java|  186 +
 .../hbase/backup/util/BackupClientUtil.java |  437 +
 .../hbase/backup/util/BackupServerUtil.java |  487 ++
 .../hadoop/hbase/backup/util/BackupSet.java |   63 +
 .../hadoop/hbase/backup/util/LogUtils.java  |   45 +
 .../hbase/backup/util/RestoreServerUtil.java|  755 ++
 .../BaseCoordinatedStateManager.java|   20 +-
 .../coordination/ZkCoordinatedStateManager.java |   23 +-
 .../hadoop/hbase/mapred/TableOutputFormat.java  |6 +-
 .../hbase/mapreduce/HFileInputFormat2.java  |  175 +
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |6 +-
 .../hadoop/hbase/mapreduce/WALInputFormat.java  |   42 +-
 .../hadoop/hbase/mapreduce/WALPlayer.java   |   83 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |7 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java   |3 +-
 .../hbase/regionserver/HRegionServer.java   |   16 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   27 +-
 .../apache/hadoop/hbase/util/ProcedureUtil.java |  105 +
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |5 +
 .../hadoop/hbase/HBaseTestingUtility.java   |   45 +-
 .../org/apache/hadoop/hbase/TestNamespace.java  |   12 +-
 .../hadoop/hbase/backup/TestBackupBase.java |  299 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  413 +
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../hadoop/hbase/backup/TestBackupDescribe.java |  111 +
 .../hbase/backup/TestBackupMultipleDeletes.java |  173 +
 .../hbase/backup/TestBackupShowHistory.java |  146 +
 .../hbase/backup/TestBackupStatusProgress.java  |   98 +
 

[09/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-protocol-shaded/src/main/protobuf/Backup.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Backup.proto 
b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
new file mode 100644
index 000..b7196ca
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains Backup manifest
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum BackupType {
+  FULL = 0;
+  INCREMENTAL = 1;
+}
+
+message ServerTimestamp {
+  optional ServerName server = 1;
+  optional uint64 timestamp = 2;
+}
+
+message TableServerTimestamp {
+  optional TableName table = 1;
+  repeated ServerTimestamp server_timestamp = 2;
+}
+
+message BackupImage {
+  optional string backup_id = 1;
+  optional BackupType backup_type = 2;
+  optional string root_dir = 3;
+  repeated TableName table_list = 4;
+  optional uint64 start_ts = 5;
+  optional uint64 complete_ts = 6;
+  repeated BackupImage ancestors = 7; 
+  repeated TableServerTimestamp tst_map = 8;
+  
+}
+
+
+message TableBackupStatus {
+  optional TableName table = 1;
+  optional string target_dir = 2;
+  optional string snapshot = 3;
+}
+
+message BackupInfo {
+  optional string backup_id = 1;
+  optional BackupType type = 2;
+  optional string target_root_dir = 3;
+  optional BackupState state = 4;
+  optional BackupPhase phase = 5;
+  optional string failed_message = 6;
+  repeated TableBackupStatus table_backup_status = 7;
+  optional uint64  start_ts = 8;
+  optional uint64  end_ts = 9;
+  optional uint32 progress = 10; 
+  optional string job_id = 11;
+  optional uint32 workers_number = 12;
+  optional uint64 bandwidth = 13;
+  
+  enum BackupState {
+WAITING = 0;
+RUNNING = 1;
+COMPLETE = 2;
+FAILED = 3;
+CANCELLED = 4;
+  }
+
+  enum BackupPhase {
+REQUEST = 0;
+SNAPSHOT = 1;
+PREPARE_INCREMENTAL = 2;
+SNAPSHOTCOPY = 3;
+INCREMENTAL_COPY = 4;
+STORE_MANIFEST = 5;
+  } 
+}
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 0bdee40..e6aed8e 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -402,6 +402,16 @@
   commons-collections
   commons-collections
 
+ 
+  org.apache.hadoop
+  hadoop-distcp
+  ${hadoop-two.version}
+
+
+  org.apache.hadoop
+  hadoop-distcp
+  ${hadoop-two.version}
+
 
   org.apache.hbase
   hbase-hadoop-compat

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112..d6223ea 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -400,6 +400,8 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 } else if (tableName.equals(TableName.valueOf("hbase:replication"))) {
 description = "The hbase:replication table tracks cross cluster 
replication through " +
 "WAL file offsets.";
+} else if (tableName.equals(TableName.BACKUP_TABLE_NAME)) {
+description = "The hbase:backup table stores backup system 
information.";
 }
 
 <% description %>


[01/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/14123 [created] 2725fb25b


http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index e2a9bee..7e5ac22 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -17,11 +17,41 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.CompatibilityFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -41,12 +71,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
 @Category({RegionServerTests.class, LargeTests.class})
 public class TestRegionServerMetrics {
   private static final Log LOG = 
LogFactory.getLog(TestRegionServerMetrics.class);
@@ -81,6 +105,7 @@ public class TestRegionServerMetrics {
   public static void startCluster() throws Exception {
 metricsHelper = 
CompatibilityFactory.getInstance(MetricsAssertHelper.class);
 TEST_UTIL = new HBaseTestingUtility();
+
TEST_UTIL.getConfiguration().setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
 true);
 conf = TEST_UTIL.getConfiguration();
 conf.getLong("hbase.splitlog.max.resubmit", 0);
 // Make the failure test faster
@@ -99,6 +124,7 @@ public class TestRegionServerMetrics {
 while (cluster.getLiveRegionServerThreads().size() < 1) {
   Threads.sleep(100);
 }
+TEST_UTIL.waitUntilAllSystemRegionsAssigned();
 
 rs = cluster.getRegionServer(0);
 metricsRegionServer = rs.getRegionServerMetrics();
@@ -198,7 +224,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testRegionCount() throws Exception {
-metricsHelper.assertGauge("regionCount", 1, serverSource);
+metricsHelper.assertGauge("regionCount", 2, serverSource);
   }
 
   @Test
@@ -277,7 +303,7 @@ public class TestRegionServerMetrics {
 TEST_UTIL.getHBaseAdmin().flush(tableName);
 
 metricsRegionServer.getRegionServerWrapper().forceRecompute();
-assertGauge("storeCount", 1);
+assertGauge("storeCount", 3);
 assertGauge("storeFileCount", 1);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index 5b8538b..a951eff 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ 

[07/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 000..ab3f0f6
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,937 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class provides 'hbase:backup' table API
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupSystemTable implements Closeable {
+
+  static class WALItem {
+String backupId;
+String walFile;
+String backupRoot;
+
+WALItem(String backupId, String walFile, String backupRoot) {
+  this.backupId = backupId;
+  this.walFile = walFile;
+  this.backupRoot = backupRoot;
+}
+
+public String getBackupId() {
+  return backupId;
+}
+
+public String getWalFile() {
+  return walFile;
+}
+
+public String getBackupRoot() {
+  return backupRoot;
+}
+
+@Override
+public String toString() {
+  return "/" + backupRoot + "/" + backupId + "/" + walFile;
+}
+
+  }
+
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+  private final static TableName tableName = TableName.BACKUP_TABLE_NAME;
+  // Stores backup sessions (contexts)
+  final static byte[] SESSIONS_FAMILY = "session".getBytes();
+  // Stores other meta
+  final static byte[] META_FAMILY = "meta".getBytes();
+  // Connection to HBase cluster, shared
+  // among all instances
+  private final Connection connection;
+
+  public BackupSystemTable(Connection conn) throws IOException {
+this.connection = conn;
+  }
+
+  @Override
+  public void close() {
+// do nothing
+  }
+
+  /**
+   * Updates status (state) of a backup session in hbase:backup table
+   * @param context context
+   * @throws IOException exception
+   */
+  public void updateBackupInfo(BackupInfo context) throws IOException {
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("update backup status in hbase:backup for: " + 
context.getBackupId()
+  + " set status=" + 

[05/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

2016-11-14 Thread tedyu
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
new file mode 100644
index 000..dc19f9b
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -0,0 +1,144 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
+
+/**
+ * Implementation of a log cleaner that checks if a log is still scheduled for
+ * incremental backup before deleting it when its TTL is over.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class BackupLogCleaner extends BaseLogCleanerDelegate {
+  private static final Log LOG = LogFactory.getLog(BackupLogCleaner.class);
+
+  // NOTE(review): presumably toggled/read by the delegate's stop()/isStopped()
+  // lifecycle methods, which are outside this excerpt -- confirm
+  private boolean stopped = false;
+  // Cluster connection: taken from the master when available, otherwise created in init()
+  private Connection conn;
+
+  /** No-arg constructor; configuration and connection are supplied later via init(). */
+  public BackupLogCleaner() {
+  }
+
+  /**
+   * Wires up the cluster connection for this cleaner. When running inside the master,
+   * the {@link MasterServices} instance is handed in through {@code params} and its
+   * shared connection (and configuration) are reused; otherwise a fresh connection is
+   * created from this cleaner's configuration.
+   * @param params optional init parameters; may carry the master services instance
+   *          under the {@link HMaster#MASTER} key
+   */
+  @Override
+  public void init(Map<String, Object> params) {
+    if (params != null) {
+      // Single lookup instead of containsKey()+get(); the instanceof guard also
+      // avoids a ClassCastException if a foreign value sits under the same key.
+      Object master = params.get(HMaster.MASTER);
+      if (master instanceof MasterServices) {
+        conn = ((MasterServices) master).getConnection();
+        if (getConf() == null) {
+          super.setConf(conn.getConfiguration());
+        }
+      }
+    }
+    if (conn == null) {
+      // Standalone use (e.g. outside the master): build our own connection.
+      try {
+        conn = ConnectionFactory.createConnection(getConf());
+      } catch (IOException ioe) {
+        throw new RuntimeException("Failed to create connection", ioe);
+      }
+    }
+  }
+
+  @Override
+  public Iterable getDeletableFiles(Iterable files) {
+// all members of this class are null if backup is disabled,
+// so we cannot filter the files
+if (this.getConf() == null || !BackupManager.isBackupEnabled(getConf())) {
+  LOG.warn("Backup is not enabled. Check your "+ 
BackupRestoreConstants.BACKUP_ENABLE_KEY +
+" setting");
+  return files;
+}
+
+List list = new ArrayList();
+try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+  // If we do not have recorded backup sessions
+  try {
+if (!table.hasBackupSessions()) {
+  LOG.debug("BackupLogCleaner has no backup sessions");
+  return files;
+}
+  } catch (TableNotFoundException tnfe) {
+LOG.warn("hbase:backup is not available" + tnfe.getMessage());
+return files;
+  }
+
+  for(FileStatus file: files){
+String wal = file.getPath().toString();
+boolean logInSystemTable = table.isWALFileDeletable(wal);
+if(LOG.isDebugEnabled()) {
+  if(logInSystemTable) {
+LOG.debug("Found log file in hbase:backup, deleting: " + wal);
+list.add(file);
+  } else {
+LOG.debug("Didn't find this log in hbase:backup, keeping: " + wal);
+  }
+}
+  }
+  return list;
+} catch (IOException e) {
+  LOG.error("Failed to get hbase:backup table, therefore will keep all 
files", e);
+

  1   2   >