[2/2] hbase git commit: HBASE-18294 Reduce global heap pressure: flush based on heap occupancy

2018-02-18 Thread eshcar
HBASE-18294 Reduce global heap pressure: flush based on heap occupancy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3bb9b96
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3bb9b96
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3bb9b96

Branch: refs/heads/master
Commit: f3bb9b9613beaefbee0a53be1fb66b94fde1ce19
Parents: f3ff55a
Author: eshcar 
Authored: Sun Feb 18 09:55:44 2018 +0200
Committer: eshcar 
Committed: Sun Feb 18 09:55:44 2018 +0200

--
 .../hadoop/hbase/client/ConnectionUtils.java|   2 +-
 .../apache/hadoop/hbase/client/Mutation.java|   2 +-
 .../org/apache/hadoop/hbase/client/Result.java  |   2 +-
 .../apache/hadoop/hbase/ByteBufferKeyValue.java |   2 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java  |   2 +-
 .../apache/hadoop/hbase/PrivateCellUtil.java|   7 +-
 .../hbase/util/MapReduceExtendedCell.java   |   2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |   2 +-
 .../hbase/regionserver/AbstractMemStore.java|   4 +-
 .../regionserver/CSLMImmutableSegment.java  |   3 +-
 .../regionserver/CellArrayImmutableSegment.java |   9 +-
 .../regionserver/CellChunkImmutableSegment.java |  52 ++-
 .../hbase/regionserver/CompactingMemStore.java  |   8 +-
 .../hbase/regionserver/CompactionPipeline.java  |  47 +++---
 .../regionserver/CompositeImmutableSegment.java |  12 +-
 .../hbase/regionserver/DefaultMemStore.java |   4 +-
 .../regionserver/FlushAllLargeStoresPolicy.java |   2 +-
 .../regionserver/FlushLargeStoresPolicy.java|  52 ---
 .../FlushNonSloppyStoresFirstPolicy.java|   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 144 ++-
 .../hbase/regionserver/HRegionServer.java   |  33 -
 .../hadoop/hbase/regionserver/HStore.java   |   2 +-
 .../regionserver/ImmutableMemStoreLAB.java  |  12 ++
 .../hbase/regionserver/ImmutableSegment.java|   4 +
 .../hbase/regionserver/MemStoreFlusher.java |  92 +---
 .../hadoop/hbase/regionserver/MemStoreLAB.java  |   8 +-
 .../hbase/regionserver/MemStoreLABImpl.java |  10 ++
 .../hadoop/hbase/regionserver/MemStoreSize.java |  52 ++-
 .../hbase/regionserver/MemStoreSizing.java  |  58 +++-
 .../hbase/regionserver/MemStoreSnapshot.java|  16 +--
 .../MetricsTableWrapperAggregateImpl.java   |   2 +-
 .../hbase/regionserver/MutableSegment.java  |   7 +-
 .../hadoop/hbase/regionserver/Region.java   |  16 ++-
 .../regionserver/RegionServerAccounting.java|  45 +++---
 .../regionserver/RegionServicesForStores.java   |   4 +-
 .../hadoop/hbase/regionserver/Segment.java  | 115 ++-
 .../hadoop/hbase/regionserver/StoreScanner.java |   2 +-
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   2 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|   4 +-
 .../hbase/TestPartialResultsFromClientSide.java |   2 +-
 ...TestServerSideScanMetricsFromClientSide.java |   3 +-
 .../hbase/client/TestAsyncRegionAdminApi.java   |  12 +-
 .../hadoop/hbase/client/TestClientPushback.java |   6 +-
 .../hbase/client/TestFlushFromClient.java   |  14 +-
 .../hadoop/hbase/client/TestSizeFailures.java   |   6 +-
 ...NegativeMemStoreSizeWithSlowCoprocessor.java |   3 +-
 .../regionserver/TestCompactingMemStore.java|   8 +-
 .../TestCompactingToCellFlatMapMemStore.java|   2 +-
 .../TestEndToEndSplitTransaction.java   |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  20 +--
 .../regionserver/TestHRegionReplayEvents.java   |  36 ++---
 .../hadoop/hbase/regionserver/TestHStore.java   |   6 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |  20 +--
 .../TestRegionServerAccounting.java |  20 +--
 .../hbase/regionserver/TestWALLockup.java   |   4 +-
 .../TestWalAndCompactingMemStoreFlush.java  |  18 +--
 56 files changed, 649 insertions(+), 377 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3bb9b96/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 1a093f8..c9e994f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -320,7 +320,7 @@ public final class ConnectionUtils {
 long estimatedHeapSizeOfResult = 0;
 // We don't make Iterator here
 for (Cell cell : rs.rawCells()) {
-  estimatedHeapSizeOfResult += PrivateCellUtil.estimatedHeapSizeOf(cell);
+  estimatedHeapSizeOfResult += PrivateCellUtil.estimatedSizeOfCell(cell);
 }

[1/2] hbase git commit: HBASE-18294 Reduce global heap pressure: flush based on heap occupancy

2018-02-18 Thread eshcar
Repository: hbase
Updated Branches:
  refs/heads/master f3ff55a2b -> f3bb9b961


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3bb9b96/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
index 7689fcd..1c627f7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
@@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * RegionServerAccounting keeps record of some basic real time information 
about
- * the Region Server. Currently, it keeps record the global memstore size and 
global memstore heap
- * overhead. It also tracks the replay edits per region.
+ * the Region Server. Currently, it keeps a record of the global memstore size and 
global memstore
+ * on-heap and off-heap overhead. It also tracks the replay edits per region.
  */
 @InterfaceAudience.Private
 public class RegionServerAccounting {
   // memstore data size
-  private final LongAdder globalMemstoreDataSize = new LongAdder();
-  // memstore heap size. When off heap MSLAB in place, this will be only heap 
overhead of the Cell
-  // POJOs and entry overhead of them onto memstore. When on heap MSLAB, this 
will be include heap
-  // overhead as well as the cell data size. Ya cell data is in on heap area 
only then.
-  private final LongAdder globalMemstoreHeapSize = new LongAdder();
+  private final LongAdder globalMemStoreDataSize = new LongAdder();
+  // memstore heap size.
+  private final LongAdder globalMemStoreHeapSize = new LongAdder();
+  // memstore off-heap size.
+  private final LongAdder globalMemStoreOffHeapSize = new LongAdder();
 
   // Store the edits size during replaying WAL. Use this to roll back the
   // global memstore size once a region opening failed.
@@ -114,14 +114,21 @@ public class RegionServerAccounting {
* @return the global Memstore data size in the RegionServer
*/
   public long getGlobalMemStoreDataSize() {
-return globalMemstoreDataSize.sum();
+return globalMemStoreDataSize.sum();
   }
 
   /**
* @return the global memstore heap size in the RegionServer
*/
   public long getGlobalMemStoreHeapSize() {
-return this.globalMemstoreHeapSize.sum();
+return this.globalMemStoreHeapSize.sum();
+  }
+
+  /**
+   * @return the global memstore off-heap size in the RegionServer
+   */
+  public long getGlobalMemStoreOffHeapSize() {
+return this.globalMemStoreOffHeapSize.sum();
   }
 
   /**
@@ -129,13 +136,15 @@ public class RegionServerAccounting {
*the global Memstore size
*/
   public void incGlobalMemStoreSize(MemStoreSize memStoreSize) {
-globalMemstoreDataSize.add(memStoreSize.getDataSize());
-globalMemstoreHeapSize.add(memStoreSize.getHeapSize());
+globalMemStoreDataSize.add(memStoreSize.getDataSize());
+globalMemStoreHeapSize.add(memStoreSize.getHeapSize());
+globalMemStoreOffHeapSize.add(memStoreSize.getOffHeapSize());
   }
 
   public void decGlobalMemStoreSize(MemStoreSize memStoreSize) {
-globalMemstoreDataSize.add(-memStoreSize.getDataSize());
-globalMemstoreHeapSize.add(-memStoreSize.getHeapSize());
+globalMemStoreDataSize.add(-memStoreSize.getDataSize());
+globalMemStoreHeapSize.add(-memStoreSize.getHeapSize());
+globalMemStoreOffHeapSize.add(-memStoreSize.getOffHeapSize());
   }
 
   /**
@@ -151,13 +160,13 @@ public class RegionServerAccounting {
   }
 } else {
   // If the configured memstore is offheap, check for two things
-  // 1) If the global memstore data size is greater than the configured
+  // 1) If the global memstore off-heap size is greater than the configured
   // 'hbase.regionserver.offheap.global.memstore.size'
   // 2) If the global memstore heap size is greater than the configured 
onheap
   // global memstore limit 'hbase.regionserver.global.memstore.size'.
   // We do this to avoid OOME in case of scenarios where the heap is 
occupied with
   // lot of onheap references to the cells in memstore
-  if (getGlobalMemStoreDataSize() >= globalMemStoreLimit) {
+  if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimit) {
 // Indicates that global memstore size is above the configured
 // 'hbase.regionserver.offheap.global.memstore.size'
 return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
@@ -181,8 +190,8 @@ public class RegionServerAccounting {
 return FlushType.ABOVE_ONHEAP_LOWER_MARK;
   }
 } else {
-  if (getGlobalMemStoreDataSize() >= globalMemStoreLimitLowMark) {
-// Indicates that the offheap memstore's data size is greater than the 
glo

[2/2] hbase git commit: HBASE-18294 Reduce global heap pressure: flush based on heap occupancy

2018-02-18 Thread eshcar
HBASE-18294 Reduce global heap pressure: flush based on heap occupancy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a458d7c4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a458d7c4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a458d7c4

Branch: refs/heads/branch-2
Commit: a458d7c40086fcde0ff9f6691a3b2f0a1a2a4dfc
Parents: 0082f55
Author: eshcar 
Authored: Sun Feb 18 12:58:52 2018 +0200
Committer: eshcar 
Committed: Sun Feb 18 12:58:52 2018 +0200

--
 .../hadoop/hbase/client/ConnectionUtils.java|   2 +-
 .../apache/hadoop/hbase/client/Mutation.java|   2 +-
 .../org/apache/hadoop/hbase/client/Result.java  |   2 +-
 .../apache/hadoop/hbase/ByteBufferKeyValue.java |   2 +-
 .../apache/hadoop/hbase/PrivateCellUtil.java|   7 +-
 .../hbase/util/MapReduceExtendedCell.java   |   2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |   2 +-
 .../hbase/regionserver/AbstractMemStore.java|   4 +-
 .../regionserver/CSLMImmutableSegment.java  |   3 +-
 .../regionserver/CellArrayImmutableSegment.java |   9 +-
 .../regionserver/CellChunkImmutableSegment.java |  52 ++-
 .../hbase/regionserver/CompactingMemStore.java  |   8 +-
 .../hbase/regionserver/CompactionPipeline.java  |  47 +++---
 .../regionserver/CompositeImmutableSegment.java |  12 +-
 .../hbase/regionserver/DefaultMemStore.java |   4 +-
 .../regionserver/FlushAllLargeStoresPolicy.java |   2 +-
 .../regionserver/FlushLargeStoresPolicy.java|  52 ---
 .../FlushNonSloppyStoresFirstPolicy.java|   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 144 ++-
 .../hbase/regionserver/HRegionServer.java   |  33 -
 .../hadoop/hbase/regionserver/HStore.java   |   2 +-
 .../regionserver/ImmutableMemStoreLAB.java  |  12 ++
 .../hbase/regionserver/ImmutableSegment.java|   4 +
 .../hbase/regionserver/MemStoreFlusher.java |  92 +---
 .../hadoop/hbase/regionserver/MemStoreLAB.java  |   8 +-
 .../hbase/regionserver/MemStoreLABImpl.java |  10 ++
 .../hadoop/hbase/regionserver/MemStoreSize.java |  52 ++-
 .../hbase/regionserver/MemStoreSizing.java  |  58 +++-
 .../hbase/regionserver/MemStoreSnapshot.java|  16 +--
 .../MetricsTableWrapperAggregateImpl.java   |   2 +-
 .../hbase/regionserver/MutableSegment.java  |   7 +-
 .../hadoop/hbase/regionserver/Region.java   |  16 ++-
 .../regionserver/RegionServerAccounting.java|  45 +++---
 .../regionserver/RegionServicesForStores.java   |   4 +-
 .../hadoop/hbase/regionserver/Segment.java  | 115 ++-
 .../hadoop/hbase/regionserver/StoreScanner.java |   2 +-
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   2 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|   4 +-
 .../hbase/TestPartialResultsFromClientSide.java |   2 +-
 ...TestServerSideScanMetricsFromClientSide.java |   3 +-
 .../hbase/client/TestAsyncRegionAdminApi.java   |  12 +-
 .../hadoop/hbase/client/TestClientPushback.java |   6 +-
 .../hbase/client/TestFlushFromClient.java   |  14 +-
 .../hadoop/hbase/client/TestSizeFailures.java   |   6 +-
 ...NegativeMemStoreSizeWithSlowCoprocessor.java |   3 +-
 .../regionserver/TestCompactingMemStore.java|   8 +-
 .../TestCompactingToCellFlatMapMemStore.java|   2 +-
 .../TestEndToEndSplitTransaction.java   |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  20 +--
 .../regionserver/TestHRegionReplayEvents.java   |  36 ++---
 .../hadoop/hbase/regionserver/TestHStore.java   |   6 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |  20 +--
 .../TestRegionServerAccounting.java |  20 +--
 .../hbase/regionserver/TestWALLockup.java   |   4 +-
 .../TestWalAndCompactingMemStoreFlush.java  |  18 +--
 55 files changed, 648 insertions(+), 376 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 1a093f8..c9e994f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -320,7 +320,7 @@ public final class ConnectionUtils {
 long estimatedHeapSizeOfResult = 0;
 // We don't make Iterator here
 for (Cell cell : rs.rawCells()) {
-  estimatedHeapSizeOfResult += PrivateCellUtil.estimatedHeapSizeOf(cell);
+  estimatedHeapSizeOfResult += PrivateCellUtil.estimatedSizeOfCell(cell);
 }
 return estimatedHeapSizeOfResult;
   }

http://git-wip-

[1/2] hbase git commit: HBASE-18294 Reduce global heap pressure: flush based on heap occupancy

2018-02-18 Thread eshcar
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0082f5559 -> a458d7c40


http://git-wip-us.apache.org/repos/asf/hbase/blob/a458d7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
index 7689fcd..1c627f7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
@@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * RegionServerAccounting keeps record of some basic real time information 
about
- * the Region Server. Currently, it keeps record the global memstore size and 
global memstore heap
- * overhead. It also tracks the replay edits per region.
+ * the Region Server. Currently, it keeps a record of the global memstore size and 
global memstore
+ * on-heap and off-heap overhead. It also tracks the replay edits per region.
  */
 @InterfaceAudience.Private
 public class RegionServerAccounting {
   // memstore data size
-  private final LongAdder globalMemstoreDataSize = new LongAdder();
-  // memstore heap size. When off heap MSLAB in place, this will be only heap 
overhead of the Cell
-  // POJOs and entry overhead of them onto memstore. When on heap MSLAB, this 
will be include heap
-  // overhead as well as the cell data size. Ya cell data is in on heap area 
only then.
-  private final LongAdder globalMemstoreHeapSize = new LongAdder();
+  private final LongAdder globalMemStoreDataSize = new LongAdder();
+  // memstore heap size.
+  private final LongAdder globalMemStoreHeapSize = new LongAdder();
+  // memstore off-heap size.
+  private final LongAdder globalMemStoreOffHeapSize = new LongAdder();
 
   // Store the edits size during replaying WAL. Use this to roll back the
   // global memstore size once a region opening failed.
@@ -114,14 +114,21 @@ public class RegionServerAccounting {
* @return the global Memstore data size in the RegionServer
*/
   public long getGlobalMemStoreDataSize() {
-return globalMemstoreDataSize.sum();
+return globalMemStoreDataSize.sum();
   }
 
   /**
* @return the global memstore heap size in the RegionServer
*/
   public long getGlobalMemStoreHeapSize() {
-return this.globalMemstoreHeapSize.sum();
+return this.globalMemStoreHeapSize.sum();
+  }
+
+  /**
+   * @return the global memstore off-heap size in the RegionServer
+   */
+  public long getGlobalMemStoreOffHeapSize() {
+return this.globalMemStoreOffHeapSize.sum();
   }
 
   /**
@@ -129,13 +136,15 @@ public class RegionServerAccounting {
*the global Memstore size
*/
   public void incGlobalMemStoreSize(MemStoreSize memStoreSize) {
-globalMemstoreDataSize.add(memStoreSize.getDataSize());
-globalMemstoreHeapSize.add(memStoreSize.getHeapSize());
+globalMemStoreDataSize.add(memStoreSize.getDataSize());
+globalMemStoreHeapSize.add(memStoreSize.getHeapSize());
+globalMemStoreOffHeapSize.add(memStoreSize.getOffHeapSize());
   }
 
   public void decGlobalMemStoreSize(MemStoreSize memStoreSize) {
-globalMemstoreDataSize.add(-memStoreSize.getDataSize());
-globalMemstoreHeapSize.add(-memStoreSize.getHeapSize());
+globalMemStoreDataSize.add(-memStoreSize.getDataSize());
+globalMemStoreHeapSize.add(-memStoreSize.getHeapSize());
+globalMemStoreOffHeapSize.add(-memStoreSize.getOffHeapSize());
   }
 
   /**
@@ -151,13 +160,13 @@ public class RegionServerAccounting {
   }
 } else {
   // If the configured memstore is offheap, check for two things
-  // 1) If the global memstore data size is greater than the configured
+  // 1) If the global memstore off-heap size is greater than the configured
   // 'hbase.regionserver.offheap.global.memstore.size'
   // 2) If the global memstore heap size is greater than the configured 
onheap
   // global memstore limit 'hbase.regionserver.global.memstore.size'.
   // We do this to avoid OOME in case of scenarios where the heap is 
occupied with
   // lot of onheap references to the cells in memstore
-  if (getGlobalMemStoreDataSize() >= globalMemStoreLimit) {
+  if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimit) {
 // Indicates that global memstore size is above the configured
 // 'hbase.regionserver.offheap.global.memstore.size'
 return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
@@ -181,8 +190,8 @@ public class RegionServerAccounting {
 return FlushType.ABOVE_ONHEAP_LOWER_MARK;
   }
 } else {
-  if (getGlobalMemStoreDataSize() >= globalMemStoreLimitLowMark) {
-// Indicates that the offheap memstore's data size is greater than the 
g

[14/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regions

[41/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index e4e6219..6f4eff2 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10
 
,"i110":10,"i111":10,"i112":41,"i113":41,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":9,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":42,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":9,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":9,"i177":10,"i178":10,"i179":9,"i180":9,"i181":9,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":9,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":9,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,
 
"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":9,"i230":9,"i231":10,"i232":10,"i233":10,"i234":10,"i235":10,"i236":10,"i237":10,"i238":10,"i239":10,"i240":10,"i241":10,"i242":9,"i243":10,"i244":10,"i245":10,"i246":10,"i247":10,"i248":10,"i249":10,"i250":10,"i251":10,"i252":10,"i253":10,"i254":10,"i255":10,"i256":9,"i257":10,"i258":10,"i259":10,"i260":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":9,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":9,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":9,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10
 
,"i110":10,"i111":10,"i112":41,"i113":41,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":9,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":42,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":9,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":9,"i178":10,"i179":10,"i180":9,"i181":9,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":9,"i189":9,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":9,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i

[27/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstOnRowCol(final Cell cell) {
-2857if (cell insta

[42/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 92cc3ad..e501827 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements RegionScanner, Shipper, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple 
Stores (aka column families).
@@ -425,7 +425,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -434,7 +434,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -445,7 +445,7 @@ implements 
 
 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -456,7 +456,7 @@ implements 
 
 filterClosed
-private boolean filterClosed
+private boolean filterClosed
 
 
 
@@ -465,7 +465,7 @@ implements 
 
 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow
 
 
 
@@ -474,7 +474,7 @@ implements 
 
 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow
 
 
 
@@ -483,7 +483,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator
 
 
 
@@ -501,7 +501,7 @@ implements 
 
 readPt
-private final long readPt
+private final long readPt
 
 
 
@@ -510,7 +510,7 @@ implements 
 
 maxResultSize
-private final long maxResultSize
+private final long maxResultSize
 
 
 
@@ -519,7 +519,7 @@ implements 
 
 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 filter
-private final FilterWrapper filter
+private final FilterWrapper filter
 
 
 
@@ -545,7 +545,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners,
   HRegion region)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -561,7 +561,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners,
   HRegion region,
   long nonceGroup,
@@ -587,7 +587,7 @@ implements 
 
 getRegionInfo
-public RegionInfo getRegionInfo()
+public RegionInfo getRegionInfo()
 
 Specified by:
 getRegionInfo in
 interface RegionScanner
@@ -602,7 +602,7 @@ implements 
 
 initializeScanners
-protected void initializeScanners(Scan scan,
+protected void initializeScanners(Scan scan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -617,7 +617,7 @@ implements 
 
 initializeKVHeap
-protected void initializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
+protected void initializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List joinedScanners,
 HRegion region)
  throws http://do

[08/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hba

[32/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferExtendedCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferExtendedCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferExtendedCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferExtendedCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  pu

[04/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurr

[02/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 802b925..a3e80ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-143import 
org.apache.hadoop.hbase.reg

[31/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferExtendedCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferExtendedCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferExtendedCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferExtendedCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }

[38/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html
index f385b1d..9b50c3e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -231,6 +231,14 @@ implements Called when opening a scanner on the data of this 
MemStoreLAB
 
 
+
+boolean
+isOffHeap() 
+
+
+boolean
+isOnHeap() 
+
 
 
 
@@ -417,7 +425,7 @@ implements 
 
 
-
+
 
 decScannerCount
 public void decScannerCount()
@@ -429,6 +437,32 @@ implements 
+
+
+
+
+isOnHeap
+public boolean isOnHeap()
+
+Specified by:
+isOnHeap in
 interface MemStoreLAB
+
+
+
+
+
+
+
+
+isOffHeap
+public boolean isOffHeap()
+
+Specified by:
+isOffHeap in
 interface MemStoreLAB
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index 944a18d..66eb85e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -151,7 +151,7 @@ extends Segment
-dataSize,
 FIXED_OVERHEAD,
 heapSize,
 minSequenceId,
 tagsPresent,
 timeRangeTracker
+FIXED_OVERHEAD,
 minSequenceId,
 segmentSize,
 tagsPresent,
 timeRangeTracker
 
 
 
@@ -176,6 +176,11 @@ extends 
 protected 
+ImmutableSegment(CellComparator comparator,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments) 
+
+
+protected 
 ImmutableSegment(CellSet cs,
 CellComparator comparator,
 MemStoreLAB memStoreLAB)
@@ -183,7 +188,7 @@ extends 
+
 protected 
 ImmutableSegment(Segment segment)
 
@@ -231,7 +236,7 @@ extends Segment
-close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCellSet,
 getComparator,
 getFirstAfter,
 getMemStoreLAB,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 indexEntrySize,
 internalAdd,
 isEmpty,
 isTagsPresent, iterator,
 keySize,
 last,
 maybeCloneWithAllocator,
 setCellSet,
 shouldSeek,
 tailSet,
 updateMetaInfo,
 updateMetaInfo
+close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCellSet,
 getComparator,
 getMemStoreLAB,
 getMemStoreSize,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 indexEntryOffHeapSize,
 indexEntryOnHeapSize,
 indexEntrySize,
 internalAdd,
 isEmpty,
 isTagsPresent,
 iterator,
 keySize,
 last,
 maybeCloneWithAllocator,
 offHeapSize,
 offHeapSizeChan
 ge, setCellSet,
 shouldSeek,
 tailSet,
 updateMetaInfo,
 updateMetaInfo
 
 
 
@@ -282,13 +287,23 @@ extends 
+
+
+
+
+ImmutableSegment
+protected ImmutableSegment(CellComparator comparator,
+   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments)
+
+
 
 
 
 
 
 ImmutableSegment
-protected ImmutableSegment(CellSet cs,
+protected ImmutableSegment(CellSet cs,
CellComparator comparator,
MemStoreLAB memStoreLAB)
 
@@ -301,7 +316,7 @@ extends 
 
 ImmutableSegment
-protected ImmutableSegment(Segment segment)
+protected ImmutableSegment(Segment segment)
 
  Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built 
from a Mutable one.
  This C-tor should be used when active MutableSegment is pushed into the 
compaction
@@ -340,7 +355,7 @@ extends 
 
 getNumOfSegments
-public int getNumOfSegments()
+public int getNumOfSegments()
 
 
 
@@ -349,7 +364,7 @@ extends 
 
 getAllSegments
-public http://docs.or

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstOnRowCol(final Cell cell) {
-2857if (cell instanceof 
ByteBuff

[36/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html
index 1f13b70..3cd318e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":38,"i42":38,"i43":38,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":38,"i44":38,"i45":38,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -273,41 +273,49 @@ extends 
 long
-getMemStoreSize() 
+getMemStoreDataSize() 
 
 
 long
-getNumMutationsWithoutWAL() 
+getMemStoreHeapSize() 
 
 
 long
+getMemStoreOffHeapSize() 
+
+
+long
+getNumMutationsWithoutWAL() 
+
+
+long
 getOldestHfileTs(boolean majorCompactionOnly)
 This can be used to determine the last time all files of 
this region were major compacted.
 
 
-
+
 long
 getReadRequestsCount() 
 
-
+
 RegionInfo
 getRegionInfo() 
 
-
+
 Region.RowLock
 getRowLock(byte[] row,
   boolean readLock)
 Get a row lock for the specified row.
 
 
-
+
 RegionScanner
 getScanner(Scan scan)
 Return an iterator that scans over the HRegion, returning 
the indicated
  columns and rows specified by the Scan.
 
 
-
+
 RegionScanner
 getScanner(Scan scan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners)
@@ -315,67 +323,67 @@ extends Scan.
 
 
-
+
 Store
 getStore(byte[] family)
 Return the Store for the given family
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString>
 getStoreFileList(byte[][] columns) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 getStores()
 Return the list of Stores managed by this region
 
 
-
+
 TableDescriptor
 getTableDescriptor() 
 
-
+
 long
 getWriteRequestsCount() 
 
-
+
 Result
 increment(Increment increment)
 Perform one or more increment operations on a row.
 
 
-
+
 boolean
 isAvailable() 
 
-
+
 boolean
 isClosed() 
 
-
+
 boolean
 isClosing() 
 
-
+
 boolean
 isMergeable() 
 
-
+
 boolean
 isReadOnly() 
 
-
+
 boolean
 isSplittable() 
 
-
+
 void
 mutateRow(RowMutations mutations)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 mutateRowsWithLocks(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection mutations,
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in 
java.util">Collection rowsToLock,
@@ -384,7 +392,7 @@ extends Perform atomic mutations within the region.
 
 
-
+
 void
 processRowsWithLocks(RowProcessor processor)
 Deprecated. 
@@ -393,7 +401,7 @@ extends 
+
 void
 processRowsWithLocks(RowProcessor processor,
 long nonceGroup,
@@ -404,7 +412,7 @@ extends 
+
 void
 processRowsWithLocks(RowProcessor processor,
 long timeout,
@@ -416,13 +424,13 @@ extends 
+
 void
 put(Put put)
 Puts some data in the table.
 
 
-
+
 boolean
 refreshStoreFiles()
 Check the region's underlying store files, open the files 
that have not
@@ -430,7 +438,7 @@ extends 
+
 void
 requestCompaction(byte[] family,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why,
@@ -440,7 +448,7 @@ extends Request compaction for the given family
 
 
-
+
 void
 requestCompaction(http://docs.oracle.com/javase/8/docs/api/java/lang/String.h

[10/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
o

[39/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 93e5b96..cc0b2b6 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":9,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":9,"i128":10,"i129":9,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":9,"i148":10,"i149":9,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":9,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":9,"i129":10,"i130":9,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":9,"i149":10,"i150":9,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -902,360 +902,364 @@ implements 
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true";
 title="class or interface in java.util">SortedMapLong,HRegion>
-getCopyOfOnlineRegionsSortedBySize() 
+getCopyOfOnlineRegionsSortedByOffHeapSize() 
 
 
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true";
 title="class or interface in java.util">SortedMapLong,HRegion>
+getCopyOfOnlineRegionsSortedByOnHeapSize() 
+
+
 protected http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class
 getDumpServlet() 
 
-
+
 NettyEventLoopGroupConfig
 getEventLoopGroupConfig() 
 
-
+
 ExecutorService
 getExecutorService() 

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.Mult

[12/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurr

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index 4f5b33a..4361237 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -278,567 +278,568 @@
 270  } else {
 271LOG.error(msg, e);
 272setFailure(e);
-273  }
-274}
-275// if split fails,  need to call 
((HRegion)parent).clearSplit() when it is a force split
-276return Flow.HAS_MORE_STATE;
-277  }
-278
-279  /**
-280   * To rollback {@link 
SplitTableRegionProcedure}, an AssignProcedure is asynchronously
-281   * submitted for parent region to be 
split (rollback doesn't wait on the completion of the
-282   * AssignProcedure) . This can be 
improved by changing rollback() to support sub-procedures.
-283   * See HBASE-19851 for details.
-284   */
-285  @Override
-286  protected void rollbackState(final 
MasterProcedureEnv env, final SplitTableRegionState state)
-287  throws IOException, 
InterruptedException {
-288if (isTraceEnabled()) {
-289  LOG.trace(this + " rollback state=" 
+ state);
-290}
-291
-292try {
-293  switch (state) {
-294  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-295  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-296  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-297  case 
SPLIT_TABLE_REGION_UPDATE_META:
-298// PONR
-299throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-300  case 
SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
-301break;
-302  case 
SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
-303// Doing nothing, as re-open 
parent region would clean up daughter region directories.
-304break;
-305  case 
SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
-306openParentRegion(env);
-307break;
-308  case 
SPLIT_TABLE_REGION_PRE_OPERATION:
-309postRollBackSplitRegion(env);
-310break;
-311  case SPLIT_TABLE_REGION_PREPARE:
-312break; // nothing to do
-313  default:
-314throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-315  }
-316} catch (IOException e) {
-317  // This will be retried. Unless 
there is a bug in the code,
-318  // this should be just a "temporary 
error" (e.g. network down)
-319  LOG.warn("pid=" + getProcId() + " 
failed rollback attempt step " + state +
-320  " for splitting the region "
-321+ 
getParentRegion().getEncodedName() + " in table " + getTableName(), e);
-322  throw e;
-323}
-324  }
-325
-326  /*
-327   * Check whether we are in the state 
that can be rollback
-328   */
-329  @Override
-330  protected boolean 
isRollbackSupported(final SplitTableRegionState state) {
-331switch (state) {
-332  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-333  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-334  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-335  case 
SPLIT_TABLE_REGION_UPDATE_META:
-336// It is not safe to rollback if 
we reach to these states.
-337return false;
-338  default:
-339break;
-340}
-341return true;
-342  }
-343
-344  @Override
-345  protected SplitTableRegionState 
getState(final int stateId) {
-346return 
SplitTableRegionState.forNumber(stateId);
-347  }
-348
-349  @Override
-350  protected int getStateId(final 
SplitTableRegionState state) {
-351return state.getNumber();
-352  }
-353
-354  @Override
-355  protected SplitTableRegionState 
getInitialState() {
-356return 
SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
-357  }
-358
-359  @Override
-360  protected void 
serializeStateData(ProcedureStateSerializer serializer)
-361  throws IOException {
-362
super.serializeStateData(serializer);
-363
-364final 
MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
-365
MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
-366
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-367
.setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion()))
-368
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_1_RI))
-369
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_2_RI));
-370
serializer.serialize(splitTableRegionMsg.build());
-371  }
-372
-373  @Override
-374  protected void 
deserializeStateData(ProcedureStateSerializer serializer)
-375  th

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
index f6091f6..3ab770f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
@@ -146,193 +146,208 @@
 138  if(segment != null) newDataSize = 
segment.keySize();
 139  long dataSizeDelta = suffixDataSize 
- newDataSize;
 140  long suffixHeapSize = 
getSegmentsHeapSize(suffix);
-141  long newHeapSize = 0;
-142  if(segment != null) newHeapSize = 
segment.heapSize();
-143  long heapSizeDelta = suffixHeapSize 
- newHeapSize;
-144  region.addMemStoreSize(new 
MemStoreSizing(-dataSizeDelta, -heapSizeDelta));
-145  LOG.debug("Suffix data size={}, new 
segment data size={}, suffix heap size={}," +
-146  "new segment heap 
size={}",
-147  suffixDataSize,
-148  newDataSize,
-149  suffixHeapSize,
-150  newHeapSize);
-151}
-152return true;
-153  }
-154
-155  private static long 
getSegmentsHeapSize(List list) {
-156long res = 0;
-157for (Segment segment : list) {
-158  res += segment.heapSize();
-159}
-160return res;
-161  }
-162
-163  private static long 
getSegmentsKeySize(List list) {
-164long res = 0;
-165for (Segment segment : list) {
-166  res += segment.keySize();
-167}
-168return res;
-169  }
-170
-171  /**
-172   * If the caller holds the current 
version, go over the the pipeline and try to flatten each
-173   * segment. Flattening is replacing the 
ConcurrentSkipListMap based CellSet to CellArrayMap based.
-174   * Flattening of the segment that 
initially is not based on ConcurrentSkipListMap has no effect.
-175   * Return after one segment is 
successfully flatten.
-176   *
-177   * @return true iff a segment was 
successfully flattened
-178   */
-179  public boolean flattenOneSegment(long 
requesterVersion,
-180  CompactingMemStore.IndexType 
idxType,
-181  MemStoreCompactionStrategy.Action 
action) {
-182
-183if(requesterVersion != version) {
-184  LOG.warn("Segment flattening 
failed, because versions do not match. Requester version: "
-185  + requesterVersion + ", actual 
version: " + version);
-186  return false;
-187}
-188
-189synchronized (pipeline){
-190  if(requesterVersion != version) {
-191LOG.warn("Segment flattening 
failed, because versions do not match");
-192return false;
-193  }
-194  int i = 0;
-195  for (ImmutableSegment s : pipeline) 
{
-196if ( s.canBeFlattened() ) {
-197  MemStoreSizing 
newMemstoreAccounting = new MemStoreSizing(); // the size to be updated
-198  ImmutableSegment newS = 
SegmentFactory.instance().createImmutableSegmentByFlattening(
-199  
(CSLMImmutableSegment)s,idxType,newMemstoreAccounting,action);
-200  replaceAtIndex(i,newS);
-201  if(region != null) {
-202// update the global memstore 
size counter
-203// upon flattening there is 
no change in the data size
-204region.addMemStoreSize(new 
MemStoreSize(0, newMemstoreAccounting.getHeapSize()));
-205  }
-206  LOG.debug("Compaction pipeline 
segment {} flattened", s);
-207  return true;
-208}
-209i++;
+141  long suffixOffHeapSize = 
getSegmentsOffHeapSize(suffix);
+142  long newHeapSize = 0;
+143  long newOffHeapSize = 0;
+144  if(segment != null) {
+145newHeapSize = 
segment.heapSize();
+146newOffHeapSize = 
segment.offHeapSize();
+147  }
+148  long offHeapSizeDelta = 
suffixOffHeapSize - newOffHeapSize;
+149  long heapSizeDelta = suffixHeapSize 
- newHeapSize;
+150  region.addMemStoreSize(new 
MemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta));
+151  LOG.debug("Suffix data size={}, new 
segment data size={}, "
+152  + "suffix heap size={}," + 
"new segment heap size={}"
+153  + "suffix off heap 
size={}," + "new segment off heap size={}"
+154  , suffixDataSize
+155  , newDataSize
+156  , suffixHeapSize
+157  , newHeapSize
+158  , suffixOffHeapSize
+159  , newOffHeapSize);
+160}
+161return true;
+162  }
+163
+164  private static long 
getSegmentsHeapSize(List list) {
+165long res = 0;
+166for (Segment segment : list) {
+167  res += segment.heapSize();
+168}
+169return res;
+170  }
+171
+172  private static long 
getSegmentsOffHeapSize(List list) {
+

[25/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.ValueAndTagRewriteCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.ValueAndTagRewriteCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.ValueAndTagRewriteCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.ValueAndTagRewriteCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.ValueAndTagRewriteCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstOnRowCol(final Cell c

[23/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 70c0a84..f458c4c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -263,541 +263,547 @@
 255throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
 256  }
 257} catch (IOException e) {
-258  LOG.warn("Error trying to merge 
regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
-259" in the table " + getTableName() 
+ " (in state=" + state + ")", e);
-260
-261  setFailure("master-merge-regions", 
e);
-262}
-263return Flow.HAS_MORE_STATE;
-264  }
-265
-266  /**
-267   * To rollback {@link 
MergeTableRegionsProcedure}, two AssignProcedures are asynchronously
-268   * submitted for each region to be 
merged (rollback doesn't wait on the completion of the
-269   * AssignProcedures) . This can be 
improved by changing rollback() to support sub-procedures.
-270   * See HBASE-19851 for details.
-271   */
-272  @Override
-273  protected void rollbackState(
-274  final MasterProcedureEnv env,
-275  final MergeTableRegionsState state) 
throws IOException, InterruptedException {
-276if (isTraceEnabled()) {
-277  LOG.trace(this + " rollback state=" 
+ state);
-278}
-279
-280try {
-281  switch (state) {
-282  case 
MERGE_TABLE_REGIONS_POST_OPERATION:
-283  case 
MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
-284  case 
MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
-285  case 
MERGE_TABLE_REGIONS_UPDATE_META:
-286String msg = this + " We are in 
the " + state + " state."
-287+ " It is complicated to 
rollback the merge operation that region server is working on."
-288+ " Rollback is not supported 
and we should let the merge operation to complete";
-289LOG.warn(msg);
-290// PONR
-291throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-292  case 
MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
-293break;
-294  case 
MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
-295cleanupMergedRegion(env);
-296break;
-297  case 
MERGE_TABLE_REGIONS_CLOSE_REGIONS:
-298
rollbackCloseRegionsForMerge(env);
+258  String msg = "Error trying to merge 
regions " +
+259
RegionInfo.getShortNameToLog(regionsToMerge) + " in the table " + 
getTableName() +
+260   " (in state=" + state + ")";
+261  if (!isRollbackSupported(state)) 
{
+262// We reach a state that cannot 
be rolled back. We just need to keep retry.
+263LOG.warn(msg, e);
+264  } else {
+265LOG.error(msg, e);
+266
setFailure("master-merge-regions", e);
+267  }
+268}
+269return Flow.HAS_MORE_STATE;
+270  }
+271
+272  /**
+273   * To rollback {@link 
MergeTableRegionsProcedure}, two AssignProcedures are asynchronously
+274   * submitted for each region to be 
merged (rollback doesn't wait on the completion of the
+275   * AssignProcedures) . This can be 
improved by changing rollback() to support sub-procedures.
+276   * See HBASE-19851 for details.
+277   */
+278  @Override
+279  protected void rollbackState(
+280  final MasterProcedureEnv env,
+281  final MergeTableRegionsState state) 
throws IOException, InterruptedException {
+282if (isTraceEnabled()) {
+283  LOG.trace(this + " rollback state=" 
+ state);
+284}
+285
+286try {
+287  switch (state) {
+288  case 
MERGE_TABLE_REGIONS_POST_OPERATION:
+289  case 
MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
+290  case 
MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
+291  case 
MERGE_TABLE_REGIONS_UPDATE_META:
+292String msg = this + " We are in 
the " + state + " state."
+293+ " It is complicated to 
rollback the merge operation that region server is working on."
+294+ " Rollback is not supported 
and we should let the merge operation to complete";
+295LOG.warn(msg);
+296// PONR
+297throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
+298  case 
MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION:
 299break;
-300  case 
MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION:
-301postRollBackMergeRegions(env);
+300  case 
MERGE_TABLE_REGIONS_CREATE_MERGED_REGION:
+301cleanupMergedRegion(env);
 302break;
-303  case MERGE_TABLE_REGIONS_PREPARE:
-304break;
-305  default:
-306throw new 
Unsu

[35/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
index d263423..985e93a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":6,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":6,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class Segment
+public abstract class Segment
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 This is an abstraction of a segment maintained in a 
memstore, e.g., the active
  cell set or its snapshot.
@@ -148,21 +148,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 comparator 
 
 
-protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
-dataSize 
-
-
 static long
 DEEP_OVERHEAD 
 
-
+
 static long
 FIXED_OVERHEAD 
 
-
-protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
-heapSize 
-
 
 private MemStoreLAB
 memStoreLAB 
@@ -172,10 +164,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 minSequenceId 
 
 
+protected MemStoreSizing
+segmentSize 
+
+
 protected boolean
 tagsPresent 
 
-
+
 protected TimeRangeTracker
 timeRangeTracker 
 
@@ -196,17 +192,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 protected 
-Segment(CellComparator comparator,
+Segment(CellComparator comparator,
+   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments,
TimeRangeTracker trt) 
 
 
 protected 
+Segment(CellComparator comparator,
+   TimeRangeTracker trt) 
+
+
+protected 
 Segment(CellSet cellSet,
CellComparator comparator,
MemStoreLAB memStoreLAB,
TimeRangeTracker trt) 
 
-
+
 protected 
 Segment(Segment segment) 
 
@@ -272,13 +274,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-Cell
-getFirstAfter(Cell cell) 
-
-
 (package private) MemStoreLAB
 getMemStoreLAB() 
 
+
+MemStoreSize
+getMemStoreSize() 
+
 
 long
 getMinSequenceId() 
@@ -324,42 +326,51 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 protected void
-incSize(long delta,
-   long heapOverhead)
+incSize(long delta,
+   long heapOverhead,
+   long offHeapOverhead)
 Updates the size counters of the segment by the given 
delta
 
 
 
+protected long
+indexEntryOffHeapSize(boolean offHeap) 
+
+
+protected long
+indexEntryOnHeapSize(boolean onHeap) 
+
+
 protected abstract long
 indexEntrySize() 
 
-
+
 protected void
 internalAdd(Cell cell,
boolean mslabUsed,
MemStoreSizing memstoreSizing) 
 
-
+
 boolean
 isEmpty() 
 
-
+
 boolean
 isTagsPresent() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 iterator() 
 
-
+
 long
 keySize() 
 
-
+
 Cell
 last() 
 
-
+
 Cell
 maybeCloneWithAllocator(Cell cell,
boolean forceCloneOfBigCell)
@@ -369,7 +380,16 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  When a cell's size is too big (bigger than maxAlloc), it is not allocated on 
MSLAB.
 
 
-
+
+long
+offHeapSize() 
+
+
+protected long
+offHeapSizeChange(Cell cell,
+ boolean succ) 
+
+
 protected Segment
 setCellSet(CellSet cellSetOld,
   CellSet cellSetNew)
@@ -377,29 +397,29 @@ extends http://docs.oracle.com/java

[40/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index d45ac05..64784ca 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverName
-private final ServerName serverName
+private final ServerName serverName
 
 
 
@@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 seqNum
-private final long seqNum
+private final long seqNum
 
 
 
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ts
-private final long ts
+private final long ts
 
 
 
@@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MovedRegionInfo
-public MovedRegionInfo(ServerName serverName,
+public MovedRegionInfo(ServerName serverName,
long closeSeqNum)
 
 
@@ -271,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
 
 
@@ -280,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getSeqNum
-public long getSeqNum()
+public long getSeqNum()
 
 
 
@@ -289,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMoveTime
-public long getMoveTime()
+public long getMoveTime()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index a64536f..0586bb3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static final class HRegionServer.MovedRegionsCleaner
+protected static final class HRegionServer.MovedRegionsCleaner
 extends ScheduledChore
 implements Stoppable
 Creates a Chore thread to clean the moved region 
cache.
@@ -242,7 +242,7 @@ implements 
 
 regionServer
-private HRegionServer regionServer
+private HRegionServer regionServer
 
 
 
@@ -251,7 +251,7 @@ implements 
 
 stoppable
-Stoppable stoppable
+Stoppable stoppable
 
 
 
@@ -268,7 +268,7 @@ implements 
 
 MovedRegionsCleaner
-private MovedRegionsCleaner(HRegionServer regionServer,
+private MovedRegionsCleaner(HRegionServer regionServer,
 Stoppable stoppable)
 
 
@@ -286,7 +286,7 @@ implements 
 
 create
-static HRegionServer.MovedRegionsCleaner create(HRegionServer rs)
+static HRegionServer.MovedRegionsCleaner create(HRegionServer rs)
 
 
 
@@ -295,7 +295,7 @@ implements 
 
 chore
-protected void chore()
+protected void chore()
 Description copied from 
class: ScheduledChore
 The task to execute on each scheduled execution of the 
Chore
 
@@ -310,7 +310,7 @@ implements 
 
 stop
-public void stop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why)
+public void stop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why)
 Description copied from 
interface: Stoppable
 Stop this service.
  Implementers should favor logging errors over throwing 
RuntimeExceptions.
@@ -328,7 +328,7 @@ implements 
 
 isStopped
-public boolean isStopped()
+public boolean isStopped()
 
 Specified by:
 isStopped in
 interface Stoppable



[21/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index 4f5b33a..4361237 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -278,567 +278,568 @@
 270  } else {
 271LOG.error(msg, e);
 272setFailure(e);
-273  }
-274}
-275// if split fails,  need to call 
((HRegion)parent).clearSplit() when it is a force split
-276return Flow.HAS_MORE_STATE;
-277  }
-278
-279  /**
-280   * To rollback {@link 
SplitTableRegionProcedure}, an AssignProcedure is asynchronously
-281   * submitted for parent region to be 
split (rollback doesn't wait on the completion of the
-282   * AssignProcedure) . This can be 
improved by changing rollback() to support sub-procedures.
-283   * See HBASE-19851 for details.
-284   */
-285  @Override
-286  protected void rollbackState(final 
MasterProcedureEnv env, final SplitTableRegionState state)
-287  throws IOException, 
InterruptedException {
-288if (isTraceEnabled()) {
-289  LOG.trace(this + " rollback state=" 
+ state);
-290}
-291
-292try {
-293  switch (state) {
-294  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-295  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-296  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-297  case 
SPLIT_TABLE_REGION_UPDATE_META:
-298// PONR
-299throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-300  case 
SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
-301break;
-302  case 
SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
-303// Doing nothing, as re-open 
parent region would clean up daughter region directories.
-304break;
-305  case 
SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
-306openParentRegion(env);
-307break;
-308  case 
SPLIT_TABLE_REGION_PRE_OPERATION:
-309postRollBackSplitRegion(env);
-310break;
-311  case SPLIT_TABLE_REGION_PREPARE:
-312break; // nothing to do
-313  default:
-314throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-315  }
-316} catch (IOException e) {
-317  // This will be retried. Unless 
there is a bug in the code,
-318  // this should be just a "temporary 
error" (e.g. network down)
-319  LOG.warn("pid=" + getProcId() + " 
failed rollback attempt step " + state +
-320  " for splitting the region "
-321+ 
getParentRegion().getEncodedName() + " in table " + getTableName(), e);
-322  throw e;
-323}
-324  }
-325
-326  /*
-327   * Check whether we are in the state 
that can be rollback
-328   */
-329  @Override
-330  protected boolean 
isRollbackSupported(final SplitTableRegionState state) {
-331switch (state) {
-332  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-333  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-334  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-335  case 
SPLIT_TABLE_REGION_UPDATE_META:
-336// It is not safe to rollback if 
we reach to these states.
-337return false;
-338  default:
-339break;
-340}
-341return true;
-342  }
-343
-344  @Override
-345  protected SplitTableRegionState 
getState(final int stateId) {
-346return 
SplitTableRegionState.forNumber(stateId);
-347  }
-348
-349  @Override
-350  protected int getStateId(final 
SplitTableRegionState state) {
-351return state.getNumber();
-352  }
-353
-354  @Override
-355  protected SplitTableRegionState 
getInitialState() {
-356return 
SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
-357  }
-358
-359  @Override
-360  protected void 
serializeStateData(ProcedureStateSerializer serializer)
-361  throws IOException {
-362
super.serializeStateData(serializer);
-363
-364final 
MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
-365
MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
-366
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-367
.setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion()))
-368
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_1_RI))
-369
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_2_RI));
-370
serializer.serialize(splitTableRegionMsg.build());
-371  }
-372
-373  @Override
-374  protected void 
deserializeStateData(ProcedureStateSerializer serializer)
-375  throws IOException {
-376
super.deserializeStateData(serializer);
-377
-378final 
Ma

[26/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstOnRowCol(final Cell cell) {
-2857if (cell instanceof 
Byt

[34/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
index 36977cd..0baed61 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
@@ -527,26 +527,30 @@
 
 
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true";
 title="class or interface in java.util">SortedMapLong,HRegion>
-HRegionServer.getCopyOfOnlineRegionsSortedBySize() 
+HRegionServer.getCopyOfOnlineRegionsSortedByOffHeapSize() 
 
 
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true";
 title="class or interface in java.util">SortedMapLong,HRegion>
+HRegionServer.getCopyOfOnlineRegionsSortedByOnHeapSize() 
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 HRegionServer.getOnlineRegionsLocalContext()
 For tests, web ui and metrics.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 HRegionServer.getRegions() 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 RSRpcServices.getRegions(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List regionSpecifiers,
   CacheEvictionStatsBuilder stats)
 Find the List of HRegions based on a list of region 
specifiers
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 HRegionServer.getRegions(TableName tableName)
 Gets the online regions of the specified table.
@@ -750,10 +754,6 @@
RpcCallContext context) 
 
 
-protected long
-FlushLargeStoresPolicy.getFlushSizeLowerBound(HRegion region) 
-
-
 private Result
 RSRpcServices.increment(HRegion region,
  OperationQuota quota,
@@ -764,43 +764,43 @@
 Execute an increment mutation.
 
 
-
+
 protected void
 HRegion.RegionScannerImpl.initializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List joinedScanners,
 HRegion region) 
 
-
+
 protected void
 ReversedRegionScannerImpl.initializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List joinedScanners,
 HRegion region) 
 
-
+
 static HRegion
 HRegion.openHRegion(HRegion other,
CancelableProgressable reporter)
 Useful when reopening a closed region (normally for unit 
tests)
 
 
-
+
 void
 BaseRowProcessor.postBatchMutate(HRegion region) 
 
-
+
 void
 RowProcessor.postBatchMutate(HRegion region)
 Deprecated. 
 The hook to be executed after the process() and applying 
the Mutations to region.
 
 
-
+
 void
 BaseRowProcessor.postProcess(HRegion region,
WALEdit walEdit,
boolean success) 
 
-
+
 void
 RowProcessor.postProcess(HRegion region,
WALEdit walEdit,
@@ -809,12 +809,12 @@
 The hook to be executed after process() and applying the 
Mutations to region.
 
 
-
+
 void
 BaseRowProcessor.preBatchMutate(HRegion region,
   WALEdit walEdit) 
 
-
+
 void
 RowProcessor.preBatchMutate(HRegion region,
   WALEdit walEdit)
@@ -822,17 +822,17 @@
 The hook to be executed after the process() but before 
applying the Mutations to region.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 SecureBulkLoadManager.prepareBulkLoad(HRegion region,

org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest request) 
 
-
+
 void
 BaseRowProcessor.preProcess(HRegion region,
   WALEdit 

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 56564b90a -> 991224b95


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 1a47423..58a6306 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -1614,7 +1614,7 @@
 1606int storefiles = 0;
 1607int storeUncompressedSizeMB = 0;
 1608int storefileSizeMB = 0;
-1609int memstoreSizeMB = (int) 
(r.getMemStoreSize() / 1024 / 1024);
+1609int memstoreSizeMB = (int) 
(r.getMemStoreDataSize() / 1024 / 1024);
 1610long storefileIndexSizeKB = 0;
 1611int rootLevelIndexSizeKB = 0;
 1612int totalStaticIndexSizeKB = 0;
@@ -2751,11 +2751,11 @@
 2743  }
 2744
 2745  /**
-2746   * @return A new Map of online regions 
sorted by region size with the first entry being the
-2747   * biggest.  If two regions are the 
same size, then the last one found wins; i.e. this method
-2748   * may NOT return all regions.
+2746   * @return A new Map of online regions 
sorted by region off-heap size with the first entry being
+2747   *   the biggest.  If two regions are 
the same size, then the last one found wins; i.e. this
+2748   *   method may NOT return all 
regions.
 2749   */
-2750  SortedMap 
getCopyOfOnlineRegionsSortedBySize() {
+2750  SortedMap 
getCopyOfOnlineRegionsSortedByOffHeapSize() {
 2751// we'll sort the regions in 
reverse
 2752SortedMap 
sortedRegions = new TreeMap<>(
 2753new Comparator() {
@@ -2766,974 +2766,995 @@
 2758});
 2759// Copy over all regions. Regions 
are sorted by size with biggest first.
 2760for (HRegion region : 
this.onlineRegions.values()) {
-2761  
sortedRegions.put(region.getMemStoreSize(), region);
+2761  
sortedRegions.put(region.getMemStoreOffHeapSize(), region);
 2762}
 2763return sortedRegions;
 2764  }
 2765
 2766  /**
-2767   * @return time stamp in millis of 
when this region server was started
-2768   */
-2769  public long getStartcode() {
-2770return this.startcode;
-2771  }
-2772
-2773  /** @return reference to 
FlushRequester */
-2774  @Override
-2775  public FlushRequester 
getFlushRequester() {
-2776return this.cacheFlusher;
-2777  }
-2778
-2779  @Override
-2780  public CompactionRequester 
getCompactionRequestor() {
-2781return this.compactSplitThread;
-2782  }
-2783
-2784  /**
-2785   * Get the top N most loaded regions 
this server is serving so we can tell the
-2786   * master which regions it can 
reallocate if we're overloaded. TODO: actually
-2787   * calculate which regions are most 
loaded. (Right now, we're just grabbing
-2788   * the first N regions being served 
regardless of load.)
+2767   * @return A new Map of online regions 
sorted by region heap size with the first entry being the
+2768   *   biggest.  If two regions are the 
same size, then the last one found wins; i.e. this method
+2769   *   may NOT return all regions.
+2770   */
+2771  SortedMap 
getCopyOfOnlineRegionsSortedByOnHeapSize() {
+2772// we'll sort the regions in 
reverse
+2773SortedMap 
sortedRegions = new TreeMap<>(
+2774new Comparator() {
+2775  @Override
+2776  public int compare(Long a, 
Long b) {
+2777return -1 * 
a.compareTo(b);
+2778  }
+2779});
+2780// Copy over all regions. Regions 
are sorted by size with biggest first.
+2781for (HRegion region : 
this.onlineRegions.values()) {
+2782  
sortedRegions.put(region.getMemStoreHeapSize(), region);
+2783}
+2784return sortedRegions;
+2785  }
+2786
+2787  /**
+2788   * @return time stamp in millis of 
when this region server was started
 2789   */
-2790  protected RegionInfo[] 
getMostLoadedRegions() {
-2791ArrayList regions 
= new ArrayList<>();
-2792for (Region r : 
onlineRegions.values()) {
-2793  if (!r.isAvailable()) {
-2794continue;
-2795  }
-2796  if (regions.size() < 
numRegionsToReport) {
-2797
regions.add(r.getRegionInfo());
-2798  } else {
-2799break;
-2800  }
-2801}
-2802return regions.toArray(new 
RegionInfo[regions.size()]);
+2790  public long getStartcode() {
+2791return this.startcode;
+2792  }
+2793
+2794  /** @return reference to 
FlushRequester */
+2795  @Override
+2796  public FlushRequester 
getFlushRequester() {
+2797return this.cacheFlusher;
+2798  }
+2799
+2800  @Override
+2801  public Compacti

[17/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
index 8c02b6e..1a90c1d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
@@ -51,55 +51,65 @@
 043
 044  protected long flushSizeLowerBound = 
-1;
 045
-046  protected long 
getFlushSizeLowerBound(HRegion region) { int familyNumber = 
region.getTableDescriptor().getColumnFamilyCount();
-047// For multiple families, lower bound 
is the "average flush size" by default
-048// unless setting in configuration is 
larger.
-049long flushSizeLowerBound = 
region.getMemStoreFlushSize() / familyNumber;
-050long minimumLowerBound =
-051
getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
-052  
DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
-053if (minimumLowerBound > 
flushSizeLowerBound) {
-054  flushSizeLowerBound = 
minimumLowerBound;
-055}
-056// use the setting in table 
description if any
-057String flushedSizeLowerBoundString 
=
-058
region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
-059if (flushedSizeLowerBoundString == 
null) {
-060  LOG.debug("No {} set in table {} 
descriptor;" +
-061  "using 
region.getMemStoreFlushSize/# of families ({}) instead.",
-062  
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-063  
region.getTableDescriptor().getTableName(),
-064  
StringUtils.humanSize(flushSizeLowerBound) + ")");
-065} else {
-066  try {
-067flushSizeLowerBound = 
Long.parseLong(flushedSizeLowerBoundString);
-068  } catch (NumberFormatException nfe) 
{
-069// fall back for fault setting
-070LOG.warn("Number format exception 
parsing {} for table {}: {}, {}; " +
-071"using 
region.getMemStoreFlushSize/# of families ({}) instead.",
-072
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-073
region.getTableDescriptor().getTableName(),
-074
flushedSizeLowerBoundString,
-075nfe,
-076flushSizeLowerBound);
-077
-078  }
-079}
-080return flushSizeLowerBound;
-081  }
-082
-083  protected boolean shouldFlush(HStore 
store) {
-084if 
(store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) {
-085  LOG.debug("Flush {} of {}; 
memstoreSize={} > lowerBound={}",
-086  store.getColumnFamilyName(),
-087  
region.getRegionInfo().getEncodedName(),
-088  
store.getMemStoreSize().getDataSize(),
-089  this.flushSizeLowerBound);
-090  return true;
-091}
-092return false;
-093  }
-094}
+046  protected void 
setFlushSizeLowerBounds(HRegion region) {
+047int familyNumber = 
region.getTableDescriptor().getColumnFamilyCount();
+048// For multiple families, lower bound 
is the "average flush size" by default
+049// unless setting in configuration is 
larger.
+050flushSizeLowerBound = 
region.getMemStoreFlushSize() / familyNumber;
+051long minimumLowerBound =
+052
getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
+053  
DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
+054if (minimumLowerBound > 
flushSizeLowerBound) {
+055  flushSizeLowerBound = 
minimumLowerBound;
+056}
+057// use the setting in table 
description if any
+058String flushedSizeLowerBoundString 
=
+059
region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
+060if (flushedSizeLowerBoundString == 
null) {
+061  LOG.debug("No {} set in table {} 
descriptor;"
+062  + "using 
region.getMemStoreFlushHeapSize/# of families ({}) "
+063  + "instead."
+064  , 
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
+065  , 
region.getTableDescriptor().getTableName()
+066  , 
StringUtils.humanSize(flushSizeLowerBound)
+067  + ")");
+068} else {
+069  try {
+070flushSizeLowerBound = 
Long.parseLong(flushedSizeLowerBoundString);
+071  } catch (NumberFormatException nfe) 
{
+072// fall back for fault setting
+073LOG.warn("Number format exception 
parsing {} for table {}: {}, {}; "
+074+ "using 
region.getMemStoreFlushHeapSize/# of families ({}) "
+075+ "and 
region.getMemStoreFlushOffHeapSize/# of families ({}) "
+076+ "instead."
+077, 
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
+078, 
region.getTableDescriptor().getTableName()
+079, 
flushedS

[11/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
o

[46/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index aa1df59..bc89c2d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -576,7 +576,7 @@ extends 
 
 EXPECTED_SPLIT_STATES
-private static RegionState.State[] EXPECTED_SPLIT_STATES
+private static RegionState.State[] EXPECTED_SPLIT_STATES
 
 
 
@@ -686,7 +686,7 @@ extends 
 
 rollbackState
-protected void rollbackState(MasterProcedureEnv env,
+protected void rollbackState(MasterProcedureEnv env,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException,
  http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true";
 title="class or interface in java.lang">InterruptedException
@@ -710,7 +710,7 @@ extends 
 
 isRollbackSupported
-protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
+protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 Description copied from 
class: StateMachineProcedure
 Used by the default implementation of abort() to know if 
the current state can be aborted
  and rollback can be triggered.
@@ -726,7 +726,7 @@ extends 
 
 getState
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState getState(int stateId)
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState getState(int stateId)
 Description copied from 
class: StateMachineProcedure
 Convert an ordinal (or state id) to an Enum (or more 
descriptive) state object.
 
@@ -745,7 +745,7 @@ extends 
 
 getStateId
-protected int getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
+protected int getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 Description copied from 
class: StateMachineProcedure
 Convert the Enum (or more descriptive) state object to an 
ordinal (or state id).
 
@@ -764,7 +764,7 @@ extends 
 
 getInitialState
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState getInitialState()
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState getInitialState()
 Description copied from 
class: StateMachineProcedure
 Return the initial state object that will be used for the 
first call to executeFromState().
 
@@ -781,7 +781,7 @@ extends 
 
 serializeStateData
-protected void serializeStateData(ProcedureStateSerializer serializer)
+protected void serializeStateData(ProcedureStateSerializer serializer)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
class: Procedure
 The user-level code of the procedure may have some state to
@@ -803,7 +803,7 @@ extends 
 
 deserializeStateData
-protected void deserializeStateData(ProcedureStateSerializer serializer)
+protected void deserializeStateData(ProcedureStateSerializer serializer)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
class: Procedure
 Called on store load to allow the user to decode the 
previously serialized
@@ -824,7 +824,7 @@ extends 
 
 toStringClassDetails
-public void toStringClassDetails(http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder sb)
+public void toStringClassDetails(http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder sb)
 Description copied from 
class: Procedure
 Extend the toString() information with the procedure details
  e.g. className and parameters
@@ -842,7 +842,7 @@ extends 
 
 getParentRegion
-private RegionInfo getParentRegion()
+private RegionInfo getParentRegion()
 
 
 
@@ -851,7 +851,7 @@ extends 
 
 getTableOperationType
-public TableProcedureInterface.Tab

[37/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
index 9de9558..dda133a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -319,10 +319,18 @@ implements 
+boolean
+isOffHeap() 
+
+
+boolean
+isOnHeap() 
+
+
 private void
 recycleChunks() 
 
-
+
 private void
 tryRetireChunk(Chunk c)
 Try to retire the current chunk if it is still
@@ -654,13 +662,39 @@ implements 
+
+
+
+
+isOnHeap
+public boolean isOnHeap()
+
+Specified by:
+isOnHeap in
 interface MemStoreLAB
+
+
+
+
+
+
+
+
+isOffHeap
+public boolean isOffHeap()
+
+Specified by:
+isOffHeap in
 interface MemStoreLAB
+
+
+
 
 
 
 
 
 getCurrentChunk
-Chunk getCurrentChunk()
+Chunk getCurrentChunk()
 
 
 
@@ -669,7 +703,7 @@ implements 
 
 getPooledChunks
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true";
 title="class or interface in java.util.concurrent">BlockingQueue getPooledChunks()
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true";
 title="class or interface in java.util.concurrent">BlockingQueue getPooledChunks()
 
 
 
@@ -678,7 +712,7 @@ implements 
 
 getNumOfChunksReturnedToPool
-http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer getNumOfChunksReturnedToPool()
+http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer getNumOfChunksReturnedToPool()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreSize.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
index 21b1e4d..bc8da8d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -152,6 +152,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 'heapSize' tracks all Cell's heap size occupancy.
 
 
+
+protected long
+offHeapSize
+off-heap size: the aggregated size of all data that is 
allocated off-heap including all
+ key-values that reside off-heap and the metadata that resides off-heap
+
+
 
 
 
@@ -164,14 +171,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 Constructors 
 
-Constructor and Description
+Modifier
+Constructor and Description
 
 
-MemStoreSize() 
+ 
+MemStoreSize() 
 
 
-MemStoreSize(long dataSize,
-long heapSize) 
+ 
+MemStoreSize(long dataSize,
+long heapSize,
+long offHeapSize) 
+
+
+protected 
+MemStoreSize(MemStoreSize memStoreSize) 
 
 
 
@@ -201,14 +216,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 getHeapSize() 
 
 
+long
+getOffHeapSize() 
+
+
 int
 hashCode() 
 
-
+
 boolean
 isEmpty() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString() 
 
@@ -240,7 +259,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 dataSize
-protected long dataSize
+protected volatile long dataSize
 'dataSize' tracks the Cell's data bytes size alone (Key 
bytes, value bytes). A cell's data can
  be in on heap or off heap area depending on the MSLAB and its configuration 
to be using on heap
  or off heap LABs
@@ -249,14 +268,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 heapSize
-protected long heapSize
+protected volatile long heapSize
 'heapSize' 

[20/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
index 49ff09e..c27dc14 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
@@ -179,177 +179,179 @@
 171  }
 172
 173  MemStoreSizing getSnapshotSizing() {
-174return new 
MemStoreSizing(this.snapshot.keySize(), this.snapshot.heapSize());
-175  }
-176
-177  @Override
-178  public String toString() {
-179StringBuilder buf = new 
StringBuilder();
-180int i = 1;
-181try {
-182  for (Segment segment : 
getSegments()) {
-183buf.append("Segment (" + i + ") " 
+ segment.toString() + "; ");
-184i++;
-185  }
-186} catch (IOException e){
-187  return e.toString();
-188}
-189return buf.toString();
-190  }
-191
-192  protected Configuration 
getConfiguration() {
-193return conf;
-194  }
-195
-196  protected void dump(Logger log) {
-197active.dump(log);
-198snapshot.dump(log);
-199  }
-200
-201
-202  /*
-203   * Inserts the specified Cell into 
MemStore and deletes any existing
-204   * versions of the same 
row/family/qualifier as the specified Cell.
-205   * 

-206 * First, the specified Cell is inserted into the Memstore. +174return new MemStoreSizing(this.snapshot.keySize(), +175this.snapshot.heapSize(), +176this.snapshot.offHeapSize()); +177 } +178 +179 @Override +180 public String toString() { +181StringBuilder buf = new StringBuilder(); +182int i = 1; +183try { +184 for (Segment segment : getSegments()) { +185buf.append("Segment (" + i + ") " + segment.toString() + "; "); +186i++; +187 } +188} catch (IOException e){ +189 return e.toString(); +190} +191return buf.toString(); +192 } +193 +194 protected Configuration getConfiguration() { +195return conf; +196 } +197 +198 protected void dump(Logger log) { +199active.dump(log); +200snapshot.dump(log); +201 } +202 +203 +204 /* +205 * Inserts the specified Cell into MemStore and deletes any existing +206 * versions of the same row/family/qualifier as the specified Cell. 207 *

-208 * If there are any existing Cell in this MemStore with the same row, -209 * family, and qualifier, they are removed. -210 *

-211 * Callers must hold the read lock. -212 * -213 * @param cell the cell to be updated -214 * @param readpoint readpoint below which we can safely remove duplicate KVs -215 * @param memstoreSize -216 */ -217 private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) { -218// Add the Cell to the MemStore -219// Use the internalAdd method here since we (a) already have a lock -220// and (b) cannot safely use the MSLAB here without potentially -221// hitting OOME - see TestMemStore.testUpsertMSLAB for a -222// test that triggers the pathological case if we don't avoid MSLAB -223// here. -224// This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). We -225// must do below deep copy. Or else we will keep referring to the bigger chunk of memory and -226// prevent it from getting GCed. -227cell = deepCopyIfNeeded(cell); -228this.active.upsert(cell, readpoint, memstoreSizing); -229setOldestEditTimeToNow(); -230checkActiveSize(); -231 } -232 -233 /* -234 * @param a -235 * @param b -236 * @return Return lowest of a or b or null if both a and b are null -237 */ -238 protected Cell getLowest(final Cell a, final Cell b) { -239if (a == null) { -240 return b; -241} -242if (b == null) { -243 return a; -244} -245return comparator.compareRows(a, b) <= 0? a: b; -246 } -247 -248 /* -249 * @param key Find row that follows this one. If null, return first. -250 * @param set Set to look in for a row beyond row. -251 * @return Next row or null if none found. If one found, will be a new -252 * KeyValue -- can be destroyed by subsequent calls to this method. -253 */ -254 protected Cell getNextRow(final Cell key, -255 final NavigableSet set) { -256Cell result = null; -257SortedSet tail = key == null? set: set.tailSet(key); -258// Iterate until we fall into the next row; i.e. 
move off current row -259for (Cell cell: tail) { -260 if (comparator.compareRows(cell, key) <= 0) { -261continue; -262 } -263 // Note: Not suppressing deletes or expired cells. Needs to be handled -264 // by higher up functions. -265 result = cell; -26


[44/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
index 740d31b..b58e92c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -165,7 +165,7 @@ extends Segment
-dataSize,
 FIXED_OVERHEAD,
 heapSize,
 minSequenceId,
 tagsPresent,
 timeRangeTracker
+FIXED_OVERHEAD,
 minSequenceId,
 segmentSize,
 tagsPresent,
 timeRangeTracker
 
 
 
@@ -242,89 +242,86 @@ extends getCellSet() 
 
 
-Cell
-getFirstAfter(Cell cell) 
-
-
 long
 getMinSequenceId() 
 
-
+
 int
 getNumOfSegments() 
 
-
+
 KeyValueScanner
 getScanner(long readPoint)
 Creates the scanner for the given read point
 
 
-
+
 KeyValueScanner
 getScanner(long readPoint,
   long order)
 Creates the scanner for the given read point, and a 
specific order in a list
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 getScanners(long readPoint,
long order) 
 
-
+
 TimeRangeTracker
 getTimeRangeTracker() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true";
 title="class or interface in java.util">SortedSet
 headSet(Cell firstKeyOnRow) 
 
-
+
 long
 heapSize() 
 
-
+
 void
 incScannerCount() 
 
-
+
 protected void
-incSize(long delta,
-   long heapOverhead)
+incSize(long delta,
+   long heapOverhead,
+   long offHeapOverhead)
 Updates the heap size counter of the segment by the given 
delta
 
 
-
+
 protected long
 indexEntrySize() 
 
-
+
 protected void
 internalAdd(Cell cell,
boolean mslabUsed,
MemStoreSizing memstoreSizing) 
 
-
+
 boolean
 isEmpty() 
 
-
+
 boolean
 isTagsPresent() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 iterator() 
 
-
+
 long
 keySize() 
 
-
+
 Cell
 last() 
 
-
+
 Cell
 maybeCloneWithAllocator(Cell cell,
boolean forceCloneOfBigCell)
@@ -332,7 +329,7 @@ extends 
+
 protected CompositeImmutableSegment
 setCellSet(CellSet cellSetOld,
   CellSet cellSetNew)
@@ -340,22 +337,22 @@ extends 
+
 boolean
 shouldSeek(TimeRange tr,
   long oldestUnexpiredTS) 
 
-
+
 protected http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true";
 title="class or interface in java.util">SortedSet
 tailSet(Cell firstCell)
 Returns a subset of the segment cell set, which starts with 
the given cell
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString() 
 
-
+
 protected void
 updateMetaInfo(Cell cellToAdd,
   boolean succ,
@@ -375,7 +372,7 @@ extends Segment
-getCellLength,
 getComparator,
 getMemStoreLAB,
 heapSizeChange,
 updateMetaInfo
+getCellLength,
 getComparator,
 getMemStoreLAB,
 getMemStoreSize,
 heapSizeChange,
 indexEntryOffHeapSize,
 indexEntryOnHeapSize,
 offHeapSize,
  offHeapSizeChange,
 updateMetaInfo
 
 
 
@@ -498,28 +495,13 @@ extends 
-
-
-
-
-getFirstAfter
-public Cell getFirstAfter(Cell cell)
-
-Overrides:
-getFirstAfter in
 class Segment
-Returns:
-the first cell in the segment that has equal or greater key than the given 
cell
-
-
-
 
 
 
 
 
 close
-public void close()
+public void close()
 Closing a segment before it is being discarded
 
 Overrides:
@@ -533,7 +515,7 @@ extends 
 
 maybeCloneWithAllocator
-public Cell maybeCloneWithAllocator(Cell cell,
+public Cell maybeCloneWithAllocator(Cell cell,
 boolean forceCloneOfBigCell)
 If the segment has a memory allocator the cell is being 
cloned to this space, and returned;
  otherwise the given cell is returned
@@ -551,7 +533,

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hba

[51/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/991224b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/991224b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/991224b9

Branch: refs/heads/asf-site
Commit: 991224b958b51bd683ae6377a6ddd54ffbe4afcc
Parents: 56564b9
Author: jenkins 
Authored: Sun Feb 18 15:13:10 2018 +
Committer: jenkins 
Committed: Sun Feb 18 15:13:10 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 .../org/apache/hadoop/hbase/CellUtil.html   | 2 +-
 .../apache/hadoop/hbase/client/Mutation.html| 2 +-
 .../org/apache/hadoop/hbase/client/Result.html  | 2 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 34722 -
 checkstyle.rss  |12 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |   141 +-
 .../apache/hadoop/hbase/PrivateCellUtil.html|53 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |   195 +-
 .../hadoop/hbase/class-use/CellComparator.html  |29 +-
 .../hbase/class-use/RegionTooBusyException.html | 2 +-
 .../hadoop/hbase/client/package-tree.html   |22 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../assignment/MergeTableRegionsProcedure.html  |70 +-
 ...tTableRegionProcedure.StoreFileSplitter.html |12 +-
 .../assignment/SplitTableRegionProcedure.html   |60 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../hbase/regionserver/AbstractMemStore.html|32 +-
 .../regionserver/CSLMImmutableSegment.html  | 8 +-
 .../regionserver/CellArrayImmutableSegment.html |12 +-
 .../regionserver/CellChunkImmutableSegment.html |60 +-
 .../hbase/regionserver/CompactionPipeline.html  |63 +-
 .../regionserver/CompositeImmutableSegment.html |   137 +-
 .../regionserver/FlushAllLargeStoresPolicy.html | 2 +-
 .../regionserver/FlushLargeStoresPolicy.html|12 +-
 .../FlushNonSloppyStoresFirstPolicy.html| 2 +-
 .../HRegion.BatchOperation.Visitor.html | 4 +-
 .../regionserver/HRegion.BatchOperation.html|78 +-
 .../regionserver/HRegion.BulkLoadListener.html  | 8 +-
 .../HRegion.FlushResult.Result.html |10 +-
 .../hbase/regionserver/HRegion.FlushResult.html | 8 +-
 .../HRegion.MutationBatchOperation.html |44 +-
 .../regionserver/HRegion.RegionScannerImpl.html |90 +-
 .../HRegion.ReplayBatchOperation.html   |32 +-
 .../regionserver/HRegion.RowLockContext.html|28 +-
 .../hbase/regionserver/HRegion.RowLockImpl.html |16 +-
 .../hadoop/hbase/regionserver/HRegion.html  |  1077 +-
 .../HRegionServer.MovedRegionInfo.html  |16 +-
 .../HRegionServer.MovedRegionsCleaner.html  |16 +-
 .../hbase/regionserver/HRegionServer.html   |   419 +-
 .../regionserver/ImmutableMemStoreLAB.html  |38 +-
 .../hbase/regionserver/ImmutableSegment.html|31 +-
 .../MemStoreFlusher.FlushHandler.html   | 6 +-
 .../MemStoreFlusher.FlushQueueEntry.html| 2 +-
 .../MemStoreFlusher.FlushRegionEntry.html   |36 +-
 .../MemStoreFlusher.WakeupFlushThread.html  | 8 +-
 .../hbase/regionserver/MemStoreFlusher.html |   108 +-
 .../hadoop/hbase/regionserver/MemStoreLAB.html  |30 +-
 .../hbase/regionserver/MemStoreLABImpl.html |44 +-
 .../hadoop/hbase/regionserver/MemStoreSize.html |89 +-
 .../hbase/regionserver/MemStoreSizing.html  |   117 +-
 .../hbase/regionserver/MemStoreSnapshot.html|67 +-
 .../hbase/regionserver/MutableSegment.html  | 8 +-
 .../hbase/regionserver/Region.Operation.html|34 +-
 .../hbase/region

[33/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferExtendedCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferExtendedCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferExtendedCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferExtendedCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createF

[07/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.re

[30/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);

[05/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.Mult

[47/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
index f30793d..67cc5f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
@@ -890,6 +890,10 @@
 
 
 
+ImmutableSegment(CellComparator comparator,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments) 
+
+
 ImmutableSegment(CellSet cs,
 CellComparator comparator,
 MemStoreLAB memStoreLAB)
@@ -897,43 +901,43 @@
  C-tor to be used to build the derived classes
 
 
-
+
 KeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 CellComparator comparator)
 Constructor.
 
 
-
+
 KVScannerComparator(CellComparator kvComparator)
 Constructor
 
 
-
+
 MemStoreCompactorSegmentsIterator(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments,
  CellComparator comparator,
  int compactionKVMax,
  HStore store) 
 
-
+
 MemStoreMergerSegmentsIterator(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments,
   CellComparator comparator,
   int compactionKVMax) 
 
-
+
 MutableSegment(CellSet cellSet,
   CellComparator comparator,
   MemStoreLAB memStoreLAB) 
 
-
+
 ReversedKeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 CellComparator comparator) 
 
-
+
 ReversedKVScannerComparator(CellComparator kvComparator)
 Constructor
 
 
-
+
 ScanInfo(byte[] family,
 int minVersions,
 int maxVersions,
@@ -948,7 +952,7 @@
 long preadMaxBytes,
 boolean newVersionBehavior) 
 
-
+
 ScanInfo(org.apache.hadoop.conf.Configuration conf,
 byte[] family,
 int minVersions,
@@ -960,13 +964,18 @@
 CellComparator comparator,
 boolean newVersionBehavior) 
 
-
+
 ScanInfo(org.apache.hadoop.conf.Configuration conf,
 ColumnFamilyDescriptor family,
 long ttl,
 long timeToPurgeDeletes,
 CellComparator comparator) 
 
+
+Segment(CellComparator comparator,
+   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments,
+   TimeRangeTracker trt) 
+
 
 Segment(CellComparator comparator,
TimeRangeTracker trt) 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/class-use/RegionTooBusyException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/RegionTooBusyException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/RegionTooBusyException.html
index d8c730a..12712ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/RegionTooBusyException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/RegionTooBusyException.html
@@ -119,7 +119,7 @@
 
 
 private void
-HRegion.requestFlushIfNeeded(long memstoreTotalSize) 
+HRegion.requestFlushIfNeeded() 
 
 
 private void

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index a90ffcb..d14bfcc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -547,24 +547,24 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.client.CompactionState
-org.apache.hadoop.hbase.client.Abstract

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
index 42ad63c..85d3a6a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
@@ -61,220 +61,262 @@
 053  protected 
CellChunkImmutableSegment(CellComparator comparator, MemStoreSegmentsIterator 
iterator,
 054  MemStoreLAB memStoreLAB, int 
numOfCells, MemStoreCompactionStrategy.Action action) {
 055super(null, comparator, memStoreLAB); 
// initialize the CellSet with NULL
-056incSize(0, DEEP_OVERHEAD_CCM); // 
initiate the heapSize with the size of the segment metadata
-057// build the new CellSet based on 
CellArrayMap and update the CellSet of the new Segment
-058initializeCellSet(numOfCells, 
iterator, action);
-059  }
-060
-061  
/**
-062   * C-tor to be used when new 
CellChunkImmutableSegment is built as a result of flattening
-063   * of CSLMImmutableSegment
-064   * The given iterator returns the Cells 
that "survived" the compaction.
-065   */
-066  protected 
CellChunkImmutableSegment(CSLMImmutableSegment segment,
-067  MemStoreSizing memstoreSizing, 
MemStoreCompactionStrategy.Action action) {
-068super(segment); // initiailize the 
upper class
-069
incSize(0,-CSLMImmutableSegment.DEEP_OVERHEAD_CSLM + 
CellChunkImmutableSegment.DEEP_OVERHEAD_CCM);
-070int numOfCells = 
segment.getCellsCount();
-071// build the new CellSet based on 
CellChunkMap
-072reinitializeCellSet(numOfCells, 
segment.getScanner(Long.MAX_VALUE), segment.getCellSet(),
-073action);
-074// arrange the meta-data size, 
decrease all meta-data sizes related to SkipList;
-075// add sizes of CellChunkMap entry, 
decrease also Cell object sizes
-076// (reinitializeCellSet doesn't take 
the care for the sizes)
-077long newSegmentSizeDelta = 
numOfCells*(indexEntrySize()-ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
-078
-079incSize(0, newSegmentSizeDelta);
-080memstoreSizing.incMemStoreSize(0, 
newSegmentSizeDelta);
-081  }
-082
-083  @Override
-084  protected long indexEntrySize() {
-085return ((long) 
ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD);
-086  }
-087
-088  @Override
-089  protected boolean canBeFlattened() {
-090return false;
-091  }
-092
-093  /  PRIVATE METHODS  
/
-094  
/**/
-095  // Create CellSet based on CellChunkMap 
from compacting iterator
-096  private void initializeCellSet(int 
numOfCells, MemStoreSegmentsIterator iterator,
-097  MemStoreCompactionStrategy.Action 
action) {
-098
-099// calculate how many chunks we will 
need for index
-100int chunkSize = 
ChunkCreator.getInstance().getChunkSize();
-101int numOfCellsInChunk = 
CellChunkMap.NUM_OF_CELL_REPS_IN_CHUNK;
-102int numberOfChunks = 
calculateNumberOfChunks(numOfCells, numOfCellsInChunk);
-103int numOfCellsAfterCompaction = 0;
-104int currentChunkIdx = 0;
-105int offsetInCurentChunk = 
ChunkCreator.SIZEOF_CHUNK_HEADER;
-106int numUniqueKeys=0;
-107Cell prev = null;
-108// all index Chunks are allocated 
from ChunkCreator
-109Chunk[] chunks = new 
Chunk[numberOfChunks];
-110for (int i=0; i < numberOfChunks; 
i++) {
-111  chunks[i] = 
this.getMemStoreLAB().getNewExternalChunk();
-112}
-113while (iterator.hasNext()) {
// the iterator hides the elimination logic for compaction
-114  boolean alreadyCopied = false;
-115  Cell c = iterator.next();
-116  numOfCellsAfterCompaction++;
-117  assert(c instanceof 
ExtendedCell);
-118  if (((ExtendedCell)c).getChunkId() 
== ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
-119// CellChunkMap assumes all cells 
are allocated on MSLAB.
-120// Therefore, cells which are not 
allocated on MSLAB initially,
-121// are copied into MSLAB here.
-122c = copyCellIntoMSLAB(c);
-123alreadyCopied = true;
-124  }
-125  if (offsetInCurentChunk + 
ClassSize.CELL_CHUNK_MAP_ENTRY > chunkSize) {
-126currentChunkIdx++;  
// continue to the next index chunk
-127offsetInCurentChunk = 
ChunkCreator.SIZEOF_CHUNK_HEADER;
-128  }
-129  if (action == 
MemStoreCompactionStrategy.Action.COMPACT && !alreadyCopied) {
-130// for compaction copy cell to 
the new segment (MSLAB copy)
-131c = maybeCloneWithAllocator(c, 
false);
-132  }
-133  off

[09/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor

[29/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstO

[06/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.

[03/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyC

[45/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
index 68118ab..0ac1bc0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
@@ -160,7 +160,7 @@ extends Segment
-dataSize,
 FIXED_OVERHEAD,
 heapSize,
 minSequenceId,
 tagsPresent,
 timeRangeTracker
+FIXED_OVERHEAD,
 minSequenceId,
 segmentSize,
 tagsPresent,
 timeRangeTracker
 
 
 
@@ -248,7 +248,7 @@ extends Segment
-close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCellSet,
 getComparator,
 getFirstAfter,
 getMemStoreLAB,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 internalAdd,
 isEmpty,
 isTagsPresent,
 iterator, keySize,
 last,
 maybeCloneWithAllocator,
 setCellSet,
 shouldSeek,
 tailSet,
 updateMetaInfo, updateMetaInfo
+close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCellSet,
 getComparator,
 getMemStoreLAB,
 getMemStoreSize,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 indexEntryOffHeapSize,
 indexEntryOnHeapSize,
 internalAdd,
 isEmpty, isTagsPresent,
 iterator,
 keySize,
 last,
 maybeCloneWithAllocator,
 offHeapSize,
 offHeapSizeChange,
 setCellSet,
 shouldSeek,
 tailSet,
 updateMetaInfo,
 updateMetaInfo
 
 
 
@@ -334,7 +334,7 @@ extends 
 
 indexEntrySize
-protected long indexEntrySize()
+protected long indexEntrySize()
 
 Specified by:
 indexEntrySize in
 class Segment
@@ -347,7 +347,7 @@ extends 
 
 canBeFlattened
-protected boolean canBeFlattened()
+protected boolean canBeFlattened()
 
 Specified by:
 canBeFlattened in
 class ImmutableSegment
@@ -360,7 +360,7 @@ extends 
 
 initializeCellSet
-private void initializeCellSet(int numOfCells,
+private void initializeCellSet(int numOfCells,
MemStoreSegmentsIterator iterator,
MemStoreCompactionStrategy.Action action)
 
@@ -371,7 +371,7 @@ extends 
 
 reinitializeCellSet
-private void reinitializeCellSet(int numOfCells,
+private void reinitializeCellSet(int numOfCells,
  KeyValueScanner segmentScanner,
  CellSet oldCellSet,
  MemStoreCompactionStrategy.Action action)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
index e2f3913..d2534b2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -160,7 +160,7 @@ extends Segment
-dataSize,
 FIXED_OVERHEAD,
 heapSize,
 minSequenceId,
 tagsPresent,
 timeRangeTracker
+FIXED_OVERHEAD,
 minSequenceId,
 segmentSize,
 tagsPresent,
 timeRangeTracker
 
 
 
@@ -235,15 +235,23 @@ extends 
 protected long
-indexEntrySize() 
+indexEntryOffHeapSize(boolean offHeap) 
 
 
+protected long
+indexEntryOnHeapSize(boolean onHeap) 
+
+
+protected long
+indexEntrySize() 
+
+
 private void
 initializeCellSet(int numOfCells,
  MemStoreSegmentsIterator iterator,
  MemStoreCompactionStrategy.Action action) 
 
-
+
 private void
 reinitializeCellSet(int numOfCells,
KeyValueScanner segmentScanner,
@@ -263,7 +271,7 @@ extends Segment
-close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCellSet,
 getComparator,
 getFirstAfter,
 getMemStoreLAB,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 internalAdd,
 isEmpty,
 isTagsPresent,
 iterator, keySize,
 last,
 maybeCloneWithAllocator,
 setCellSet,
 shouldSeek,
 tailSet,
 updateMetaInfo, updateMetaInfo
+close,
 compare,
 compareRows,

[50/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 9a3ccc6..0328956 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 3544
 0
 0
-16546
+16537
 
 Files
 
@@ -872,7 +872,7 @@
 org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
 0
 0
-16
+15
 
 org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java
 0
@@ -6399,150 +6399,140 @@
 0
 3
 
-org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
-0
-0
-1
-
 org/apache/hadoop/hbase/regionserver/CellChunkMap.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/CellFlatMap.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/regionserver/CellSet.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/CellSink.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/Chunk.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/regionserver/CompactSplit.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischargeHandler.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
 0
 0
-8
-
+6
+
 org/apache/hadoop/hbase/regionserver/CompactionTool.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java
 0
 0
 31
-
+
 org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DumpRegionServerMetrics.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
 0
 0
 2
-
-org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java
-0
-0
-3
 
 org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
 0
@@ -6577,7 +6567,7 @@
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
 0
-126
+124
 
 org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
 0
@@ -10344,12 +10334,12 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly";>LeftCurly
-199
+198
  Error
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces";>NeedBraces
-1977
+1975
  Error
 
 coding
@@ -10424,7 +10414,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports";>UnusedImports
 
 processJavadoc: "true"
-123
+122
  Error
 
 indentation
@@ -10435,14 +10425,14 @@
 caseIndent: "2"
 basicOffset: "2"
 lineWrappingIndentation: "2"
-5050
+5049
  Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation
 
 offset: "2"
-808
+806
  Error
 
 
@@ -10465,7 +10455,7 @@
 
 max: "100"
 ignorePattern: "^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated"
-1640
+1638
  Error
 
 
@@ -18273,67 +18263,67 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2786
+2787
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2801
+2802
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2806
+2807
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2814
+2815
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2880
+2881
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2881
+2882
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2882
+2883
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2883
+2884
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2905
+2906
 
  Error
 javadoc
 No

[24/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index 28f226e..3152619 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -328,7 +328,7 @@
 320long estimatedHeapSizeOfResult = 0;
 321// We don't make Iterator here
 322for (Cell cell : rs.rawCells()) {
-323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedHeapSizeOf(cell);
+323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedSizeOfCell(cell);
 324}
 325return estimatedHeapSizeOfResult;
 326  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
index 28f226e..3152619 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
@@ -328,7 +328,7 @@
 320long estimatedHeapSizeOfResult = 0;
 321// We don't make Iterator here
 322for (Cell cell : rs.rawCells()) {
-323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedHeapSizeOf(cell);
+323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedSizeOfCell(cell);
 324}
 325return estimatedHeapSizeOfResult;
 326  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
index 28f226e..3152619 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
@@ -328,7 +328,7 @@
 320long estimatedHeapSizeOfResult = 0;
 321// We don't make Iterator here
 322for (Cell cell : rs.rawCells()) {
-323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedHeapSizeOf(cell);
+323  estimatedHeapSizeOfResult += 
PrivateCellUtil.estimatedSizeOfCell(cell);
 324}
 325return estimatedHeapSizeOfResult;
 326  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
index 18bd3f6..cfa5e40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
@@ -496,7 +496,7 @@
 488  size * ClassSize.REFERENCE);
 489
 490  for(Cell cell : entry.getValue()) 
{
-491heapsize += 
PrivateCellUtil.estimatedHeapSizeOf(cell);
+491heapsize += 
PrivateCellUtil.estimatedSizeOfCell(cell);
 492  }
 493}
 494heapsize += getAttributeSize();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 18bd3f6..cfa5e40 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -496,7 +496,7 @@
 488  size * ClassSize.REFERENCE);
 489
 490  for(Cell cell : entry.getValue()) 
{
-491heapsize += 
PrivateCellUtil.estimatedHeapSizeOf(cell);
+491heapsize += 
PrivateCellUtil.estimatedSizeOfCell(cell);
 492  }
 493}
 494heapsize += getAttributeSize();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
--
diff --git a/devapid

hbase-site git commit: INFRA-10751 Empty commit

2018-02-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 991224b95 -> e82a131d3


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/e82a131d
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/e82a131d
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/e82a131d

Branch: refs/heads/asf-site
Commit: e82a131d3b7a1faa8fe17cbf469e5546aacb37d5
Parents: 991224b
Author: jenkins 
Authored: Sun Feb 18 15:13:36 2018 +
Committer: jenkins 
Committed: Sun Feb 18 15:13:36 2018 +

--

--




[48/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html 
b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
index c67b046..6b99201 100644
--- a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
@@ -493,23 +493,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-static long
-estimatedHeapSizeOf(Cell cell)
-This is an estimate of the heap space occupied by a 
cell.
-
-
-
 static int
 estimatedSerializedSizeOf(Cell cell)
 Estimate based on keyvalue's serialization format in the 
RPC layer.
 
 
-
+
 static int
 estimatedSerializedSizeOfKey(Cell cell)
 Calculates the serialized key size.
 
 
+
+static long
+estimatedSizeOfCell(Cell cell)
+This is an estimate of the heap space occupied by a 
cell.
+
+
 
 static ByteRange
 fillFamilyRange(Cell cell,
@@ -2140,17 +2140,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 
 
 
-estimatedHeapSizeOf
-public static long estimatedHeapSizeOf(Cell cell)
+estimatedSizeOfCell
+public static long estimatedSizeOfCell(Cell cell)
 This is an estimate of the heap space occupied by a cell. 
When the cell is of type
  HeapSize we 
call HeapSize.heapSize()
 so cell can give a correct value. In other
  cases we just consider the bytes occupied by the cell components ie. row, CF, 
qualifier,
- timestamp, type, value and tags.
+ timestamp, type, value and tags.
+ Note that this can be the JVM heap space (on-heap) or the OS heap 
(off-heap)
 
 Parameters:
 cell - 
@@ -2165,7 +2166,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getCellKeySerializedAsKeyValueKey
-public static byte[] getCellKeySerializedAsKeyValueKey(Cell cell)
+public static byte[] getCellKeySerializedAsKeyValueKey(Cell cell)
 This method exists just to encapsulate how we serialize 
keys. To be replaced by a factory that
  we query to figure what the Cell implementation is and then, what 
serialization engine to use
  and further, how to serialize the key for inclusion in hfile index. 
TODO.
@@ -2184,7 +2185,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRow
-public static Cell createFirstOnRow(Cell cell)
+public static Cell createFirstOnRow(Cell cell)
 Create a Cell that is smaller than all other possible Cells 
for the given Cell's row.
 
 Parameters:
@@ -2200,7 +2201,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRow
-public static Cell createFirstOnRow(byte[] row,
+public static Cell createFirstOnRow(byte[] row,
 int roffset,
 short rlength)
 
@@ -2211,7 +2212,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRow
-public static Cell createFirstOnRow(byte[] row,
+public static Cell createFirstOnRow(byte[] row,
 byte[] family,
 byte[] col)
 
@@ -,7 +2223,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRow
-public static Cell createFirstOnRow(byte[] row,
+public static Cell createFirstOnRow(byte[] row,
 int roffset,
 short rlength,
 byte[] family,
@@ -2239,7 +2240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRow
-public static Cell createFirstOnRow(byte[] row)
+public static Cell createFirstOnRow(byte[] row)
 
 
 
@@ -2248,7 +2249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRowFamily
-public static Cell createFirstOnRowFamily(Cell cell,
+public static Cell createFirstOnRowFamily(Cell cell,
   byte[] fArray,
   int foff,
   int flen)
@@ -2260,7 +2261,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRowCol
-public static Cell createFirstOnRowCol(Cell cell)
+public static Cell createFirstOnRowCol(Cell cell)
 
 
 
@@ -2269,7 +2270,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnNextRow
-public static Cell createFirstOnNextRow(Cell cell)
+public static Cell createFirstOnNextRow(Cell cell)
 
 
 
@@ -2278,7 +2279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createFirstOnRowCol
-public static Cell createFirstOnRowCol(Cell cell,
+public static Cell createFirstOnRowCol(Cell cell,
byte[] qArray,
  

[49/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 0faca7a..139d7d6 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2018 The Apache Software Foundation
 
   File: 3544,
- Errors: 16546,
+ Errors: 16537,
  Warnings: 0,
  Infos: 0
   
@@ -3555,7 +3555,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -7727,7 +7727,7 @@ under the License.
   0
 
 
-  16
+  15
 
   
   
@@ -13845,7 +13845,7 @@ under the License.
   0
 
 
-  126
+  124
 
   
   
@@ -26263,7 +26263,7 @@ under the License.
   0
 
 
-  8
+  6
 
   
   
@@ -49097,7 +49097,7 @@ under the License.
   0
 
 
-  3
+  0
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/coc.html
--
diff --git a/coc.html b/coc.html
index 418b0aa..861f0ae 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-17
+  Last Published: 
2018-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index a337292..81cefda 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-17
+  Last Published: 
2018-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index da89989..94840a6 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-17
+  Last Published: 
2018-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 1cc4e5b..15deff7 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -1035,7 +1035,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-17
+  Last Published: 
2018-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 40c7183..07c8943 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-17
+  Last Published

[43/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 7c184cb..e614678 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class HRegion.BatchOperation
+private abstract static class HRegion.BatchOperation
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Class that tracks the progress of a batch operations, 
accumulating status codes and tracking
  the index at which processing is proceeding. These batch operations may get 
split into
@@ -411,7 +411,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 operations
-protected final T[] operations
+protected final T[] operations
 
 
 
@@ -420,7 +420,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 retCodeDetails
-protected final OperationStatus[] retCodeDetails
+protected final OperationStatus[] retCodeDetails
 
 
 
@@ -429,7 +429,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 walEditsFromCoprocessors
-protected final WALEdit[] walEditsFromCoprocessors
+protected final WALEdit[] walEditsFromCoprocessors
 
 
 
@@ -438,7 +438,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 familyCellMaps
-protected final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList>[] familyCellMaps
+protected final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList>[] familyCellMaps
 
 
 
@@ -447,7 +447,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -456,7 +456,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nextIndexToProcess
-protected int nextIndexToProcess
+protected int nextIndexToProcess
 
 
 
@@ -465,7 +465,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 observedExceptions
-protected final HRegion.ObservedExceptionsInBatch observedExceptions
+protected final HRegion.ObservedExceptionsInBatch observedExceptions
 
 
 
@@ -474,7 +474,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 durability
-protected Durability durability
+protected Durability durability
 
 
 
@@ -483,7 +483,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 atomic
-protected boolean atomic
+protected boolean atomic
 
 
 
@@ -502,7 +502,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BatchOperation
-public BatchOperation(HRegion region,
+public BatchOperation(HRegion region,
   T[] operations)
 
 
@@ -520,7 +520,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 visitBatchOperations
-public void visitBatchOperations(boolean pendingOnly,
+public void visitBatchOperations(boolean pendingOnly,
  int lastIndexExclusive,
  HRegion.BatchOperation.Visitor visitor)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -537,7 +537,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMutation
-public abstract Mutation getMutation(int index)
+public abstract Mutation getMutation(int index)
 
 
 
@@ -546,7 +546,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getNonceGroup
-public abstract long getNonceGroup(int index)
+public abstract long getNonceGroup(int index)
 
 
 
@@ -555,7 +555,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getNonce
-public abstract long getNonce(int index)
+public abstract long getNonce(int index)
 
 
 
@@ -564,7 +564,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMutationsForCoprocs
-public abstract Mutation[] getMutationsForCoprocs()
+public abstract Mutation[] getMutationsForCoprocs()