hbase git commit: HBASE-15251 During a cluster restart, Hmaster thinks it is a failover by mistake (Clara Xiong)

2016-02-18 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master f352f3c37 -> 8eedc9675


HBASE-15251 During a cluster restart, Hmaster thinks it is a failover by mistake (Clara Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8eedc967
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8eedc967
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8eedc967

Branch: refs/heads/master
Commit: 8eedc967515a4d9133068962fe029160d24e6f95
Parents: f352f3c
Author: tedyu 
Authored: Thu Feb 18 23:46:54 2016 -0800
Committer: tedyu 
Committed: Thu Feb 18 23:46:54 2016 -0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 80 +++-
 1 file changed, 61 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8eedc967/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 7639004..53a080e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,6 +46,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
@@ -92,8 +95,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * Manages and performs region assignment.
  * Related communications with regionserver are all done over RPC.
@@ -443,31 +444,43 @@ public class AssignmentManager {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
       }
-    } else {
+      // Check if there are any regions on these servers
+      failover = false;
+      for (ServerName serverName : serverManager.getDeadServers().copyServerNames()) {
+        if (regionStates.getRegionAssignments().values().contains(serverName)) {
+          LOG.debug("Found regions on dead server: " + serverName);
+          failover = true;
+          break;
+        }
+      }
+    }
+    Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
+    if (!failover) {
       // If any one region except meta is assigned, it's a failover.
-      Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
       for (Map.Entry<HRegionInfo, ServerName> en:
           regionStates.getRegionAssignments().entrySet()) {
         HRegionInfo hri = en.getKey();
         if (!hri.isMetaTable()
             && onlineServers.contains(en.getValue())) {
-          LOG.debug("Found " + hri + " out on cluster");
+          LOG.debug("Found region " + hri + " out on cluster");
           failover = true;
           break;
         }
       }
-      if (!failover) {
-        // If any region except meta is in transition on a live server, it's a failover.
-        Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
-        if (!regionsInTransition.isEmpty()) {
-          for (RegionState regionState: regionsInTransition.values()) {
-            ServerName serverName = regionState.getServerName();
-            if (!regionState.getRegion().isMetaRegion()
-                && serverName != null && onlineServers.contains(serverName)) {
-              LOG.debug("Found " + regionState + " in RITs");
-              failover = true;
-              break;
-            }
+    }
+    if (!failover) {
+      // If any region except meta is in transition on a live server, it's a failover.
+      Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
+      if (!regionsInTransition.isEmpty()) {
+        for (RegionState regionState: regionsInTransition.values()) {
+          ServerName serverName = regionState.getServerName();
+          if (!regionState.getRegion().isMetaRegion()
+              && serverName != null && onlineServers.contains(serverName)) {
+            LOG.debug("Found " + regionState + " for region " +
+              regionState.getRegion().getRegionNameAsString() + " for server " +
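Taken together, the patch turns the failover decision into three explicit probes. A condensed sketch as a hypothetical helper (not the verbatim method; serverManager and regionStates are the surrounding AssignmentManager fields):

// Condensed sketch of the failover checks in this patch (hypothetical helper).
private boolean looksLikeFailover() {
  // 1. A dead server still holding region assignments implies a failover.
  for (ServerName dead : serverManager.getDeadServers().copyServerNames()) {
    if (regionStates.getRegionAssignments().values().contains(dead)) {
      return true;
    }
  }
  Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
  // 2. Any non-meta region already assigned to a live server implies a failover.
  for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
    if (!en.getKey().isMetaTable() && onlineServers.contains(en.getValue())) {
      return true;
    }
  }
  // 3. Any non-meta region in transition on a live server implies a failover.
  for (RegionState rs : regionStates.getRegionsInTransition().values()) {
    ServerName sn = rs.getServerName();
    if (!rs.getRegion().isMetaRegion() && sn != null && onlineServers.contains(sn)) {
      return true;
    }
  }
  return false; // otherwise treat this as a clean cluster (re)start, not a failover
}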
+

svn commit: r12451 - /dev/hbase/hbase-1.2.0RC4/

2016-02-18 Thread busbey
Author: busbey
Date: Fri Feb 19 06:26:14 2016
New Revision: 12451

Log:
HBase 1.2.0 RC4 artifacts.

Added:
dev/hbase/hbase-1.2.0RC4/
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz   (with props)
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.asc
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.md5
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.mds
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz   (with props)
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.asc
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.md5
dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.mds

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.asc
==
--- dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.asc Fri Feb 19 06:26:14 2016
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWxrQbAAoJEOZeEdQNgNt8tgsP/iyryoxU9Z30+YHTCFH++5Ab
+hua8xnOzIxk9R1z0iC2OlHmEIaaCU682LN+n5j+R+/UwSgldohOogHOEwqi3LZvi
+f5G9mIcEyiZTs45snHn9ENTCyUL5x9i7O6HTRpTGZ+nBQErkicBvwTXiZp2pdVL8
+5VljtN6mcLJmNOn42W5esBhG0Klx76XqOJPeaOV5niJry+bWDuKoqjluIXM3Vcp7
+B6eLbSzmlXC4OeEyahZkglb6pAWVfMTCSt3iFGBfn5amijwcTHYROkHpmP6Kxh0g
+moj4dx0yU+Te5UYFyBB/83CPWH5nHlgnKJ28SFjbvG8r67F1edZWbUSGznfB99u2
+50WnkfzDr1dPNCjRHFWzbsGjCt7PpnZromhUs/8qq7Ph1sZkJCFrkG5NJfKq2hXv
+3CEKrzWFu1MyP33fA+awuiOdkREV+yfe8JatWHSq2qUR+3eK+QspEfmMzSFxP34q
+l9CY8dYJoxd7tYHq4+UVHqxsj2fd0xicb4d4q1s5KBOrAB01Pxe9CneiPX8kWxwT
+s4LQvUrADjiaCrCHJ0UGYAhEDYRikMdePrEKBzpbhsZv5DJsf1naDueoo5FSfwuh
+IxpJWlr6z0GFfiGhyu9KAlkM2GTSA/jkKV2s3TCYWcKBKnwtkF46O3q6TexmbMRH
+75UP4TmDzSXz1o6Y83JJ
+=OrBA
+-----END PGP SIGNATURE-----

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.md5
==
--- dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.md5 (added)
+++ dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.md5 Fri Feb 19 06:26:14 2016
@@ -0,0 +1 @@
+c54710777a687a5a4737b2e4c0aac6b6  hbase-1.2.0-bin.tar.gz

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.mds
==
--- dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-bin.tar.gz.mds Fri Feb 19 06:26:14 2016
@@ -0,0 +1,17 @@
+hbase-1.2.0-bin.tar.gz:MD5 = C5 47 10 77 7A 68 7A 5A  47 37 B2 E4 C0 AA C6
+ B6
+hbase-1.2.0-bin.tar.gz:   SHA1 = D005 8F83 D591 FAE5 F835  14B6 BF87 663A 2628
+ F906
+hbase-1.2.0-bin.tar.gz: RMD160 = A1A9 0D5F 4E1F 5C51 0B65  25F4 B8C2 9287 685E
+ 0F97
+hbase-1.2.0-bin.tar.gz: SHA224 = 110C8ACC B5D9432E 13DC7459 236F1525 43C34687
+ 5B90B68D CADD8434
+hbase-1.2.0-bin.tar.gz: SHA256 = 72968E12 E979C6C1 969495DA FAF63BED 738513C4
+ F7D847A6 05107E53 6F9E2F73
+hbase-1.2.0-bin.tar.gz: SHA384 = A01EE6C7 0C180451 3C8944AD A58E4331 5C872D9B
+ A1376785 58E1CA8C CA6AD34F 77243B7C 065996DD
+ A3C7B26C 944F5924
+hbase-1.2.0-bin.tar.gz: SHA512 = 32985A45 1BDB75C9 6232EAEF 3B5064DF 096F9B08
+ 88B6A6A6 65843D6F 4EB7DA76 5F65ED81 0143B83B
+ 4A1596C8 24E6BEA9 8FF588C5 645B7665 6BCAE801
+ BE2C909A

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.asc
==
--- dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.asc (added)
+++ dev/hbase/hbase-1.2.0RC4/hbase-1.2.0-src.tar.gz.asc Fri Feb 19 06:26:14 2016
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWxrRIAAoJEOZeEdQNgNt8HTwP/0r4hu+864nzqX+G1c+3TYxS
+N4S5D73AzYYxA/jexQ1fMZ287+b+XH3wmgo/GrFZT25TgPAD3+XYYGmaj1xqQFxl
+/qRcSEWmsOJL8KK/a5Y5ZpeHFzgzywQkvDSKw4fuiqmY7Q6pTHPQN14m6clsJJZ+
+weiL04KEcE9mf8z1Wsnp9uaRxIgTdAvTSLJO5UNRRBGmFEmMNT1InJTCRXB8UX52
+tglVpif5s8VwUOvA3StVh00wSTdy8uwoo6p8B8erxIHPu7oj/4p5V+dUWLmcx/rq
+5w4iqylYpgNjJB7uor6YKaBbtfQ01mY5po5BBQfDvPl6yeBPaRrkUjq6SKXvzpRr
+f7bkHiQLj3lsC3393uBt5lN1EC
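A minimal Java sketch for checking the downloaded binary tarball against the .md5 sidecar published above (file name and expected digest taken from the listing; standard JDK APIs only):

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

// Minimal sketch: recompute the MD5 of hbase-1.2.0-bin.tar.gz and compare it
// with the digest published in hbase-1.2.0-bin.tar.gz.md5 above.
public final class VerifyMd5 {
  public static void main(String[] args) throws Exception {
    String expected = "c54710777a687a5a4737b2e4c0aac6b6"; // from the .md5 file above
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    try (InputStream in = Files.newInputStream(Paths.get("hbase-1.2.0-bin.tar.gz"))) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) != -1; ) {
        md5.update(buf, 0, n);
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : md5.digest()) {
      hex.append(String.format("%02x", b));
    }
    System.out.println(hex.toString().equals(expected) ? "MD5 OK" : "MD5 MISMATCH");
  }
}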

[hbase] Git Push Summary

2016-02-18 Thread busbey
Repository: hbase
Updated Tags:  refs/tags/1.2.0RC4 [created] 9157bf9f0


hbase git commit: HBASE-14025 update CHANGES.txt for 1.2 RC4

2016-02-18 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 d4388ba01 -> 25b281972


HBASE-14025 update CHANGES.txt for 1.2 RC4


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25b28197
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25b28197
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25b28197

Branch: refs/heads/branch-1.2
Commit: 25b281972df2f5b15c426c8963cbf77dd853a5ad
Parents: d4388ba
Author: Sean Busbey 
Authored: Thu Feb 18 22:09:39 2016 -0600
Committer: Sean Busbey 
Committed: Thu Feb 18 22:09:39 2016 -0600

--
 CHANGES.txt | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25b28197/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index ce6a09d..7ccf127 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,6 @@
 HBase Change Log
 
-Release Notes - HBase - Version 1.2.0 02/17/2016
+Release Notes - HBase - Version 1.2.0 02/22/2016
 
 ** Sub-task
 * [HBASE-12748] - RegionCoprocessorHost.execOperation creates too many iterator objects
@@ -112,6 +112,8 @@ Release Notes - HBase - Version 1.2.0 02/17/2016
 * [HBASE-15210] - Undo aggressive load balancer logging at tens of lines per millisecond
 * [HBASE-15224] - Undo  "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213
 * [HBASE-15238] - HFileReaderV2 prefetch overreaches; runs off the end of the data
+* [HBASE-15263] - TestIPv6NIOServerSocketChannel.testServerSocketFromLocalhostResolution can hang indefinetly
+* [HBASE-15270] - Use appropriate encoding for "filter" field in TaskMonitorTmpl.jamon
 
 ** Bug
 * [HBASE-5878] - Use getVisibleLength public api from HdfsDataInputStream from Hadoop-2.
@@ -336,6 +338,7 @@ Release Notes - HBase - Version 1.2.0 02/17/2016
 * [HBASE-14437] - ArithmeticException in ReplicationInterClusterEndpoint
 * [HBASE-14445] - ExportSnapshot does not honor -chmod option
 * [HBASE-14449] - Rewrite deadlock prevention for concurrent connection close
+* [HBASE-14460] - [Perf Regression] Merge of MVCC and SequenceId (HBASE-8763) slowed Increments, CheckAndPuts, batch operations
 * [HBASE-14463] - Severe performance downgrade when parallel reading a single key from BucketCache
 * [HBASE-14469] - Fix some comment, validation and logging around memstore lower limit configuration
 * [HBASE-14471] - Thrift -  HTTP Error 413 full HEAD if using kerberos authentication
@@ -469,6 +472,7 @@ Release Notes - HBase - Version 1.2.0 02/17/2016
 * [HBASE-15100] - Master WALProcs still never clean up
 * [HBASE-15102] - HeapMemoryTuner can "overtune" memstore size and suddenly drop it into blocking zone
 * [HBASE-15104] - Occasional failures due to NotServingRegionException in IT tests
+* [HBASE-15122] - Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings
 * [HBASE-15133] - Data loss after compaction when a row has more than Integer.MAX_VALUE columns
 * [HBASE-15139] - Connection manager doesn't pass client metrics to RpcClient
 * [HBASE-15145] - HBCK and Replication should authenticate to zookepeer using server principal
@@ -486,6 +490,9 @@ Release Notes - HBase - Version 1.2.0 02/17/2016
 * [HBASE-15218] - On RS crash and replay of WAL, loosing all Tags in Cells
 * [HBASE-15221] - HTableMultiplexer improvements (stale region locations and resource leaks)
 * [HBASE-15252] - Data loss when replaying wal if HDFS timeout
+* [HBASE-15279] - OrderedBytes.isEncodedValue does not check for int8 and int16 types
+* [HBASE-15283] - Revert to IOException in TimeRange constructor to maintain API compat in 1.x line
+* [HBASE-15285] - Forward-port respect for isReturnResult from HBASE-15095
 
 ** Improvement
 * [HBASE-6617] - ReplicationSourceManager should be able to track multiple WAL paths
@@ -590,6 +597,7 @@ Release Notes - HBase - Version 1.2.0 02/17/2016
 * [HBASE-15076] - Add getScanner(Scan scan, List<KeyValueScanner> additionalScanners) API into Region interface
 * [HBASE-15111] - "hbase version" should write to stdout
 * [HBASE-15129] - Set default value for hbase.fs.tmp.dir rather than fully depend on hbase-default.xml
+* [HBASE-15211] - Don't run the CatalogJanitor if there are regions in transition
 * [HBASE-15229] - Canary Tools should not call System.Exit on error
 
 ** New Feature



hbase git commit: HBASE-15282 Bump hbase-spark to use Spark 1.6.0

2016-02-18 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/master d2ba87509 -> f352f3c37


HBASE-15282 Bump hbase-spark to use Spark 1.6.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f352f3c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f352f3c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f352f3c3

Branch: refs/heads/master
Commit: f352f3c3717e1ebb129b8fb476cff6c8daa4ac06
Parents: d2ba875
Author: Jonathan M Hsieh 
Authored: Thu Feb 18 17:31:42 2016 -0800
Committer: Jonathan M Hsieh 
Committed: Thu Feb 18 17:31:42 2016 -0800

--
 hbase-spark/pom.xml  | 2 +-
 .../scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f352f3c3/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 251ea59..7c7590e 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -37,7 +37,7 @@
 Apache HBase - Spark
 
 
-<spark.version>1.3.0</spark.version>
+<spark.version>1.6.0</spark.version>
 <scala.version>2.10.4</scala.version>
 <scala.binary.version>2.10</scala.binary.version>
 true

http://git-wip-us.apache.org/repos/asf/hbase/blob/f352f3c3/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala
--
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala
index 30ddfc4..04dd9ba 100644
--- a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/DefaultSourceSuite.scala
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.spark
 import org.apache.hadoop.hbase.client.{Put, ConnectionFactory}
 import org.apache.hadoop.hbase.spark.datasources.HBaseSparkConf
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.{TableNotFoundException, TableName, HBaseTestingUtility}
+import org.apache.hadoop.hbase.{TableName, HBaseTestingUtility}
 import org.apache.spark.sql.{DataFrame, SQLContext}
 import org.apache.spark.{SparkConf, SparkContext, Logging}
 import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
@@ -514,7 +514,7 @@ BeforeAndAfterEach with BeforeAndAfterAll with Logging {
 
 
   test("Test table that doesn't exist") {
-    intercept[TableNotFoundException] {
+    intercept[Exception] {
       df = sqlContext.load("org.apache.hadoop.hbase.spark",
         Map("hbase.columns.mapping" ->
           "KEY_FIELD STRING :key, A_FIELD STRING c:a, B_FIELD STRING c:b,",



[10/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
@@ -184,2145 +184,2183 @@
 176  // Min batch size when replay WAL edits
 177  private final int minBatchSize;
 178
-179  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
-180      FileSystem fs, LastSequenceId idChecker,
-181      CoordinatedStateManager csm, RecoveryMode mode) {
-182    this.conf = HBaseConfiguration.create(conf);
-183    String codecClassName = conf
-184        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-185    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
-186    this.rootDir = rootDir;
-187    this.fs = fs;
-188    this.sequenceIdChecker = idChecker;
-189    this.csm = (BaseCoordinatedStateManager)csm;
-190    this.walFactory = factory;
-191    this.controller = new PipelineController();
-192
-193    entryBuffers = new EntryBuffers(controller,
-194        this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-195            128*1024*1024));
+179  // the file being split currently
+180  private FileStatus fileBeingSplit;
+181
+182  @VisibleForTesting
+183  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
+184      FileSystem fs, LastSequenceId idChecker,
+185      CoordinatedStateManager csm, RecoveryMode mode) {
+186    this.conf = HBaseConfiguration.create(conf);
+187    String codecClassName = conf
+188        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+189    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
+190    this.rootDir = rootDir;
+191    this.fs = fs;
+192    this.sequenceIdChecker = idChecker;
+193    this.csm = (BaseCoordinatedStateManager)csm;
+194    this.walFactory = factory;
+195    this.controller = new PipelineController();
 196
-197    // a larger minBatchSize may slow down recovery because replay writer has to wait for
-198    // enough edits before replaying them
-199    this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
-200    this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);
-201
-202    this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
-203    if (csm != null && this.distributedLogReplay) {
-204      outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads);
-205    } else {
-206      if (this.distributedLogReplay) {
-207        LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
-208      }
-209      this.distributedLogReplay = false;
-210      outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
-211    }
-212
-213  }
-214
-215  /**
-216   * Splits a WAL file into region's recovered-edits directory.
-217   * This is the main entry point for distributed log splitting from SplitLogWorker.
-218   * <p>
-219   * If the log file has N regions then N recovered.edits files will be produced.
-220   * <p>
-221   * @param rootDir
-222   * @param logfile
-223   * @param fs
-224   * @param conf
-225   * @param reporter
-226   * @param idChecker
-227   * @param cp coordination state manager
-228   * @return false if it is interrupted by the progress-able.
-229   * @throws IOException
-230   */
-231  public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
-232      Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
-233      CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
-234    WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
-235    return s.splitLogFile(logfile, reporter);
-236  }
-237
-238  // A wrapper to split one log folder using the method used by distributed
-239  // log splitting. Used by tools and unit tests. It should be package private.
-240  // It is public only because TestWALObserver is in a different package,
-241  // which uses this method to do log splitting.
-242  @VisibleForTesting
-243  public static List<Path> split(Path rootDir, Path logDir, Path oldLogDir,
-244      FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
-245    final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
-246        Collections.singletonList(logDir), null);
-247    List<Path> splits = new ArrayList<Path>();
-248    if (logfiles != null && logfiles.length > 0) {
-249      for (FileStatus logfile: logfiles) {
-250
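For orientation, a hedged usage sketch of the static WALSplitter.split wrapper, using the signature visible in the removed lines above; the paths and factory id are hypothetical:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

// Hedged sketch: split every WAL under one server's log directory, the way
// tools and unit tests drive the static wrapper shown above.
public final class SplitDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/hbase");               // hypothetical HBase root dir
    Path logDir = new Path(rootDir, "WALs/rs1");     // hypothetical per-server WAL dir
    Path oldLogDir = new Path(rootDir, "oldWALs");
    WALFactory factory = new WALFactory(conf, null, "split-demo"); // factory id is arbitrary
    List<Path> recoveredEdits = WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, factory);
    for (Path p : recoveredEdits) {
      System.out.println("recovered edits file: " + p);
    }
  }
}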


[27/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -6980,880 +6980,882 @@
 6972      lock(this.updatesLock.readLock());
 6973      try {
 6974        Result cpResult = doCoprocessorPreCall(op, mutation);
-6975        if (cpResult != null) return cpResult;
-6976        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
-6977        Map<Store, List<Cell>> forMemStore =
-6978            new HashMap<Store, List<Cell>>(mutation.getFamilyCellMap().size());
-6979        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
-6980        // what to return back to the client (in 'forMemStore' and 'results' respectively).
-6981        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
-6982        // Actually write to WAL now if a walEdit to apply.
-6983        if (walEdit != null && !walEdit.isEmpty()) {
-6984          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
-6985        } else {
-6986          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
-6987          // transaction.
-6988          recordMutationWithoutWal(mutation.getFamilyCellMap());
-6989          writeEntry = mvcc.begin();
-6990        }
-6991        // Now write to MemStore. Do it a column family at a time.
-6992        long sequenceId = writeEntry.getWriteNumber();
-6993        for (Map.Entry<Store, List<Cell>> e: forMemStore.entrySet()) {
-6994          accumulatedResultSize +=
-6995              applyToMemstore(e.getKey(), e.getValue(), true, false, sequenceId);
-6996        }
-6997        mvcc.completeAndWait(writeEntry);
-6998        writeEntry = null;
-6999      } finally {
-7000        this.updatesLock.readLock().unlock();
-7001      }
-7002      // If results is null, then client asked that we not return the calculated results.
-7003      return results !=  null? Result.create(results): null;
-7004    } finally {
-7005      // Call complete always, even on success. doDelta is doing a Get READ_UNCOMMITTED when it goes
-7006      // to get current value under an exclusive lock so no need so no need to wait to return to
-7007      // the client. Means only way to read-your-own-increment or append is to come in with an
-7008      // a 0 increment.
-7009      if (writeEntry != null) mvcc.complete(writeEntry);
-7010      rowLock.release();
-7011      // Request a cache flush if over the limit.  Do it outside update lock.
-7012      if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-7013      closeRegionOperation(op);
-7014      if (this.metricsRegion != null) {
-7015        switch (op) {
-7016          case INCREMENT:
-7017            this.metricsRegion.updateIncrement();
-7018            break;
-7019          case APPEND:
-7020            this.metricsRegion.updateAppend();
-7021            break;
-7022          default:
+6975        if (cpResult != null) {
+6976          return returnResults? cpResult: null;
+6977        }
+6978        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
+6979        Map<Store, List<Cell>> forMemStore =
+6980            new HashMap<Store, List<Cell>>(mutation.getFamilyCellMap().size());
+6981        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
+6982        // what to return back to the client (in 'forMemStore' and 'results' respectively).
+6983        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
+6984        // Actually write to WAL now if a walEdit to apply.
+6985        if (walEdit != null && !walEdit.isEmpty()) {
+6986          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
+6987        } else {
+6988          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
+6989          // transaction.
+6990          recordMutationWithoutWal(mutation.getFamilyCellMap());
+6991          writeEntry = mvcc.begin();
+6992        }
+6993        // Now write to MemStore. Do it a column family at a time.
+6994        long sequenceId = writeEntry.getWriteNumber();
+6995        for (Map.Entry<Store, List<Cell>> e: forMemStore.entrySet()) {
+6996          accumulatedResultSize +=
+6997              applyToMemstore(e.getKey(), e.getValue(), true, false, sequenceId);
+
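Stripped of the rendered line numbers, the apply sequence above is easier to follow. A condensed sketch (not verbatim HRegion code; writeEntry, forMemStore, results and accumulatedResultSize belong to the surrounding method):

// Condensed sketch of the doDelta apply sequence shown above (not verbatim).
WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
if (walEdit != null && !walEdit.isEmpty()) {
  // Durable path: the WAL append hands back the mvcc write entry.
  writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
} else {
  // WAL skipped: account for the unlogged mutation, then open an mvcc transaction.
  recordMutationWithoutWal(mutation.getFamilyCellMap());
  writeEntry = mvcc.begin();
}
// Apply to the memstore one column family at a time under the mvcc sequence id.
long sequenceId = writeEntry.getWriteNumber();
for (Map.Entry<Store, List<Cell>> e : forMemStore.entrySet()) {
  accumulatedResultSize += applyToMemstore(e.getKey(), e.getValue(), true, false, sequenceId);
}
mvcc.completeAndWait(writeEntry); // publish the edits to readers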

[29/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -6980,880 +6980,882 @@
(Hunk body identical to the HRegion.WriteState.html excerpt above.)

[16/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
@@ -184,2145 +184,2183 @@
(Hunk body identical to the WALSplitter.html excerpt above.)


[44/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index 8dc1466..6fb7a0a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
(Generated Javadoc page; link markup trimmed to the readable text.)
@@ -152,10 +152,6 @@
 HEAP_OVERHEAD
-private static String RETURN_RESULTS    (row removed from the field summary)
 private TimeRange tr
@@ -402,23 +398,13 @@
 private static final long HEAP_OVERHEAD
-RETURN_RESULTS
-private static final String RETURN_RESULTS
-See Also: Constant Field Values    (constant-field detail section removed)
(The intervening hunks change only internal anchor ids for the unchanged members:
Increment(byte[] row), Increment(byte[] row, int offset, int length), Increment(Increment i),
add(Cell), addColumn(byte[], byte[], long), getTimeRange(), setTimeRange(long, long),
numFamilies(), hasFamilies(), getFamilyMapOfLongs().)
@@ -548,8 +534,11 @@
 setReturnResults
-public Increment setReturnResults(boolean returnResults)
+public Increment setReturnResults(boolean returnResults)
+Overrides: setReturnResults in class Mutation
 Parameters: returnResults - True (default) if the increment operation should return the results. A
   client that is not interested in the result can save network bandwidth setting this to false.
@@ -560,8 +549,11 @@
 isReturnResults
-public boolean isReturnResults()
-Returns: current value for returnResults
+public boolean isReturnResults()
+Overrides: isReturnResults in class Mutation
+Returns: current setting for returnResults
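For client code the net effect is that returnResults is now honored uniformly across Increment and Append. A hedged usage sketch (the connection and table name are hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: a caller that does not need the post-increment value opts out
// of the Result payload; the server-side guard shown earlier then returns null.
public final class IncrementNoResult {
  public static void incrementQuietly(Connection connection) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
      Increment inc = new Increment(Bytes.toBytes("row1"));
      inc.addColumn(Bytes.toBytes("c"), Bytes.toBytes("counter"), 1L);
      inc.setReturnResults(false); // skip shipping the new value back, saving bandwidth
      Result r = table.increment(inc); // carries no cells when results are suppressed
    }
  }
}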

[49/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
index 93d92df..28a04b5 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
@@ -55,136 +55,135 @@
 047@InterfaceAudience.Public
 048@InterfaceStability.Stable
 049public class Append extends Mutation {
-050  private static final String RETURN_RESULTS = "_rr_";
-051  /**
-052   * @param returnResults
-053   *          True (default) if the append operation should return the results.
-054   *          A client that is not interested in the result can save network
-055   *          bandwidth setting this to false.
-056   */
-057  public Append setReturnResults(boolean returnResults) {
-058    setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults));
-059    return this;
-060  }
-061
-062  /**
-063   * @return current setting for returnResults
-064   */
+050  /**
+051   * @param returnResults
+052   *          True (default) if the append operation should return the results.
+053   *          A client that is not interested in the result can save network
+054   *          bandwidth setting this to false.
+055   */
+056  public Append setReturnResults(boolean returnResults) {
+057    super.setReturnResults(returnResults);
+058    return this;
+059  }
+060
+061  /**
+062   * @return current setting for returnResults
+063   */
+064  // This method makes public the superclasses's protected method.
 065  public boolean isReturnResults() {
-066    byte[] v = getAttribute(RETURN_RESULTS);
-067    return v == null ? true : Bytes.toBoolean(v);
-068  }
-069
-070  /**
-071   * Create a Append operation for the specified row.
-072   * <p>
-073   * At least one column must be appended to.
-074   * @param row row key; makes a local copy of passed in array.
-075   */
-076  public Append(byte[] row) {
-077    this(row, 0, row.length);
-078  }
-079  /**
-080   * Copy constructor
-081   * @param a
-082   */
-083  public Append(Append a) {
-084    this.row = a.getRow();
-085    this.ts = a.getTimeStamp();
-086    this.familyMap.putAll(a.getFamilyCellMap());
-087    for (Map.Entry<String, byte[]> entry : a.getAttributesMap().entrySet()) {
-088      this.setAttribute(entry.getKey(), entry.getValue());
-089    }
-090  }
-091
-092  /** Create a Append operation for the specified row.
-093   * <p>
-094   * At least one column must be appended to.
-095   * @param rowArray Makes a copy out of this buffer.
-096   * @param rowOffset
-097   * @param rowLength
-098   */
-099  public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
-100    checkRow(rowArray, rowOffset, rowLength);
-101    this.row = Bytes.copy(rowArray, rowOffset, rowLength);
-102  }
-103
-104  /**
-105   * Add the specified column and value to this Append operation.
-106   * @param family family name
-107   * @param qualifier column qualifier
-108   * @param value value to append to specified column
-109   * @return this
-110   */
-111  public Append add(byte [] family, byte [] qualifier, byte [] value) {
-112    KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
-113    return add(kv);
-114  }
-115
-116  /**
-117   * Add column and value to this Append operation.
-118   * @param cell
-119   * @return This instance
-120   */
-121  @SuppressWarnings("unchecked")
-122  public Append add(final Cell cell) {
-123    // Presume it is KeyValue for now.
-124    byte [] family = CellUtil.cloneFamily(cell);
-125    List<Cell> list = this.familyMap.get(family);
-126    if (list == null) {
-127      list = new ArrayList<Cell>();
-128    }
-129    // find where the new entry should be placed in the List
-130    list.add(cell);
-131    this.familyMap.put(family, list);
-132    return this;
-133  }
-134
-135  @Override
-136  public Append setAttribute(String name, byte[] value) {
-137    return (Append) super.setAttribute(name, value);
-138  }
-139
-140  @Override
-141  public Append setId(String id) {
-142    return (Append) super.setId(id);
-143  }
-144
-145  @Override
-146  public Append setDurability(Durability d) {
-147    return (Append) super.setDurability(d);
-148  }
-149
-150  @Override
-151  public Append setFamilyCellMap(NavigableMap<byte[], List<Cell>> map) {
-152    return (Append) super.setFamilyCellMap(map);
-153  }
-154
-155  @Override
-156  public Append setClusterIds(List<UUID> clusterIds) {
-157    return (Append) super.setClusterIds(clusterIds);
-158  }
-159
-160  @Override
-161  public Append setCellVisibility(CellVisibility expression) {
-162    return (Append) super.setCellVisibility(expression);
-163  }
-164
-165  @Override
-166  public App
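The Append source above and the Increment page earlier show the same refactor: the per-class "_rr_" attribute moved into Mutation, whose protected accessors the subclasses re-expose as public. A hedged sketch of the superclass side, reconstructed from the removed Append code and the "makes public the superclasses's protected method" comment (not verbatim Mutation source; unrelated members elided):

import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of the shared implementation now assumed to live in Mutation.
public abstract class Mutation extends OperationWithAttributes {
  private static final String RETURN_RESULTS = "_rr_"; // same key the old Append used

  protected Mutation setReturnResults(boolean returnResults) {
    setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults));
    return this;
  }

  protected boolean isReturnResults() {
    byte[] v = getAttribute(RETURN_RESULTS);
    return v == null ? true : Bytes.toBoolean(v); // default: return results
  }
}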


[05/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.MockWAL.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.MockWAL.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.MockWAL.html
index aa5da14..b06a764 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.MockWAL.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.MockWAL.html
@@ -41,1157 +41,1234 @@
 033import java.lang.reflect.Field;
 034import java.security.PrivilegedExceptionAction;
 035import java.util.ArrayList;
-036import java.util.Collection;
-037import java.util.HashSet;
-038import java.util.List;
-039import java.util.Set;
-040import java.util.concurrent.atomic.AtomicBoolean;
-041import java.util.concurrent.atomic.AtomicInteger;
-042
-043import org.apache.commons.logging.Log;
-044import org.apache.commons.logging.LogFactory;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.fs.FSDataInputStream;
-047import org.apache.hadoop.fs.FileStatus;
-048import org.apache.hadoop.fs.FileSystem;
-049import org.apache.hadoop.fs.Path;
-050import org.apache.hadoop.fs.PathFilter;
-051import org.apache.hadoop.hbase.Cell;
-052import org.apache.hadoop.hbase.HBaseConfiguration;
-053import org.apache.hadoop.hbase.HBaseTestingUtility;
-054import org.apache.hadoop.hbase.HColumnDescriptor;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HTableDescriptor;
-058import org.apache.hadoop.hbase.KeyValue;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MiniHBaseCluster;
-061import org.apache.hadoop.hbase.ServerName;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-064import org.apache.hadoop.hbase.client.Delete;
-065import org.apache.hadoop.hbase.client.Get;
-066import org.apache.hadoop.hbase.client.Put;
-067import org.apache.hadoop.hbase.client.Result;
-068import org.apache.hadoop.hbase.client.ResultScanner;
-069import org.apache.hadoop.hbase.client.Scan;
-070import org.apache.hadoop.hbase.client.Table;
-071import org.apache.hadoop.hbase.master.HMaster;
-072import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-073import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-074import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-075import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
-076import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
-077import org.apache.hadoop.hbase.regionserver.FlushRequester;
-078import org.apache.hadoop.hbase.regionserver.HRegion;
-079import org.apache.hadoop.hbase.regionserver.HRegionServer;
-080import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
-081import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-082import org.apache.hadoop.hbase.regionserver.Region;
-083import org.apache.hadoop.hbase.regionserver.RegionScanner;
-084import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-085import org.apache.hadoop.hbase.regionserver.Store;
-086import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-087import org.apache.hadoop.hbase.security.User;
-088import org.apache.hadoop.hbase.testclassification.MediumTests;
-089import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-090import org.apache.hadoop.hbase.util.Bytes;
-091import org.apache.hadoop.hbase.util.EnvironmentEdge;
-092import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-093import org.apache.hadoop.hbase.util.FSUtils;
-094import org.apache.hadoop.hbase.util.HFileTestUtil;
-095import org.apache.hadoop.hbase.util.Pair;
-096import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-097import org.apache.hadoop.hbase.wal.WAL;
-098import org.apache.hadoop.hbase.wal.WALFactory;
-099import org.apache.hadoop.hbase.wal.WALKey;
-100import org.apache.hadoop.hbase.wal.WALSplitter;
-101import org.apache.hadoop.hdfs.DFSInputStream;
-102import org.junit.After;
-103import org.junit.AfterClass;
-104import org.junit.Before;
-105import org.junit.BeforeClass;
-106import org.junit.Rule;
-107import org.junit.Test;
-108import org.junit.experimental.categories.Category;
-109import org.junit.rules.TestName;
-110import org.mockito.Mockito;
-111import org.mockito.invocation.InvocationOnMock;
-112import org.mockito.stubbing.Answer;
-113
-114/**
-115 * Test replay of edits out of a WAL split.
-116 */
-117@Category({RegionServerTests.class, MediumTests.class})
-118public class TestWALReplay {
-119  private static final Log LOG = LogFactory.getLog(TestWALReplay.class);
-120  static 

[20/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
@@ -184,2145 +184,2183 @@
(Hunk body identical to the WALSplitter.html excerpt above.)

[18/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
index 0597160..b444f46 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
@@ -184,2145 +184,2183 @@
 176  // Min batch size when replay WAL edits
 177  private final int minBatchSize;
 178
-179  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
-180      FileSystem fs, LastSequenceId idChecker,
-181      CoordinatedStateManager csm, RecoveryMode mode) {
-182    this.conf = HBaseConfiguration.create(conf);
-183    String codecClassName = conf
-184        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-185    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
-186    this.rootDir = rootDir;
-187    this.fs = fs;
-188    this.sequenceIdChecker = idChecker;
-189    this.csm = (BaseCoordinatedStateManager)csm;
-190    this.walFactory = factory;
-191    this.controller = new PipelineController();
-192
-193    entryBuffers = new EntryBuffers(controller,
-194        this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-195            128*1024*1024));
+179  // the file being split currently
+180  private FileStatus fileBeingSplit;
+181
+182  @VisibleForTesting
+183  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
+184      FileSystem fs, LastSequenceId idChecker,
+185      CoordinatedStateManager csm, RecoveryMode mode) {
+186    this.conf = HBaseConfiguration.create(conf);
+187    String codecClassName = conf
+188        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+189    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
+190    this.rootDir = rootDir;
+191    this.fs = fs;
+192    this.sequenceIdChecker = idChecker;
+193    this.csm = (BaseCoordinatedStateManager)csm;
+194    this.walFactory = factory;
+195    this.controller = new PipelineController();
 196
-197    // a larger minBatchSize may slow down recovery because replay writer has to wait for
-198    // enough edits before replaying them
-199    this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
-200    this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);
-201
-202    this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
-203    if (csm != null && this.distributedLogReplay) {
-204      outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads);
-205    } else {
-206      if (this.distributedLogReplay) {
-207        LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
-208      }
-209      this.distributedLogReplay = false;
-210      outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
-211    }
-212
-213  }
-214
-215  /**
-216   * Splits a WAL file into region's recovered-edits directory.
-217   * This is the main entry point for distributed log splitting from SplitLogWorker.
-218   * <p>
-219   * If the log file has N regions then N recovered.edits files will be produced.
-220   * <p>
-221   * @param rootDir
-222   * @param logfile
-223   * @param fs
-224   * @param conf
-225   * @param reporter
-226   * @param idChecker
-227   * @param cp coordination state manager
-228   * @return false if it is interrupted by the progress-able.
-229   * @throws IOException
-230   */
-231  public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
-232      Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
-233      CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
-234    WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
-235    return s.splitLogFile(logfile, reporter);
-236  }
-237
-238  // A wrapper to split one log folder using the method used by distributed
-239  // log splitting. Used by tools and unit tests. It should be package private.
-240  // It is public only because TestWALObserver is in a different package,
-241  // which uses this method to do log splitting.
-242  @VisibleForTesting
-243  public static List<Path> split(Path rootDir, Path logDir, Path oldLogDir,
-244      FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
-245    final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
-246        Collections.singletonList(logDir), null);
-247    List<Path> splits = new ArrayList<Path>();
-248    if (logfiles != null
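For orientation (not part of the commit): the @VisibleForTesting split() wrapper above is the entry point tools and unit tests use to split every WAL in a server's log directory. A minimal sketch of driving it follows; the paths, the WALFactory id, and the surrounding class are illustrative assumptions, including the three-argument WALFactory constructor of this era.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

public class SplitLogDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/hbase");                  // hypothetical HBase root dir
    Path logDir = new Path("/hbase/WALs/rs1,16020,1");  // hypothetical server WAL dir
    Path oldLogDir = new Path("/hbase/oldWALs");
    WALFactory factory = new WALFactory(conf, null, "wals-for-test"); // id is arbitrary
    // Splits every WAL under logDir into per-region recovered.edits files
    // and returns the paths of the files it wrote.
    List<Path> splits = WALSplitter.split(rootDir, logDir, oldLogDir, fs, conf, factory);
    System.out.println("Produced " + splits.size() + " recovered.edits file(s)");
  }
}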


[47/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
index 4e66df3..804d5fb 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
@@ -1511,242 +1511,261 @@
 1503   * false otherwise.
 1504   */
 1505  public static boolean isEncodedValue(PositionedByteRange src) {
-1506    return isNull(src) || isNumeric(src) || isFixedInt32(src) || isFixedInt64(src)
-1507        || isFixedFloat32(src) || isFixedFloat64(src) || isText(src) || isBlobCopy(src)
-1508        || isBlobVar(src);
-1509  }
-1510
-1511  /**
-1512   * Return true when the next encoded value in {@code src} is null, false
-1513   * otherwise.
-1514   */
-1515  public static boolean isNull(PositionedByteRange src) {
-1516    return NULL ==
-1517        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1518  }
-1519
-1520  /**
-1521   * Return true when the next encoded value in {@code src} uses Numeric
-1522   * encoding, false otherwise. {@code NaN}, {@code +/-Inf} are valid Numeric
-1523   * values.
-1524   */
-1525  public static boolean isNumeric(PositionedByteRange src) {
-1526    byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1527    return x >= NEG_INF && x <= NAN;
-1528  }
-1529
-1530  /**
-1531   * Return true when the next encoded value in {@code src} uses Numeric
-1532   * encoding and is {@code Infinite}, false otherwise.
-1533   */
-1534  public static boolean isNumericInfinite(PositionedByteRange src) {
-1535    byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1536    return NEG_INF == x || POS_INF == x;
-1537  }
-1538
-1539  /**
-1540   * Return true when the next encoded value in {@code src} uses Numeric
-1541   * encoding and is {@code NaN}, false otherwise.
-1542   */
-1543  public static boolean isNumericNaN(PositionedByteRange src) {
-1544    return NAN == (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1545  }
-1546
-1547  /**
-1548   * Return true when the next encoded value in {@code src} uses Numeric
-1549   * encoding and is {@code 0}, false otherwise.
-1550   */
-1551  public static boolean isNumericZero(PositionedByteRange src) {
-1552    return ZERO ==
-1553        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1554  }
-1555
-1556  /**
-1557   * Return true when the next encoded value in {@code src} uses fixed-width
-1558   * Int32 encoding, false otherwise.
-1559   */
-1560  public static boolean isFixedInt32(PositionedByteRange src) {
-1561    return FIXED_INT32 ==
-1562        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1563  }
-1564
-1565  /**
-1566   * Return true when the next encoded value in {@code src} uses fixed-width
-1567   * Int64 encoding, false otherwise.
-1568   */
-1569  public static boolean isFixedInt64(PositionedByteRange src) {
-1570    return FIXED_INT64 ==
-1571        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1572  }
-1573
-1574  /**
-1575   * Return true when the next encoded value in {@code src} uses fixed-width
-1576   * Float32 encoding, false otherwise.
-1577   */
-1578  public static boolean isFixedFloat32(PositionedByteRange src) {
-1579    return FIXED_FLOAT32 ==
-1580        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1581  }
-1582
-1583  /**
-1584   * Return true when the next encoded value in {@code src} uses fixed-width
-1585   * Float64 encoding, false otherwise.
-1586   */
-1587  public static boolean isFixedFloat64(PositionedByteRange src) {
-1588    return FIXED_FLOAT64 ==
-1589        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1590  }
-1591
-1592  /**
-1593   * Return true when the next encoded value in {@code src} uses Text encoding,
-1594   * false otherwise.
-1595   */
-1596  public static boolean isText(PositionedByteRange src) {
-1597    return TEXT ==
-1598        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1599  }
-1600
-1601  /**
-1602   * Return true when the next encoded value in {@code src} uses BlobVar
-1603   * encoding, false otherwise.
-1604   */
-1605  public static boolean isBlobVar(PositionedByteRange src) {
-1606    return BLOB_VAR ==
-1607        (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1608  }
-1609
-1610  /**
-1611   * Return true when the next encoded value in {@code src} uses BlobCopy
-1612   * encoding, false otherwise.
-1613   */
-1614  public stat
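A small sketch (not from the commit) showing the predicates above in action: each one peeks the first byte, undoes the Order mask, and compares it against the type's header constant, so the buffer position never moves.

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class OrderedBytesPeekExample {
  public static void main(String[] args) {
    PositionedByteRange buf = new SimplePositionedMutableByteRange(32);
    OrderedBytes.encodeNumeric(buf, 42L, Order.DESCENDING);
    buf.setPosition(0);
    // The DESCENDING mask is undone internally, so the check is order-agnostic.
    System.out.println(OrderedBytes.isNumeric(buf));      // true
    System.out.println(OrderedBytes.isText(buf));         // false
    System.out.println(OrderedBytes.isEncodedValue(buf)); // true
  }
}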

[41/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/util/OrderedBytes.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/OrderedBytes.html b/devapidocs/org/apache/hadoop/hbase/util/OrderedBytes.html
index 301613a..ef3f752 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/OrderedBytes.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/OrderedBytes.html
@@ -789,18 +789,32 @@ extends Object
 
 static boolean
+isFixedInt16(PositionedByteRange src)
+Return true when the next encoded value in src uses fixed-width
+ Int16 encoding, false otherwise.
+
+static boolean
 isFixedInt32(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Int32 encoding, false otherwise.
 
 static boolean
 isFixedInt64(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Int64 encoding, false otherwise.
 
+static boolean
+isFixedInt8(PositionedByteRange src)
+Return true when the next encoded value in src uses fixed-width
+ Int8 encoding, false otherwise.
+
 static boolean
 isNull(PositionedByteRange src)
@@ -1984,7 +1998,7 @@ extends Object
 isNull
-public static boolean isNull(PositionedByteRange src)
+public static boolean isNull(PositionedByteRange src)
 Return true when the next encoded value in src is null, false
  otherwise.
@@ -1995,7 +2009,7 @@ extends Object
 isNumeric
-public static boolean isNumeric(PositionedByteRange src)
+public static boolean isNumeric(PositionedByteRange src)
 Return true when the next encoded value in src uses Numeric
  encoding, false otherwise. NaN, +/-Inf are valid Numeric values.
@@ -2007,7 +2021,7 @@ extends Object
 isNumericInfinite
-public static boolean isNumericInfinite(PositionedByteRange src)
+public static boolean isNumericInfinite(PositionedByteRange src)
 Return true when the next encoded value in src uses Numeric
  encoding and is Infinite, false otherwise.
@@ -2018,7 +2032,7 @@ extends Object
 isNumericNaN
-public static boolean isNumericNaN(PositionedByteRange src)
+public static boolean isNumericNaN(PositionedByteRange src)
 Return true when the next encoded value in src uses Numeric
  encoding and is NaN, false otherwise.
@@ -2029,18 +2043,40 @@ extends Object
 isNumericZero
-public static boolean isNumericZero(PositionedByteRange src)
+public static boolean isNumericZero(PositionedByteRange src)
 Return true when the next encoded value in src uses Numeric
  encoding and is 0, false otherwise.
+
+isFixedInt8
+public static boolean isFixedInt8(PositionedByteRange src)
+Return true when the next encoded value in src uses fixed-width
+ Int8 encoding, false otherwise.
+
+isFixedInt16
+public static boolean isFixedInt16(PositionedByteRange src)
+Return true when the next encoded value in src uses fixed-width
+ Int16 encoding, false otherwise.
+
 isFixedInt32
-public static boolean isFixedInt32(PositionedByteRange src)
+public static boolean isFixedInt32(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Int32 encoding, false otherwise.
@@ -2051,7 +2087,7 @@ extends Object
 isFixedInt64
-public static boolean isFixedInt64(PositionedByteRange src)
+public static boolean isFixedInt64(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Int64 encoding, false otherwise.
@@ -2062,7 +2098,7 @@ extends Object
 isFixedFloat32
-public static boolean isFixedFloat32(PositionedByteRange src)
+public static boolean isFixedFloat32(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Float32 encoding, false otherwise.
@@ -2073,7 +2109,7 @@ extends Object
 isFixedFloat64
-public static boolean isFixedFloat64(PositionedByteRange src)
+public static boolean isFixedFloat64(PositionedByteRange src)
 Return true when the next encoded value in src uses fixed-width
  Float64 encoding, false otherwise.
@@ -2084,7 +2120,7 @@ extends Object
 isText
-public static boolean isText(PositionedByteRange src)
+public static boolean isText(PositionedByteRange src)
 Return true when the next encoded value in src uses Text encoding,
  false otherwise.
@@ -2095,7 +2131,7 @@ extends Object
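Pairing the new predicates with their encoders, as the updated TestOrderedBytes does, looks roughly like this sketch (not part of the commit; decodeInt8 is the assumed decoding counterpart):

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class FixedIntCheckExample {
  public static void main(String[] args) {
    PositionedByteRange buf = new SimplePositionedMutableByteRange(8);
    OrderedBytes.encodeInt8(buf, (byte) 100, Order.ASCENDING);
    buf.setPosition(0);
    System.out.println(OrderedBytes.isFixedInt8(buf));   // true
    System.out.println(OrderedBytes.isFixedInt16(buf));  // false
    // Decoding consumes the value and advances the position.
    System.out.println((byte) 100 == OrderedBytes.decodeInt8(buf)); // true
  }
}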

[22/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html


[39/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
index 914dd73..f5c0b75 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
@@ -104,7 +104,7 @@
-private static final class WALSplitter.WriterAndPath
+private static final class WALSplitter.WriterAndPath
 extends WALSplitter.SinkWriter
 Private data structure that wraps a Writer and its Path, also collecting statistics about the
  data written to this output.
@@ -127,10 +127,14 @@ Field and Description
+(package private) long
+minLogSeqNum
+
 (package private) org.apache.hadoop.fs.Path
 p
 
 (package private) WALProvider.Writer
 w
@@ -156,8 +160,9 @@ Constructor and Description
-WALSplitter.WriterAndPath(org.apache.hadoop.fs.Path p,
-                          WALProvider.Writer w)
+WALSplitter.WriterAndPath(org.apache.hadoop.fs.Path p,
+                          WALProvider.Writer w,
+                          long minLogSeqNum)
@@ -202,16 +207,25 @@
 p
-final org.apache.hadoop.fs.Path p
+final org.apache.hadoop.fs.Path p
 
 w
-final WALProvider.Writer w
+final WALProvider.Writer w
+
+minLogSeqNum
+final long minLogSeqNum
@@ -222,14 +236,15 @@
 WALSplitter.WriterAndPath
-WALSplitter.WriterAndPath(org.apache.hadoop.fs.Path p,
-                          WALProvider.Writer w)
+WALSplitter.WriterAndPath(org.apache.hadoop.fs.Path p,
+                          WALProvider.Writer w,
+                          long minLogSeqNum)
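The javadoc fragment above only shows the shape of the change; a standalone illustration of such a writer-plus-path wrapper follows. The field names mirror the diff, but everything else, including the comment guessing at minLogSeqNum's role from its name, is an assumption.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WALProvider;

// Illustrative only: WALSplitter's real WriterAndPath is package-private and extends
// SinkWriter to collect edit statistics, which this sketch omits.
final class WriterAndPathSketch {
  final Path p;                 // destination of the recovered.edits file
  final WALProvider.Writer w;   // the open writer for that path
  final long minLogSeqNum;      // assumed: lowest WAL sequence id routed to this writer

  WriterAndPathSketch(Path p, WALProvider.Writer w, long minLogSeqNum) {
    this.p = p;
    this.w = w;
    this.minLogSeqNum = minLogSeqNum;
  }
}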
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index a9c9187..9ea0830 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -108,7 +108,7 @@
-public static class WALSplitter.WriterThread
+public static class WALSplitter.WriterThread
 extends Thread
@@ -252,7 +252,7 @@ extends Thread
 shouldStop
-private volatile boolean shouldStop
+private volatile boolean shouldStop
@@ -261,7 +261,7 @@ extends Thread
 controller
-private WALSplitter.PipelineController controller
+private WALSplitter.PipelineController controller
@@ -270,7 +270,7 @@ extends Thread
 entryBuffers
-private WALSplitter.EntryBuffers entryBuffers
+private WALSplitter.EntryBuffers entryBuffers
@@ -279,7 +279,7 @@ extends Thread
 outputSink
-private WALSplitter.OutputSink outputSink
+private WALSplitter.OutputSink outputSink
@@ -296,7 +296,7 @@ extends Thread
 WALSplitter.WriterThread
-WALSplitter.WriterThread(WALSplitter.PipelineController controller,
+WALSplitter.WriterThread(WALSplitter.PipelineController controller,
                          WALSplitter.EntryBuffers entryBuffers,
                          WALSplitter.OutputSink sink,
                          int i)
@@ -316,7 +316,7 @@ extends Thread
 run
-public void run()
+public void run()
 Specified by:
 run in interface Runnable
@@ -331,7 +331,7 @@ extends Thread
 doRun
-private void doRun()
+private void doRun()
              throws IOException
 Throws:
 IOException
@@ -343,7 +343,7 @@ extends Thread

[35/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -6980,880 +6980,882 @@
 6972      lock(this.updatesLock.readLock());
 6973      try {
 6974        Result cpResult = doCoprocessorPreCall(op, mutation);
-6975        if (cpResult != null) return cpResult;
-6976        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
-6977        Map<byte[], List<Cell>> forMemStore =
-6978            new HashMap<byte[], List<Cell>>(mutation.getFamilyCellMap().size());
-6979        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
-6980        // what to return back to the client (in 'forMemStore' and 'results' respectively).
-6981        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
-6982        // Actually write to WAL now if a walEdit to apply.
-6983        if (walEdit != null && !walEdit.isEmpty()) {
-6984          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
-6985        } else {
-6986          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
-6987          // transaction.
-6988          recordMutationWithoutWal(mutation.getFamilyCellMap());
-6989          writeEntry = mvcc.begin();
-6990        }
-6991        // Now write to MemStore. Do it a column family at a time.
-6992        long sequenceId = writeEntry.getWriteNumber();
-6993        for (Map.Entry<byte[], List<Cell>> e: forMemStore.entrySet()) {
-6994          accumulatedResultSize +=
-6995              applyToMemstore(e.getKey(), e.getValue(), true, false, sequenceId);
-6996        }
-6997        mvcc.completeAndWait(writeEntry);
-6998        writeEntry = null;
-6999      } finally {
-7000        this.updatesLock.readLock().unlock();
-7001      }
-7002      // If results is null, then client asked that we not return the calculated results.
-7003      return results !=  null? Result.create(results): null;
-7004    } finally {
-7005      // Call complete always, even on success. doDelta is doing a Get READ_UNCOMMITTED when it goes
-7006      // to get current value under an exclusive lock so no need so no need to wait to return to
-7007      // the client. Means only way to read-your-own-increment or append is to come in with an
-7008      // a 0 increment.
-7009      if (writeEntry != null) mvcc.complete(writeEntry);
-7010      rowLock.release();
-7011      // Request a cache flush if over the limit.  Do it outside update lock.
-7012      if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-7013      closeRegionOperation(op);
-7014      if (this.metricsRegion != null) {
-7015        switch (op) {
-7016          case INCREMENT:
-7017            this.metricsRegion.updateIncrement();
-7018            break;
-7019          case APPEND:
-7020            this.metricsRegion.updateAppend();
-7021            break;
-7022          default:
+6975        if (cpResult != null) {
+6976          return returnResults? cpResult: null;
+6977        }
+6978        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
+6979        Map<byte[], List<Cell>> forMemStore =
+6980            new HashMap<byte[], List<Cell>>(mutation.getFamilyCellMap().size());
+6981        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
+6982        // what to return back to the client (in 'forMemStore' and 'results' respectively).
+6983        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
+6984        // Actually write to WAL now if a walEdit to apply.
+6985        if (walEdit != null && !walEdit.isEmpty()) {
+6986          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
+6987        } else {
+6988          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
+6989          // transaction.
+6990          recordMutationWithoutWal(mutation.getFamilyCellMap());
+6991          writeEntry = mvcc.begin();
+6992        }
+6993        // Now write to MemStore. Do it a column family at a time.
+6994        long sequenceId = writeEntry.getWriteNumber();
+6995        for (Map.Entry<byte[], List<Cell>> e: forMemStore.entrySet()) {
+6996          accumulatedResultSize +=
+6997              applyToMemstore(e.getKey(), e.getValue(), true, f
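The rewritten branch above makes the coprocessor short-circuit path honor returnResults as well. On the client side that flag is set on the mutation itself; a hedged sketch follows (table, row, and column names are hypothetical, and the empty-Result behavior described in the comment is the documented intent of the flag rather than something shown in this diff):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementNoResultExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Increment inc = new Increment(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
      inc.setReturnResults(false); // server still applies the delta, but ships back an empty Result
      table.increment(inc);
    }
  }
}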

[28/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html

[14/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html


[24/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index c9f2d82..04fbb3a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060    @org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 061  optionalArguments = {
-062    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-063    @org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-064    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
-065    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+062    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
+063    @org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
+064    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+065    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
 066    @org.jamon.annotations.Argument(name = "format", type = "String"),
-067    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-068    @org.jamon.annotations.Argument(name = "filter", type = "String"),
-069    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
-070    @org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>")})
+067    @org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
+068    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+069    @org.jamon.annotations.Argument(name = "filter", type = "String"),
+070    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean")})
 071public class MasterStatusTmpl
 072  extends org.jamon.AbstractTemplateProxy
 073{
@@ -116,74 +116,74 @@
 108      return m_master;
 109    }
 110    private HMaster m_master;
-111    // 22, 1
-112    public void setMetaLocation(ServerName metaLocation)
+111    // 21, 1
+112    public void setFrags(Map<String,Integer> frags)
 113    {
-114      // 22, 1
-115      m_metaLocation = metaLocation;
-116      m_metaLocation__IsNotDefault = true;
+114      // 21, 1
+115      m_frags = frags;
+116      m_frags__IsNotDefault = true;
 117    }
-118    public ServerName getMetaLocation()
+118    public Map<String,Integer> getFrags()
 119    {
-120      return m_metaLocation;
+120      return m_frags;
 121    }
-122    private ServerName m_metaLocation;
-123    public boolean getMetaLocation__IsNotDefault()
+122    private Map<String,Integer> m_frags;
+123    public boolean getFrags__IsNotDefault()
 124    {
-125      return m_metaLocation__IsNotDefault;
+125      return m_frags__IsNotDefault;
 126    }
-127    private boolean m_metaLocation__IsNotDefault;
-128    // 28, 1
-129    public void setServerManager(ServerManager serverManager)
+127    private boolean m_frags__IsNotDefault;
+128    // 23, 1
+129    public void setServers(List<ServerName> servers)
 130    {
-131      // 28, 1
-132      m_serverManager = serverManager;
-133      m_serverManager__IsNotDefault = true;
+131      // 23, 1
+132      m_servers = servers;
+133      m_servers__IsNotDefault = true;
 134    }
-135    public ServerManager getServerManager()
+135    public List<ServerName> getServers()
 136    {
-137      return m_serverManager;
+137      return m_servers;
 138    }
-139    private ServerManager m_serverManager;
-140    public boolean getServerManager__IsNotDefault()
+139    private List<ServerName> m_servers;
+140    public boolean getServers__IsNotDefault()
 141    {
-142      return m_serverManager__IsNotDefault;
+142      return m_servers__IsNotDefault;
 143    }
-144    private boolean m_serverManager__IsNotDefault;
-145    // 21, 1
-146    public void setFrags(Map<String,Integer> frags)
+144    private boolean m_servers__IsNotDefault;
+145    // 22, 1
+146    public void setMetaLocation(ServerName metaLocation)
 147    {
-148      // 21, 1
-149      m_frags = frags;
-150      m_frags__IsNotDefault = true;
+148      // 22, 1
+149      m_metaLocation = metaLocation;
+150      m_metaLocation__IsNotDefault = true;
 151    }
-152    public Map<String,Integer> getFrags()
+152    public ServerName getMetaLocation()
 153    {
-154      return m_frags;
+154      return m_metaLocation;
 155    }
-156    private Map<String,Integer> m_frags;
-157    public boolean getFrags__IsNotDefault()
+156    private ServerName m_metaLoc

[02/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestOrderedBytes.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestOrderedBytes.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestOrderedBytes.html
index 62df6ab..9b63985 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestOrderedBytes.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestOrderedBytes.html
@@ -1194,7 +1194,65 @@
 1186      assertEquals(o, OrderedBytes.skip(buff));
 1187    }
 1188  }
-1189}
+1189
+1190  /**
+1191   * Test encoded value check
+1192   */
+1193  @Test
+1194  public void testEncodedValueCheck() {
+1195    BigDecimal longMax = BigDecimal.valueOf(Long.MAX_VALUE);
+1196    double negInf = Double.NEGATIVE_INFINITY;
+1197    BigDecimal negLarge = longMax.multiply(longMax).negate();
+1198    BigDecimal negMed = new BigDecimal("-10.0");
+1199    BigDecimal negSmall = new BigDecimal("-0.0010");
+1200    long zero = 0l;
+1201    BigDecimal posSmall = negSmall.negate();
+1202    BigDecimal posMed = negMed.negate();
+1203    BigDecimal posLarge = negLarge.negate();
+1204    double posInf = Double.POSITIVE_INFINITY;
+1205    double nan = Double.NaN;
+1206    byte int8 = 100;
+1207    short int16 = 100;
+1208    int int32 = 100;
+1209    long int64 = 100l;
+1210    float float32 = 100.0f;
+1211    double float64 = 100.0d;
+1212    String text = "hello world.";
+1213    byte[] blobVar = Bytes.toBytes("foo");
+1214
+1215    int cnt = 0;
+1216    PositionedByteRange buff = new SimplePositionedMutableByteRange(1024);
+1217    for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) {
+1218      int o;
+1219      o = OrderedBytes.encodeNull(buff, ord); cnt++;
+1220      o = OrderedBytes.encodeNumeric(buff, negInf, ord); cnt++;
+1221      o = OrderedBytes.encodeNumeric(buff, negLarge, ord); cnt++;
+1222      o = OrderedBytes.encodeNumeric(buff, negMed, ord); cnt++;
+1223      o = OrderedBytes.encodeNumeric(buff, negSmall, ord); cnt++;
+1224      o = OrderedBytes.encodeNumeric(buff, zero, ord); cnt++;
+1225      o = OrderedBytes.encodeNumeric(buff, posSmall, ord); cnt++;
+1226      o = OrderedBytes.encodeNumeric(buff, posMed, ord); cnt++;
+1227      o = OrderedBytes.encodeNumeric(buff, posLarge, ord); cnt++;
+1228      o = OrderedBytes.encodeNumeric(buff, posInf, ord); cnt++;
+1229      o = OrderedBytes.encodeNumeric(buff, nan, ord); cnt++;
+1230      o = OrderedBytes.encodeInt8(buff, int8, ord); cnt++;
+1231      o = OrderedBytes.encodeInt16(buff, int16, ord); cnt++;
+1232      o = OrderedBytes.encodeInt32(buff, int32, ord); cnt++;
+1233      o = OrderedBytes.encodeInt64(buff, int64, ord); cnt++;
+1234      o = OrderedBytes.encodeFloat32(buff, float32, ord); cnt++;
+1235      o = OrderedBytes.encodeFloat64(buff, float64, ord); cnt++;
+1236      o = OrderedBytes.encodeString(buff, text, ord); cnt++;
+1237      o = OrderedBytes.encodeBlobVar(buff, blobVar, ord); cnt++;
+1238    }
+1239
+1240    buff.setPosition(0);
+1241    assertEquals(OrderedBytes.length(buff), cnt);
+1242    for (int i = 0; i < cnt; i++) {
+1243      assertEquals(OrderedBytes.isEncodedValue(buff), true);
+1244      OrderedBytes.skip(buff);
+1245    }
+1246  }
+1247}
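As a companion to the new test (not in the commit): once a type predicate passes, the matching decode call consumes the value and advances the range's position. A minimal round-trip sketch:

import java.math.BigDecimal;

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class DecodeRoundTripExample {
  public static void main(String[] args) {
    PositionedByteRange buff = new SimplePositionedMutableByteRange(64);
    OrderedBytes.encodeString(buff, "hello world.", Order.ASCENDING);
    OrderedBytes.encodeNumeric(buff, new BigDecimal("-10.0"), Order.ASCENDING);
    buff.setPosition(0);
    if (OrderedBytes.isText(buff)) {
      System.out.println(OrderedBytes.decodeString(buff));              // hello world.
    }
    if (OrderedBytes.isNumeric(buff)) {
      System.out.println(OrderedBytes.decodeNumericAsBigDecimal(buff)); // -10.0
    }
  }
}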
 
 
 



[51/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f32f549a
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f32f549a
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f32f549a

Branch: refs/heads/asf-site
Commit: f32f549ad3289754862f2f304edcdf1f923f9c5e
Parents: 3268e8d
Author: jenkins 
Authored: Thu Feb 18 15:24:32 2016 +
Committer: Misty Stanley-Jones 
Committed: Thu Feb 18 09:04:01 2016 -0800

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/index-all.html  |14 +
 .../org/apache/hadoop/hbase/ProcedureInfo.html  |57 +-
 .../org/apache/hadoop/hbase/client/Append.html  |30 +-
 .../apache/hadoop/hbase/client/Increment.html   |54 +-
 .../apache/hadoop/hbase/client/Mutation.html|56 +-
 .../hadoop/hbase/client/class-use/Mutation.html | 4 +
 .../apache/hadoop/hbase/util/OrderedBytes.html  |66 +-
 .../util/class-use/PositionedByteRange.html |16 +-
 .../org/apache/hadoop/hbase/ProcedureInfo.html  |   409 +-
 .../org/apache/hadoop/hbase/client/Append.html  |   257 +-
 .../apache/hadoop/hbase/client/Increment.html   |   575 +-
 .../apache/hadoop/hbase/client/Mutation.html|   859 +-
 .../apache/hadoop/hbase/util/OrderedBytes.html  |   485 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 11020 -
 checkstyle.rss  | 6 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |45 +-
 devapidocs/index-all.html   |30 +-
 .../org/apache/hadoop/hbase/ProcedureInfo.html  |71 +-
 .../class-use/InterfaceAudience.Private.html|20 +
 .../class-use/InterfaceStability.Unstable.html  |12 +-
 .../hbase/classification/package-tree.html  | 4 +-
 .../org/apache/hadoop/hbase/client/Append.html  |77 +-
 .../org/apache/hadoop/hbase/client/Delete.html  | 2 +-
 .../apache/hadoop/hbase/client/Increment.html   |78 +-
 .../apache/hadoop/hbase/client/Mutation.html|   129 +-
 .../org/apache/hadoop/hbase/client/Put.html | 2 +-
 .../hadoop/hbase/client/class-use/Mutation.html | 4 +
 .../hadoop/hbase/client/package-tree.html   | 4 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../hadoop/hbase/master/package-tree.html   | 2 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |10 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 2 +-
 .../hadoop/hbase/quotas/package-tree.html   | 2 +-
 .../hadoop/hbase/regionserver/HRegion.html  |98 +-
 .../hadoop/hbase/regionserver/package-tree.html |26 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   240 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |96 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |48 +-
 .../regionserver/RSStatusTmpl.ImplData.html |90 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |36 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |18 +-
 .../apache/hadoop/hbase/util/OrderedBytes.html  |66 +-
 .../util/class-use/PositionedByteRange.html |16 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 4 +-
 .../WALSplitter.CorruptedLogFileException.html  | 6 +-
 .../hbase/wal/WALSplitter.EntryBuffers.html |24 +-
 ...WALSplitter.LogRecoveredEditsOutputSink.html |48 +-
 .../wal/WALSplitter.LogReplayOutputSink.html|60 +-
 .../hbase/wal/WALSplitter.MutationReplay.html   |12 +-
 .../hbase/wal/WALSplitter.OutputSink.html   |52 +-
 .../wal/WALSplitter.PipelineController.html |12 +-
 .../wal/WALSplitter.RegionEntryBuffer.html  |24 +-
 .../wal/WALSplitter.RegionServerWriter.html | 8 +-
 .../hbase/wal/WALSplitter.SinkWriter.html   |16 +-
 .../hbase/wal/WALSplitter.WriterAndPath.html|35 +-
 .../hbase/wal/WALSplitter.Wr

[19/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html


[33/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html

[30/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html

[08/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 63d37cd..b62a513 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -5168,6 +5168,8 @@
 
 createFSOutput(Path, FileSystem) - Static method in class org.apache.hadoop.hbase.io.hfile.TestHFileSeek
 
+createFSWALEntry(HTableDescriptor, HRegionInfo, long, byte[], byte[], EnvironmentEdge, MultiVersionConcurrencyControl, int) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
 createGet(int, int) - Method in class org.apache.hadoop.hbase.regionserver.TestPerColumnFamilyFlush
 
 createGet(long) - Method in class org.apache.hadoop.hbase.test.IntegrationTestTimeBoundedRequestsWithRegionReplicas.TimeBoundedMultiThreadedReader.TimeBoundedMultiThreadedReaderThread
@@ -6062,10 +6064,14 @@
 HBaseTestingUtility.createWal(Configuration, Path, HRegionInfo) because that method doesn't play nicely with FaultyFileSystem.
 
+createWALEdit(byte[], byte[], EnvironmentEdge, int) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
 createWALEntryBuilder(TableName) - Method in class org.apache.hadoop.hbase.replication.regionserver.TestReplicationSink
 
 createWALFactory(Configuration, Path) - Static method in class org.apache.hadoop.hbase.regionserver.TestHRegion
 
+createWALKey(TableName, HRegionInfo, MultiVersionConcurrencyControl) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
 createWALReaderForPrimary() - Method in class org.apache.hadoop.hbase.regionserver.TestHRegionReplayEvents
 
 createWalStore(Configuration, FileSystem, Path) - Static method in class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility
@@ -34541,6 +34547,10 @@
 
 TestEncodedSeekers(DataBlockEncoding, boolean, boolean) - Constructor for class org.apache.hadoop.hbase.io.encoding.TestEncodedSeekers
 
+testEncodedValueCheck() - Method in class org.apache.hadoop.hbase.util.TestOrderedBytes
+
+Test encoded value check
+
 testEncodedValueContainsTerm() - Method in class org.apache.hadoop.hbase.types.TestTerminatedWrapper
 
 testEncoderOutput() - Method in class org.apache.hadoop.hbase.codec.prefixtree.row.TestRowEncoder
@@ -39296,6 +39306,14 @@
 
 testName - Variable in class org.apache.hadoop.hbase.regionserver.TestRecoveredEdits
 
+testNameConflictWhenSplit(boolean) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
+Testcase for https://issues.apache.org/jira/browse/HBASE-14949.
+
+testNameConflictWhenSplit0() - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
+testNameConflictWhenSplit1() - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
 TestNamespace - Class in org.apache.hadoop.hbase
 
 TestNamespace() - Constructor for class org.apache.hadoop.hbase.TestNamespace
@@ -49849,6 +49867,8 @@ the order they are declared.
 
 writerThreads - Variable in class org.apache.hadoop.hbase.util.TestMiniClusterLoadSequential
 
+writerWALFile(Path, List) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestWALReplay
+
 writeSomeData(FileSystem, Path, long, byte) - Method in class org.apache.hadoop.hbase.io.TestFileLink
 
 Write up to 'size' bytes with value 'v' into a new file called 'path'.
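The new TestWALReplay helpers indexed above (createWALEdit, createFSWALEntry, createWALKey, writerWALFile) build synthetic WAL entries for replay tests. A hedged sketch of the core step, assembling a WALEdit from KeyValues; the class and method names, row, family and loop bound are illustrative, not the actual test code:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper in the spirit of TestWALReplay.createWALEdit above.
final class WALEditSketch {
  static WALEdit buildTestEdit(byte[] row, byte[] family, long timestamp, int count) {
    WALEdit edit = new WALEdit();
    for (int i = 0; i < count; i++) {
      // One cell per iteration; qualifier and value are derived from the counter.
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), timestamp, Bytes.toBytes(i)));
    }
    return edit;
  }
}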

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index c423f10..149878b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -156,9 +156,9 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.io.hfile.TestCacheOnWrite.CacheOnWriteType
-org.apache.hadoop.hbase.io.hfile.TestHFileBlockCompatibility.Writer.State
 org.apache.hadoop.hbase.io.hfile.TagUsage
+org.apache.hadoop.hbase.io.hfile.TestHFileBlockCompatibility.Writer.State
+org.apache.hadoop.hbase.io.hfile.TestCacheOnWrite.CacheOnWriteType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
-

[42/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
index cfd2380..cfd1df0 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
@@ -160,8 +160,8 @@
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
 org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType
-org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.thrift.ThriftMetrics.ThriftServerType
+org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl.FactoryStorage
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 545c9d5..595d49d 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -379,76 +379,76 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private HMaster m_master
 
-m_metaLocation
-private ServerName m_metaLocation
+m_frags
+private Map<String,Integer> m_frags
 
-m_metaLocation__IsNotDefault
-private boolean m_metaLocation__IsNotDefault
+m_frags__IsNotDefault
+private boolean m_frags__IsNotDefault
 
-m_serverManager
-private ServerManager m_serverManager
+m_servers
+private List<ServerName> m_servers
 
-m_serverManager__IsNotDefault
-private boolean m_serverManager__IsNotDefault
+m_servers__IsNotDefault
+private boolean m_servers__IsNotDefault
 
-m_frags
-private Map<String,Integer> m_frags
+m_metaLocation
+private ServerName m_metaLocation
 
-m_frags__IsNotDefault
-private boolean m_frags__IsNotDefault
+m_metaLocation__IsNotDefault
+private boolean m_metaLocation__IsNotDefault
 
-m_assignmentManager
-private AssignmentManager m_assignmentManager
+m_deadServers
+private Set<ServerName> m_deadServers
 
-m_assignmentManager__IsNotDefault
-private boolean m_assignmentManager__IsNotDefault
+m_deadServers__IsNotDefault
+private boolean m_deadServers__IsNotDefault
 
@@ -469,76 +469,76 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private boolean m_format__IsNotDefault
 
-m_catalogJanitorEnabled
-private boolean m_catalogJanitorEnabled
+m_serverManager
+private ServerManager m_serverManager
 
-m_catalogJanitorEnabled__IsNotDefault
-private boolean m_catalogJanitorEnabled__IsNotDefault
+m_serverManager__IsNotDefault
+private boolean m_serverManager__IsNotDefault
 
-m_filter
-private String m_filter
+m_assignmentManager
+private AssignmentManager m_assignmentManager
 
-m_filter__IsNotDefault
-private boolean m_filter__IsNotDefault
+m_assignmentManager__IsNotDefault
+private boolean m_assignmentManager__IsNotDefault
 
-m_deadServers
-private Set<ServerName> m_deadServers

[03/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
index aa5da14..b06a764 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
@@ -41,1157 +41,1234 @@
 033  import java.lang.reflect.Field;
 034  import java.security.PrivilegedExceptionAction;
 035  import java.util.ArrayList;
-036  import java.util.Collection;
-037  import java.util.HashSet;
-038  import java.util.List;
-039  import java.util.Set;
-040  import java.util.concurrent.atomic.AtomicBoolean;
-041  import java.util.concurrent.atomic.AtomicInteger;
-042
-043  import org.apache.commons.logging.Log;
-044  import org.apache.commons.logging.LogFactory;
-045  import org.apache.hadoop.conf.Configuration;
-046  import org.apache.hadoop.fs.FSDataInputStream;
-047  import org.apache.hadoop.fs.FileStatus;
-048  import org.apache.hadoop.fs.FileSystem;
-049  import org.apache.hadoop.fs.Path;
-050  import org.apache.hadoop.fs.PathFilter;
-051  import org.apache.hadoop.hbase.Cell;
-052  import org.apache.hadoop.hbase.HBaseConfiguration;
-053  import org.apache.hadoop.hbase.HBaseTestingUtility;
-054  import org.apache.hadoop.hbase.HColumnDescriptor;
-055  import org.apache.hadoop.hbase.HConstants;
-056  import org.apache.hadoop.hbase.HRegionInfo;
-057  import org.apache.hadoop.hbase.HTableDescriptor;
-058  import org.apache.hadoop.hbase.KeyValue;
-059  import org.apache.hadoop.hbase.MasterNotRunningException;
-060  import org.apache.hadoop.hbase.MiniHBaseCluster;
-061  import org.apache.hadoop.hbase.ServerName;
-062  import org.apache.hadoop.hbase.TableName;
-063  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-064  import org.apache.hadoop.hbase.client.Delete;
-065  import org.apache.hadoop.hbase.client.Get;
-066  import org.apache.hadoop.hbase.client.Put;
-067  import org.apache.hadoop.hbase.client.Result;
-068  import org.apache.hadoop.hbase.client.ResultScanner;
-069  import org.apache.hadoop.hbase.client.Scan;
-070  import org.apache.hadoop.hbase.client.Table;
-071  import org.apache.hadoop.hbase.master.HMaster;
-072  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-073  import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-074  import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-075  import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
-076  import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
-077  import org.apache.hadoop.hbase.regionserver.FlushRequester;
-078  import org.apache.hadoop.hbase.regionserver.HRegion;
-079  import org.apache.hadoop.hbase.regionserver.HRegionServer;
-080  import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
-081  import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-082  import org.apache.hadoop.hbase.regionserver.Region;
-083  import org.apache.hadoop.hbase.regionserver.RegionScanner;
-084  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-085  import org.apache.hadoop.hbase.regionserver.Store;
-086  import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-087  import org.apache.hadoop.hbase.security.User;
-088  import org.apache.hadoop.hbase.testclassification.MediumTests;
-089  import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-090  import org.apache.hadoop.hbase.util.Bytes;
-091  import org.apache.hadoop.hbase.util.EnvironmentEdge;
-092  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-093  import org.apache.hadoop.hbase.util.FSUtils;
-094  import org.apache.hadoop.hbase.util.HFileTestUtil;
-095  import org.apache.hadoop.hbase.util.Pair;
-096  import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-097  import org.apache.hadoop.hbase.wal.WAL;
-098  import org.apache.hadoop.hbase.wal.WALFactory;
-099  import org.apache.hadoop.hbase.wal.WALKey;
-100  import org.apache.hadoop.hbase.wal.WALSplitter;
-101  import org.apache.hadoop.hdfs.DFSInputStream;
-102  import org.junit.After;
-103  import org.junit.AfterClass;
-104  import org.junit.Before;
-105  import org.junit.BeforeClass;
-106  import org.junit.Rule;
-107  import org.junit.Test;
-108  import org.junit.experimental.categories.Category;
-109  import org.junit.rules.TestName;
-110  import org.mockito.Mockito;
-111  import org.mockito.invocation.InvocationOnMock;
-112  import org.mockito.stubbing.Answer;
-113
-114  /**
-115   * Test replay of edits out of a WAL split.
-116   */
-117  @Category({RegionServerTests.class, MediumTests.class})
-118  public class TestWALReplay {
-119    private static final Log LOG = LogFactory.getLog(TestWALReplay.class);
-120    static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

[26/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html

[37/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 7bdc8f8..7203c61 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -60,297 +60,294 @@
 052  @InterfaceStability.Stable
 053  public class Increment extends Mutation implements Comparable<Row> {
 054    private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE;
-055
-056    private static final String RETURN_RESULTS = "_rr_";
-057
-058    private TimeRange tr = new TimeRange();
-059
-060    /**
-061     * Create a Increment operation for the specified row.
-062     * <p>
-063     * At least one column must be incremented.
-064     * @param row row key (we will make a copy of this).
-065     */
-066    public Increment(byte [] row) {
-067      this(row, 0, row.length);
-068    }
-069
-070    /**
-071     * Create a Increment operation for the specified row.
-072     * <p>
-073     * At least one column must be incremented.
-074     * @param row row key (we will make a copy of this).
-075     */
-076    public Increment(final byte [] row, final int offset, final int length) {
-077      checkRow(row, offset, length);
-078      this.row = Bytes.copy(row, offset, length);
-079    }
-080    /**
-081     * Copy constructor
-082     * @param i
-083     */
-084    public Increment(Increment i) {
-085      this.row = i.getRow();
-086      this.ts = i.getTimeStamp();
-087      this.tr = i.getTimeRange();
-088      this.familyMap.putAll(i.getFamilyCellMap());
-089      for (Map.Entry<String, byte[]> entry : i.getAttributesMap().entrySet()) {
-090        this.setAttribute(entry.getKey(), entry.getValue());
-091      }
-092    }
-093
-094    /**
-095     * Add the specified KeyValue to this operation.
-096     * @param cell individual Cell
-097     * @return this
-098     * @throws java.io.IOException e
-099     */
-100    public Increment add(Cell cell) throws IOException {
-101      byte [] family = CellUtil.cloneFamily(cell);
-102      List<Cell> list = getCellList(family);
-103      // Checking that the row of the kv is the same as the put
-104      int res = Bytes.compareTo(this.row, 0, row.length,
-105          cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-106      if (res != 0) {
-107        throw new WrongRowIOException("The row in " + cell +
-108          " doesn't match the original one " + Bytes.toStringBinary(this.row));
-109      }
-110      list.add(cell);
-111      familyMap.put(family, list);
-112      return this;
-113    }
-114
-115    /**
-116     * Increment the column from the specific family with the specified qualifier
-117     * by the specified amount.
-118     * <p>
-119     * Overrides previous calls to addColumn for this family and qualifier.
-120     * @param family family name
-121     * @param qualifier column qualifier
-122     * @param amount amount to increment by
-123     * @return the Increment object
-124     */
-125    public Increment addColumn(byte [] family, byte [] qualifier, long amount) {
-126      if (family == null) {
-127        throw new IllegalArgumentException("family cannot be null");
+055    private TimeRange tr = new TimeRange();
+056
+057    /**
+058     * Create a Increment operation for the specified row.
+059     * <p>
+060     * At least one column must be incremented.
+061     * @param row row key (we will make a copy of this).
+062     */
+063    public Increment(byte [] row) {
+064      this(row, 0, row.length);
+065    }
+066
+067    /**
+068     * Create a Increment operation for the specified row.
+069     * <p>
+070     * At least one column must be incremented.
+071     * @param row row key (we will make a copy of this).
+072     */
+073    public Increment(final byte [] row, final int offset, final int length) {
+074      checkRow(row, offset, length);
+075      this.row = Bytes.copy(row, offset, length);
+076    }
+077    /**
+078     * Copy constructor
+079     * @param i
+080     */
+081    public Increment(Increment i) {
+082      this.row = i.getRow();
+083      this.ts = i.getTimeStamp();
+084      this.tr = i.getTimeRange();
+085      this.familyMap.putAll(i.getFamilyCellMap());
+086      for (Map.Entry<String, byte[]> entry : i.getAttributesMap().entrySet()) {
+087        this.setAttribute(entry.getKey(), entry.getValue());
+088      }
+089    }
+090
+091    /**
+092     * Add the specified KeyValue to this operation.
+093     * @param cell individual Cell
+094     * @return this
+095     * @throws java.io.IOException e
+096     */
+097    public Increment add(Cell cell) throws IOException {
+098      byte [] family = CellUtil.cloneFamily(cell);
+099      List<Cell> list = getCellList(family);
+100      // Checking that the row of the kv is the same as the put
+101      int res = Bytes.compareTo(this.row, 0, row.length,
+102          cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
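For context, here is how a client drives the Increment API shown in this file; the table handle, names and values are placeholders, not part of the page above:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class IncrementSketch {
  static long bumpCounter(Table table) throws IOException {
    Increment inc = new Increment(Bytes.toBytes("row-1"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L); // add 1 to the cell
    Result result = table.increment(inc); // server returns the post-increment cells
    return Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
  }
}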


[46/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 35e6ed0..ea7b704 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -283,7 +283,7 @@
 1683
 0
 0
-12749
+12747
 
 Files
 
@@ -3556,7 +3556,7 @@
 org/apache/hadoop/hbase/regionserver/HRegion.java
 0
 0
-211
+210
 
 org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 0
@@ -5851,7 +5851,7 @@
 org/apache/hadoop/hbase/wal/WALSplitter.java
 0
 0
-70
+69
 
 org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
 0
@@ -6033,7 +6033,7 @@
 
 
NeedBraces (http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces)
-1788
+1786
  Error
 
 coding
@@ -6124,12 +6124,12 @@
JavadocTagContinuationIndentation (http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation)
 
 offset: "2"
-767
+766
  Error
 
 
NonEmptyAtclauseDescription (http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription)
-3399
+3400
  Error
 
 misc
@@ -10463,25 +10463,25 @@
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-180
+181
 
  Error
 indentation
 Indentation
'if' child have incorrect indentation level 7, expected level should be 6.
-198
+199
 
  Error
 indentation
 Indentation
'if' child have incorrect indentation level 8, expected level should be 6.
-202
+203
 
  Error
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-215
+216
 
 org/apache/hadoop/hbase/RegionLoad.java
 
@@ -12098,25 +12098,25 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-81
+80
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-96
+95
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-97
+96
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-118
+117
 
 org/apache/hadoop/hbase/client/AsyncProcess.java
 
@@ -14553,7 +14553,7 @@
 
  Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
Javadoc comment at column 64 has parse error. Missed HTML close tag 'code'. Sometimes it means that close tag missed for one of previous tags.
 1767
 
@@ -15260,7 +15260,7 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-82
+79
 
 org/apache/hadoop/hbase/client/MasterCallable.java
 
@@ -15764,79 +15764,79 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-124
+126
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-125
+127
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-126
+128
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-127
+129
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-236
+238
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-330
+332
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-340
+342
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-344
+346
 
  Error
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-434
+436
 
  Error
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-466
+485
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-475
+494
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-476
+495
 
  Error
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-478
+497
 
 org/apache/hadoop/hbase/client/Operation.java
 
@@ -56747,479 +56747,473 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-6975
+7011
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-7009
+7014
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-7012
+7055
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-7053
+7058
 
  Error
-blocks
-NeedBraces
-'if' construct must use '{}'s.
-7056
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should be 2.
+7067
 
  Error
 javadoc
 JavadocTagContinuationIndentation
Line continuation have incorrect indentation level, expected level should be 2.
-7065
+7092
 
  Error
 javadoc
JavadocTagContinuationIndentation
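Most of the churn in this report is line-number drift for the NeedBraces rule; the shape it flags, and the compliant form, look like this (adapted from the single-line ifs in the HRegion diff earlier in this thread):

// Flagged by checkstyle NeedBraces: "'if' construct must use '{}'s."
if (writeEntry != null) mvcc.complete(writeEntry);

// Compliant form:
if (writeEntry != null) {
  mvcc.complete(writeEntry);
}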

[23/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
index 42bf74e..29bc296 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
@@ -34,27 +34,27 @@
 026
 027  {
 028    private final HRegionServer regionServer;
-029    private final String filter;
-030    private final String bcv;
+029    private final String format;
+030    private final String filter;
 031    private final String bcn;
-032    private final String format;
+032    private final String bcv;
 033    protected static org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData __jamon_setOptionalArguments(org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData p_implData)
 034    {
-035      if(! p_implData.getFilter__IsNotDefault())
+035      if(! p_implData.getFormat__IsNotDefault())
 036      {
-037        p_implData.setFilter("general");
+037        p_implData.setFormat("html");
 038      }
-039      if(! p_implData.getBcv__IsNotDefault())
+039      if(! p_implData.getFilter__IsNotDefault())
 040      {
-041        p_implData.setBcv("");
+041        p_implData.setFilter("general");
 042      }
 043      if(! p_implData.getBcn__IsNotDefault())
 044      {
 045        p_implData.setBcn("");
 046      }
-047      if(! p_implData.getFormat__IsNotDefault())
+047      if(! p_implData.getBcv__IsNotDefault())
 048      {
-049        p_implData.setFormat("html");
+049        p_implData.setBcv("");
 050      }
 051      return p_implData;
 052    }
@@ -62,10 +62,10 @@
 054    {
 055      super(p_templateManager, __jamon_setOptionalArguments(p_implData));
 056      regionServer = p_implData.getRegionServer();
-057      filter = p_implData.getFilter();
-058      bcv = p_implData.getBcv();
+057      format = p_implData.getFormat();
+058      filter = p_implData.getFilter();
 059      bcn = p_implData.getBcn();
-060      format = p_implData.getFormat();
+060      bcv = p_implData.getBcv();
 061    }
 062
 063    @Override public void renderNoFlush(final java.io.Writer jamonWriter)
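The reordering above is cosmetic: the generated guard only assigns a default when the caller never set the argument, so evaluation order does not matter. A simplified sketch of that pattern; ImplDataSketch is a stand-in for the generated Jamon class, not the real type:

final class ImplDataSketch {
  private String format;
  private boolean formatIsNotDefault;

  void setFormat(String format) {
    this.format = format;
    this.formatIsNotDefault = true; // record that the caller supplied a value
  }

  String getFormat() { return format; }
  boolean getFormatIsNotDefault() { return formatIsNotDefault; }

  // Mirrors __jamon_setOptionalArguments: fill defaults only for unset arguments.
  static ImplDataSketch setOptionalArguments(ImplDataSketch p) {
    if (!p.getFormatIsNotDefault()) {
      p.setFormat("html"); // same default the generated code applies
    }
    return p;
  }
}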

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
index 4e66df3..804d5fb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
@@ -1511,242 +1511,261 @@
 1503     * false otherwise.
 1504     */
 1505    public static boolean isEncodedValue(PositionedByteRange src) {
-1506      return isNull(src) || isNumeric(src) || isFixedInt32(src) || isFixedInt64(src)
-1507          || isFixedFloat32(src) || isFixedFloat64(src) || isText(src) || isBlobCopy(src)
-1508          || isBlobVar(src);
-1509    }
-1510
-1511    /**
-1512     * Return true when the next encoded value in {@code src} is null, false
-1513     * otherwise.
-1514     */
-1515    public static boolean isNull(PositionedByteRange src) {
-1516      return NULL ==
-1517          (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1518    }
-1519
-1520    /**
-1521     * Return true when the next encoded value in {@code src} uses Numeric
-1522     * encoding, false otherwise. {@code NaN}, {@code +/-Inf} are valid Numeric
-1523     * values.
-1524     */
-1525    public static boolean isNumeric(PositionedByteRange src) {
-1526      byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1527      return x >= NEG_INF && x <= NAN;
-1528    }
-1529
-1530    /**
-1531     * Return true when the next encoded value in {@code src} uses Numeric
-1532     * encoding and is {@code Infinite}, false otherwise.
-1533     */
-1534    public static boolean isNumericInfinite(PositionedByteRange src) {
-1535      byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1536      return NEG_INF == x || POS_INF == x;
-1537    }
-1538
-1539    /**
-1540     * Return true when the next encoded value in {@code src} uses Numeric
-1541     * encoding and is {@code NaN}, false otherwise.
-1542     */
-1543    public static boolean isNumericNaN(PositionedByteRange src) {
-1544      return NAN == (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
-1545    }
-1546
-1547    /**
-1548     * Return true when the next encoded value in {@code src} uses Numeric
-1549     * encoding and is {@code 0}, false otherwise.
-1550     */
-1551    public static boolean isNumericZero(PositionedByteRange src
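All of the predicates above classify a value by peeking at its first encoded byte, honoring the ascending/descending mask. A small usage sketch of the public API, under the assumption of the usual encode-then-classify flow (the buffer size is arbitrary):

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

final class OrderedBytesSketch {
  static double roundTrip() {
    PositionedByteRange buf = new SimplePositionedMutableByteRange(32);
    OrderedBytes.encodeNumeric(buf, 42.5, Order.ASCENDING);
    buf.setPosition(0); // rewind before classifying
    assert OrderedBytes.isEncodedValue(buf); // recognizes any OrderedBytes encoding
    assert OrderedBytes.isNumeric(buf);      // Numeric family (NaN and +/-Inf included)
    return OrderedBytes.decodeNumericAsDouble(buf);
  }
}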

[11/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -184,2145 +184,2183 @@
 176    // Min batch size when replay WAL edits
 177    private final int minBatchSize;
 178
-179    WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
-180        FileSystem fs, LastSequenceId idChecker,
-181        CoordinatedStateManager csm, RecoveryMode mode) {
-182      this.conf = HBaseConfiguration.create(conf);
-183      String codecClassName = conf
-184          .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-185      this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
-186      this.rootDir = rootDir;
-187      this.fs = fs;
-188      this.sequenceIdChecker = idChecker;
-189      this.csm = (BaseCoordinatedStateManager)csm;
-190      this.walFactory = factory;
-191      this.controller = new PipelineController();
-192
-193      entryBuffers = new EntryBuffers(controller,
-194          this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-195              128*1024*1024));
+179    // the file being split currently
+180    private FileStatus fileBeingSplit;
+181
+182    @VisibleForTesting
+183    WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
+184        FileSystem fs, LastSequenceId idChecker,
+185        CoordinatedStateManager csm, RecoveryMode mode) {
+186      this.conf = HBaseConfiguration.create(conf);
+187      String codecClassName = conf
+188          .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+189      this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
+190      this.rootDir = rootDir;
+191      this.fs = fs;
+192      this.sequenceIdChecker = idChecker;
+193      this.csm = (BaseCoordinatedStateManager)csm;
+194      this.walFactory = factory;
+195      this.controller = new PipelineController();
 196
-197      // a larger minBatchSize may slow down recovery because replay writer has to wait for
-198      // enough edits before replaying them
-199      this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
-200      this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);
-201
-202      this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
-203      if (csm != null && this.distributedLogReplay) {
-204        outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads);
-205      } else {
-206        if (this.distributedLogReplay) {
-207          LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
-208        }
-209        this.distributedLogReplay = false;
-210        outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
-211      }
-212
-213    }
-214
-215    /**
-216     * Splits a WAL file into region's recovered-edits directory.
-217     * This is the main entry point for distributed log splitting from SplitLogWorker.
-218     * <p>
-219     * If the log file has N regions then N recovered.edits files will be produced.
-220     * <p>
-221     * @param rootDir
-222     * @param logfile
-223     * @param fs
-224     * @param conf
-225     * @param reporter
-226     * @param idChecker
-227     * @param cp coordination state manager
-228     * @return false if it is interrupted by the progress-able.
-229     * @throws IOException
-230     */
-231    public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
-232        Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
-233        CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
-234      WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
-235      return s.splitLogFile(logfile, reporter);
-236    }
-237
-238    // A wrapper to split one log folder using the method used by distributed
-239    // log splitting. Used by tools and unit tests. It should be package private.
-240    // It is public only because TestWALObserver is in a different package,
-241    // which uses this method to do log splitting.
-242    @VisibleForTesting
-243    public static List<Path> split(Path rootDir, Path logDir, Path oldLogDir,
-244        FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
-245      final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
-246          Collections.singletonList(logDir), null);
-247      List<Path> splits = new ArrayList<Path>();
-248      if (logfiles != null && logfiles.length > 0) {
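The static splitLogFile entry point above is how SplitLogWorker drives a split. A hedged sketch of calling it from a tool, with the paths and the disabled coordination/reporting arguments as placeholder assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;

final class SplitSketch {
  static boolean splitOne(String walFile) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/hbase");               // assumed HBase root dir
    FileStatus logfile = fs.getFileStatus(new Path(walFile));
    WALFactory factory = new WALFactory(conf, null, "split-sketch");
    // null reporter/idChecker/csm: no progress reporting or ZK coordination,
    // plain log splitting rather than distributed log replay.
    return WALSplitter.splitLogFile(rootDir, logfile, fs, conf,
        null, null, null, RecoveryMode.LOG_SPLITTING, factory);
  }
}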


[01/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3268e8dbe -> f32f549ad


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/xref-test/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
--
diff --git a/xref-test/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html b/xref-test/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
index 71ea0e5..4e7fed1 100644
--- a/xref-test/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
+++ b/xref-test/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html

[13/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html


[06/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.CustomStoreFlusher.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.CustomStoreFlusher.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.CustomStoreFlusher.html
index aa5da14..b06a764 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.CustomStoreFlusher.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.CustomStoreFlusher.html

[32/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -6980,880 +6980,882 @@
 6972      lock(this.updatesLock.readLock());
 6973      try {
 6974        Result cpResult = doCoprocessorPreCall(op, mutation);
-6975        if (cpResult != null) return cpResult;
-6976        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
-6977        Map> forMemStore =
-6978            new HashMap>(mutation.getFamilyCellMap().size());
-6979        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
-6980        // what to return back to the client (in 'forMemStore' and 'results' respectively).
-6981        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
-6982        // Actually write to WAL now if a walEdit to apply.
-6983        if (walEdit != null && !walEdit.isEmpty()) {
-6984          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
-6985        } else {
-6986          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
-6987          // transaction.
-6988          recordMutationWithoutWal(mutation.getFamilyCellMap());
-6989          writeEntry = mvcc.begin();
-6990        }
-6991        // Now write to MemStore. Do it a column family at a time.
-6992        long sequenceId = writeEntry.getWriteNumber();
-6993        for (Map.Entry> e: forMemStore.entrySet()) {
-6994          accumulatedResultSize +=
-6995              applyToMemstore(e.getKey(), e.getValue(), true, false, sequenceId);
-6996        }
-6997        mvcc.completeAndWait(writeEntry);
-6998        writeEntry = null;
-6999      } finally {
-7000        this.updatesLock.readLock().unlock();
-7001      }
-7002      // If results is null, then client asked that we not return the calculated results.
-7003      return results !=  null? Result.create(results): null;
-7004    } finally {
-7005      // Call complete always, even on success. doDelta is doing a Get READ_UNCOMMITTED when it goes
-7006      // to get current value under an exclusive lock so no need so no need to wait to return to
-7007      // the client. Means only way to read-your-own-increment or append is to come in with an
-7008      // a 0 increment.
-7009      if (writeEntry != null) mvcc.complete(writeEntry);
-7010      rowLock.release();
-7011      // Request a cache flush if over the limit.  Do it outside update lock.
-7012      if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
-7013      closeRegionOperation(op);
-7014      if (this.metricsRegion != null) {
-7015        switch (op) {
-7016          case INCREMENT:
-7017            this.metricsRegion.updateIncrement();
-7018            break;
-7019          case APPEND:
-7020            this.metricsRegion.updateAppend();
-7021            break;
-7022          default:
+6975        if (cpResult != null) {
+6976          return returnResults? cpResult: null;
+6977        }
+6978        Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
+6979        Map> forMemStore =
+6980            new HashMap>(mutation.getFamilyCellMap().size());
+6981        // Reckon Cells to apply to WAL --  in returned walEdit -- and what to add to memstore and
+6982        // what to return back to the client (in 'forMemStore' and 'results' respectively).
+6983        WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
+6984        // Actually write to WAL now if a walEdit to apply.
+6985        if (walEdit != null && !walEdit.isEmpty()) {
+6986          writeEntry = doWALAppend(walEdit, durability, nonceGroup, nonce);
+6987        } else {
+6988          // If walEdits is empty, it means we skipped the WAL; update counters and start an mvcc
+6989          // transaction.
+6990          recordMutationWithoutWal(mutation.getFamilyCellMap());
+6991          writeEntry = mvcc.begin();
+6992        }
+6993        // Now write to MemStore. Do it a column family at a time.
+6994        long sequenceId = writeEntry.getWriteNumber();
+6995        for (Map.Entry> e: forMemStore.entrySet()) {
+6996          accumulatedResultSize +=
+6997              applyToMemstore(e.getKey(), e
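
The hunk above reshapes HRegion's doDelta flow around one invariant: every mutation gets an MVCC write entry, either from the WAL append or from mvcc.begin() when the WAL is skipped, and that entry is always completed -- completeAndWait on success, complete in the finally block on failure. A minimal, self-contained Java sketch of that discipline, using a simplified stand-in rather than HBase's MultiVersionConcurrencyControl:

// Simplified stand-in for the MVCC write-entry pattern in the hunk above.
import java.util.concurrent.atomic.AtomicLong;

public class MvccSketch {
  static class WriteEntry {
    final long writeNumber;                  // sequence id handed to the memstore
    WriteEntry(long n) { this.writeNumber = n; }
    long getWriteNumber() { return writeNumber; }
  }

  static class SimpleMvcc {
    private final AtomicLong seq = new AtomicLong();
    private volatile long readPoint;

    WriteEntry begin() { return new WriteEntry(seq.incrementAndGet()); }
    void completeAndWait(WriteEntry e) { complete(e); }  // the real MVCC also waits for earlier writes
    void complete(WriteEntry e) { readPoint = e.getWriteNumber(); } // advance the read point
    long readPoint() { return readPoint; }
  }

  public static void main(String[] args) {
    SimpleMvcc mvcc = new SimpleMvcc();
    WriteEntry writeEntry = null;
    try {
      writeEntry = mvcc.begin();                 // as when the WAL append is skipped
      long sequenceId = writeEntry.getWriteNumber();
      System.out.println("applying edits at sequenceId " + sequenceId);
      mvcc.completeAndWait(writeEntry);          // success: make the edits visible
      writeEntry = null;                         // tells the finally block we finished
    } finally {
      if (writeEntry != null) mvcc.complete(writeEntry); // failure path cleanup
    }
    System.out.println("read point now " + mvcc.readPoint());
  }
}

Nulling writeEntry right after completeAndWait is what lets the finally block tell the success path apart from an exception thrown mid-apply.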

[04/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.TestFlusher.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.TestFlusher.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.TestFlusher.html
index aa5da14..b06a764 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.TestFlusher.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.TestFlusher.html
@@ -41,1157 +41,1234 @@
 033import java.lang.reflect.Field;
 034import java.security.PrivilegedExceptionAction;
 035import java.util.ArrayList;
-036import java.util.Collection;
-037import java.util.HashSet;
-038import java.util.List;
-039import java.util.Set;
-040import java.util.concurrent.atomic.AtomicBoolean;
-041import java.util.concurrent.atomic.AtomicInteger;
-042
-043import org.apache.commons.logging.Log;
-044import org.apache.commons.logging.LogFactory;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.fs.FSDataInputStream;
-047import org.apache.hadoop.fs.FileStatus;
-048import org.apache.hadoop.fs.FileSystem;
-049import org.apache.hadoop.fs.Path;
-050import org.apache.hadoop.fs.PathFilter;
-051import org.apache.hadoop.hbase.Cell;
-052import org.apache.hadoop.hbase.HBaseConfiguration;
-053import org.apache.hadoop.hbase.HBaseTestingUtility;
-054import org.apache.hadoop.hbase.HColumnDescriptor;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HTableDescriptor;
-058import org.apache.hadoop.hbase.KeyValue;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MiniHBaseCluster;
-061import org.apache.hadoop.hbase.ServerName;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-064import org.apache.hadoop.hbase.client.Delete;
-065import org.apache.hadoop.hbase.client.Get;
-066import org.apache.hadoop.hbase.client.Put;
-067import org.apache.hadoop.hbase.client.Result;
-068import org.apache.hadoop.hbase.client.ResultScanner;
-069import org.apache.hadoop.hbase.client.Scan;
-070import org.apache.hadoop.hbase.client.Table;
-071import org.apache.hadoop.hbase.master.HMaster;
-072import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-073import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-074import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-075import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
-076import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
-077import org.apache.hadoop.hbase.regionserver.FlushRequester;
-078import org.apache.hadoop.hbase.regionserver.HRegion;
-079import org.apache.hadoop.hbase.regionserver.HRegionServer;
-080import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
-081import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-082import org.apache.hadoop.hbase.regionserver.Region;
-083import org.apache.hadoop.hbase.regionserver.RegionScanner;
-084import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-085import org.apache.hadoop.hbase.regionserver.Store;
-086import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-087import org.apache.hadoop.hbase.security.User;
-088import org.apache.hadoop.hbase.testclassification.MediumTests;
-089import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-090import org.apache.hadoop.hbase.util.Bytes;
-091import org.apache.hadoop.hbase.util.EnvironmentEdge;
-092import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-093import org.apache.hadoop.hbase.util.FSUtils;
-094import org.apache.hadoop.hbase.util.HFileTestUtil;
-095import org.apache.hadoop.hbase.util.Pair;
-096import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-097import org.apache.hadoop.hbase.wal.WAL;
-098import org.apache.hadoop.hbase.wal.WALFactory;
-099import org.apache.hadoop.hbase.wal.WALKey;
-100import org.apache.hadoop.hbase.wal.WALSplitter;
-101import org.apache.hadoop.hdfs.DFSInputStream;
-102import org.junit.After;
-103import org.junit.AfterClass;
-104import org.junit.Before;
-105import org.junit.BeforeClass;
-106import org.junit.Rule;
-107import org.junit.Test;
-108import org.junit.experimental.categories.Category;
-109import org.junit.rules.TestName;
-110import org.mockito.Mockito;
-111import org.mockito.invocation.InvocationOnMock;
-112import org.mockito.stubbing.Answer;
-113
-114/**
-115 * Test replay of edits out of a WAL split.
-116 */
-117@Category({RegionServerTests.class, MediumTests.class})
-118public class TestWALReplay {
-119  private static final Log LOG = LogFactory.getLog(TestWALReplay.c
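
The truncated class declaration above shows the JUnit 4 shape this test keeps through the patch: a @Category marker naming both a component and a test size. A stand-alone sketch of that shape, with stub interfaces standing in for the org.apache.hadoop.hbase.testclassification types:

import org.junit.Test;
import org.junit.experimental.categories.Category;

// stand-ins for org.apache.hadoop.hbase.testclassification.RegionServerTests / MediumTests
interface RegionServerTests {}
interface MediumTests {}

@Category({RegionServerTests.class, MediumTests.class})
public class TestWALReplayShapeSketch {
  @Test
  public void replaysEditsAfterSplit() {
    // a real test writes edits, splits the WAL, reopens the region, and verifies the replay
  }
}

Category interfaces like these let a build run, say, only MediumTests for a component without touching the test bodies.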

[43/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index aaa2515..b5f4032 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -161,14 +161,14 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
-org.apache.hadoop.hbase.filter.FilterList.Operator
-org.apache.hadoop.hbase.filter.Filter.ReturnCode
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 646fd64..73842d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -270,12 +270,12 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.io.hfile.BlockPriority
+org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
+org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
 org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
-org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
+org.apache.hadoop.hbase.io.hfile.BlockPriority
 org.apache.hadoop.hbase.io.hfile.BlockType
-org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
index e166280..e9cdbb2 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
@@ -267,8 +267,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter
 org.apache.hadoop.hbase.mapreduce.TableSplit.Version
+org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter
 org.apache.hadoop.hbase.mapreduce.CellCounter.CellCounterMapper.Counters
 org.apache.hadoop.hbase.mapreduce.RowCounter.RowCounterMapper.Counters
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 615e647..5261c33 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -319,10 +319,10 @@
 java.lang.Enum

[48/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 0030a24..d2a7403 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -90,429 +90,448 @@
 082   */
 083  private static final String OP_ATTRIBUTE_TTL = "_ttl";
 084
-085  protected byte [] row = null;
-086  protected long ts = HConstants.LATEST_TIMESTAMP;
-087  protected Durability durability = Durability.USE_DEFAULT;
-088
-089  // A Map sorted by column family.
-090  protected NavigableMap> familyMap =
-091    new TreeMap>(Bytes.BYTES_COMPARATOR);
-092
-093  @Override
-094  public CellScanner cellScanner() {
-095    return CellUtil.createCellScanner(getFamilyCellMap());
-096  }
-097
-098  /**
-099   * Creates an empty list if one doesn't exist for the given column family
-100   * or else it returns the associated list of Cell objects.
-101   *
-102   * @param family column family
-103   * @return a list of Cell objects, returns an empty list if one doesn't exist.
-104   */
-105  List getCellList(byte[] family) {
-106    List list = this.familyMap.get(family);
-107    if (list == null) {
-108      list = new ArrayList();
-109    }
-110    return list;
-111  }
-112
-113  /*
-114   * Create a KeyValue with this objects row key and the Put identifier.
-115   *
-116   * @return a KeyValue with this objects row key and the Put identifier.
-117   */
-118  KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) {
-119    return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, value);
-120  }
-121
-122  /**
-123   * Create a KeyValue with this objects row key and the Put identifier.
-124   * @param family
-125   * @param qualifier
-126   * @param ts
-127   * @param value
-128   * @param tags - Specify the Tags as an Array
-129   * @return a KeyValue with this objects row key and the Put identifier.
-130   */
-131  KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) {
-132    KeyValue kvWithTag = new KeyValue(this.row, family, qualifier, ts, value, tags);
-133    return kvWithTag;
-134  }
-135
-136  /*
-137   * Create a KeyValue with this objects row key and the Put identifier.
-138   *
-139   * @return a KeyValue with this objects row key and the Put identifier.
-140   */
-141  KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value,
-142      Tag[] tags) {
-143    return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length,
-144        family, 0, family == null ? 0 : family.length,
-145        qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null);
-146  }
-147
-148  /**
-149   * Compile the column family (i.e. schema) information
-150   * into a Map. Useful for parsing and aggregation by debugging,
-151   * logging, and administration tools.
-152   * @return Map
-153   */
-154  @Override
-155  public Map getFingerprint() {
-156    Map map = new HashMap();
-157    List families = new ArrayList();
-158    // ideally, we would also include table information, but that information
-159    // is not stored in each Operation instance.
-160    map.put("families", families);
-161    for (Map.Entry> entry : this.familyMap.entrySet()) {
-162      families.add(Bytes.toStringBinary(entry.getKey()));
-163    }
-164    return map;
-165  }
-166
-167  /**
-168   * Compile the details beyond the scope of getFingerprint (row, columns,
-169   * timestamps, etc.) into a Map along with the fingerprinted information.
-170   * Useful for debugging, logging, and administration tools.
-171   * @param maxCols a limit on the number of columns output prior to truncation
-172   * @return Map
-173   */
-174  @Override
-175  public Map toMap(int maxCols) {
-176    // we start with the fingerprint map and build on top of it.
-177    Map map = getFingerprint();
-178    // replace the fingerprint's simple list of families with a
-179    // map from column families to lists of qualifiers and kv details
-180    Map>> columns =
-181      new HashMap>>();
-182    map.put("families", columns);
-183    map.put("row", Bytes.toStringBinary(this.row));
-184    int colCount = 0;
-185    // iterate through all column families affected
-186    for (Map.Entry> entry : this.familyMap.entrySet()) {
-187      // map from this family to details for each cell affected withi
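
The removed-then-re-added block above is Mutation's family-map idiom: cells kept sorted by column family under a byte-wise comparator, with getCellList returning an empty list instead of null for an absent family. A small self-contained sketch of the same idiom -- Java 17 for the record type, SimpleCell as a stand-in for org.apache.hadoop.hbase.Cell, and Arrays::compare as a stand-in for Bytes.BYTES_COMPARATOR:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class FamilyMapSketch {
  record SimpleCell(byte[] family, byte[] qualifier, byte[] value) {}

  // sorted by column family, byte-wise, like Bytes.BYTES_COMPARATOR
  private final NavigableMap<byte[], List<SimpleCell>> familyMap =
      new TreeMap<>(Arrays::compare);

  // empty list rather than null when the family is absent, as in the hunk above
  List<SimpleCell> getCellList(byte[] family) {
    List<SimpleCell> list = familyMap.get(family);
    return list != null ? list : new ArrayList<>();
  }

  void add(SimpleCell cell) {
    familyMap.computeIfAbsent(cell.family(), k -> new ArrayList<>()).add(cell);
  }

  public static void main(String[] args) {
    FamilyMapSketch m = new FamilyMapSketch();
    m.add(new SimpleCell("cf".getBytes(), "q".getBytes(), "v".getBytes()));
    System.out.println(m.getCellList("cf".getBytes()).size());    // 1
    System.out.println(m.getCellList("other".getBytes()).size()); // 0, never null
  }
}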

[31/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 1d64963..a232cc8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html

[40/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

2016-02-18 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
index 36f415a..a0bb02a 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogReplayOutputSink.html
@@ -104,7 +104,7 @@
- class WALSplitter.LogReplayOutputSink
+ class WALSplitter.LogReplayOutputSink
 extends WALSplitter.OutputSink
 Class that manages to replay edits from WAL files directly to assigned fail over region servers
@@ -333,7 +333,7 @@
 BUFFER_THRESHOLD
-private static final double BUFFER_THRESHOLD
+private static final double BUFFER_THRESHOLD
 See Also: Constant Field Values
@@ -343,7 +343,7 @@
 KEY_DELIMITER
-private static final String KEY_DELIMITER
+private static final String KEY_DELIMITER
 See Also: Constant Field Values
@@ -353,7 +353,7 @@
 waitRegionOnlineTimeOut
-private long waitRegionOnlineTimeOut
+private long waitRegionOnlineTimeOut
@@ -362,7 +362,7 @@
 recoveredRegions
-private final Set<String> recoveredRegions
+private final Set<String> recoveredRegions
@@ -371,7 +371,7 @@
 writers
-private final Map<String,WALSplitter.RegionServerWriter> writers
+private final Map<String,WALSplitter.RegionServerWriter> writers
@@ -380,7 +380,7 @@
 onlineRegions
-private final Map<String,HRegionLocation> onlineRegions
+private final Map<String,HRegionLocation> onlineRegions
@@ -389,7 +389,7 @@
 tableNameToHConnectionMap
-private Map tableNameToHConnectionMap
+private Map tableNameToHConnectionMap
@@ -398,7 +398,7 @@
 serverToBufferQueueMap
-private Map<String,List<...>> serverToBufferQueueMap
+private Map<String,List<...>> serverToBufferQueueMap
 Map key -> value layout: <servername>:<table name> -> Queue<Row>
@@ -409,7 +409,7 @@
 thrown
-private http://docs.oracl
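
The field listing above documents the sink's key layout: "<servername>:<table name>" mapping to a queue of buffered edits. A hedged sketch of that grouping with simplified, hypothetical types -- not the HBase internals:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReplayBufferSketch {
  record Edit(String encodedRegion, String payload) {}

  // key layout as documented above: "<servername>:<table name>" -> queue of edits
  private final Map<String, List<Edit>> serverToBufferQueueMap = new HashMap<>();

  void buffer(String server, String table, Edit edit) {
    String key = server + ":" + table;   // KEY_DELIMITER idea from the field list above
    serverToBufferQueueMap.computeIfAbsent(key, k -> new ArrayList<>()).add(edit);
  }

  // drain once a batch is big enough, mirroring the minBatchSize idea
  List<Edit> drainIfReady(String key, int minBatchSize) {
    List<Edit> q = serverToBufferQueueMap.get(key);
    if (q == null || q.size() < minBatchSize) return List.of();
    List<Edit> batch = new ArrayList<>(q);
    q.clear();
    return batch;
  }

  public static void main(String[] args) {
    ReplayBufferSketch sink = new ReplayBufferSketch();
    sink.buffer("rs1,16020", "t1", new Edit("abc", "put"));
    System.out.println(sink.drainIfReady("rs1,16020:t1", 1).size()); // 1
  }
}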

[12/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
@@ -184,2145 +184,2183 @@
 176  // Min batch size when replay WAL edits
 177  private final int minBatchSize;
 178
-179  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
-180      FileSystem fs, LastSequenceId idChecker,
-181      CoordinatedStateManager csm, RecoveryMode mode) {
-182    this.conf = HBaseConfiguration.create(conf);
-183    String codecClassName = conf
-184        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-185    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
-186    this.rootDir = rootDir;
-187    this.fs = fs;
-188    this.sequenceIdChecker = idChecker;
-189    this.csm = (BaseCoordinatedStateManager)csm;
-190    this.walFactory = factory;
-191    this.controller = new PipelineController();
-192
-193    entryBuffers = new EntryBuffers(controller,
-194        this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-195            128*1024*1024));
+179  // the file being split currently
+180  private FileStatus fileBeingSplit;
+181
+182  @VisibleForTesting
+183  WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
+184      FileSystem fs, LastSequenceId idChecker,
+185      CoordinatedStateManager csm, RecoveryMode mode) {
+186    this.conf = HBaseConfiguration.create(conf);
+187    String codecClassName = conf
+188        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+189    this.conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);
+190    this.rootDir = rootDir;
+191    this.fs = fs;
+192    this.sequenceIdChecker = idChecker;
+193    this.csm = (BaseCoordinatedStateManager)csm;
+194    this.walFactory = factory;
+195    this.controller = new PipelineController();
 196
-197    // a larger minBatchSize may slow down recovery because replay writer has to wait for
-198    // enough edits before replaying them
-199    this.minBatchSize = this.conf.getInt("hbase.regionserver.wal.logreplay.batch.size", 64);
-200    this.distributedLogReplay = (RecoveryMode.LOG_REPLAY == mode);
-201
-202    this.numWriterThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
-203    if (csm != null && this.distributedLogReplay) {
-204      outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads);
-205    } else {
-206      if (this.distributedLogReplay) {
-207        LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly.");
-208      }
-209      this.distributedLogReplay = false;
-210      outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads);
-211    }
-212
-213  }
-214
-215  /**
-216   * Splits a WAL file into region's recovered-edits directory.
-217   * This is the main entry point for distributed log splitting from SplitLogWorker.
-218   *
-219   * If the log file has N regions then N recovered.edits files will be produced.
-220   *
-221   * @param rootDir
-222   * @param logfile
-223   * @param fs
-224   * @param conf
-225   * @param reporter
-226   * @param idChecker
-227   * @param cp coordination state manager
-228   * @return false if it is interrupted by the progress-able.
-229   * @throws IOException
-230   */
-231  public static boolean splitLogFile(Path rootDir, FileStatus logfile, FileSystem fs,
-232      Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker,
-233      CoordinatedStateManager cp, RecoveryMode mode, final WALFactory factory) throws IOException {
-234    WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, idChecker, cp, mode);
-235    return s.splitLogFile(logfile, reporter);
-236  }
-237
-238  // A wrapper to split one log folder using the method used by distributed
-239  // log splitting. Used by tools and unit tests. It should be package private.
-240  // It is public only because TestWALObserver is in a different package,
-241  // which uses this method to do log splitting.
-242  @VisibleForTesting
-243  public static List split(Path rootDir, Path logDir, Path oldLogDir,
-244      FileSystem fs, Configuration conf, final WALFactory factory) throws IOException {
-245    final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
-246        Collections.singletonList(logDir), null);
-247    List splits = new ArrayList();
-248    if (logfiles != null && l
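
The constructor hunk above wires the splitter from configuration: batch size and writer threads come from conf lookups with defaults, and the output sink is a LogReplayOutputSink only when both distributed log replay is requested and a coordination manager is present. A simplified sketch of that selection logic, using java.util.Properties as a stand-in for Hadoop's Configuration and hypothetical sink classes:

import java.util.Properties;

public class SplitterWiringSketch {
  interface OutputSink {}
  static class LogReplayOutputSink implements OutputSink {}
  static class LogRecoveredEditsOutputSink implements OutputSink {}

  final boolean distributedLogReplay;
  final int minBatchSize;
  final OutputSink outputSink;

  SplitterWiringSketch(Properties conf, boolean logReplayMode, Object csm) {
    // a larger min batch size can slow recovery: the replay writer waits for
    // enough edits before shipping them (default 64, as in the hunk above)
    this.minBatchSize =
        Integer.parseInt(conf.getProperty("wal.logreplay.batch.size", "64"));
    // distributed log replay needs a coordination manager; otherwise fall
    // back to writing recovered.edits files
    if (csm != null && logReplayMode) {
      this.distributedLogReplay = true;
      this.outputSink = new LogReplayOutputSink();
    } else {
      this.distributedLogReplay = false;
      this.outputSink = new LogRecoveredEditsOutputSink();
    }
  }

  public static void main(String[] args) {
    SplitterWiringSketch s = new SplitterWiringSketch(new Properties(), true, null);
    System.out.println(s.outputSink.getClass().getSimpleName()); // LogRecoveredEditsOutputSink
  }
}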


[07/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
index 6ce1a53..0468609 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.html
@@ -99,7 +99,7 @@
-public class TestWALReplay
+public class TestWALReplay
 extends java.lang.Object
 Test replay of edits out of a WAL split.
@@ -261,65 +261,103 @@
 createBasic3FamilyHTD(org.apache.hadoop.hbase.TableName tableName)

+private org.apache.hadoop.hbase.regionserver.wal.FSWALEntry
+createFSWALEntry(org.apache.hadoop.hbase.HTableDescriptor htd,
+    org.apache.hadoop.hbase.HRegionInfo hri,
+    long sequence,
+    byte[] rowName,
+    byte[] family,
+    org.apache.hadoop.hbase.util.EnvironmentEdge ee,
+    org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc,
+    int index)
+
 private TestWALReplay.MockWAL
 createMockWAL()

 private org.apache.hadoop.hbase.wal.WAL
 createWAL(org.apache.hadoop.conf.Configuration c)

+private org.apache.hadoop.hbase.regionserver.wal.WALEdit
+createWALEdit(byte[] rowName,
+    byte[] family,
+    org.apache.hadoop.hbase.util.EnvironmentEdge ee,
+    int index)
+
+private org.apache.hadoop.hbase.wal.WALKey
+createWALKey(org.apache.hadoop.hbase.TableName tableName,
+    org.apache.hadoop.hbase.HRegionInfo hri,
+    org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc)
+
 private void
 deleteDir(org.apache.hadoop.fs.Path p)

 private int
 getScannedCount(org.apache.hadoop.hbase.regionserver.RegionScanner scanner)

 private void
 moveRegionAndWait(org.apache.hadoop.hbase.regionserver.Region destRegion,
     org.apache.hadoop.hbase.regionserver.HRegionServer destServer)

 private org.apache.hadoop.fs.Path
 runWALSplit(org.apache.hadoop.conf.Configuration c)

 void
 setUp()

 static void
 setUpBeforeClass()

 void
 tearDown()

 static void
 tearDownAfterClass()

 void
 test2727()
 Tests for hbase-2727.

 void
 testCompactedBulkLoadedFiles()
 HRegion test case that is made of a major compacted HFile (created with three bulk loaded files) and an edit in the memstore.

 void
 testDatalossWhenInputError()
 testcase for https://issues.apache.org/jira/browse/HBASE-15252

+private void
+testNameConflictWhenSplit(boolean largeFirst)
+testcase for https://issues.apache.org/jira/browse/HBASE-14949.
+
+void
+testNameConflictWhenSplit0()
+
+void
+testNameConflictWhenSplit1()
+
 void
 testRegionMadeOfBulkLoadedFilesOnly()
@@ -361,6 +399,11 @@
 void
 testSequentialEditLogSeqNum()

+private void
+writerWALFile(org.apache.hadoop.fs.Path file,
+    java.util.List entries)
+
@@ -389,7 +432,7 @@
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
@@ -398,7 +441,7 @@
 TEST_UTIL
-static final HBaseTestingUtility TEST_UTIL
+static final HBaseTestingUtility TEST_UTIL
@@ -407,7 +450,7 @@
 ee
-private final org.apache.hadoop.hbase.util.EnvironmentEdge ee
+private final org.apache.hadoop.hbase.util.EnvironmentEdge ee
@@ -416,7 +459,7 @@
 hbaseRootDir
-private org.apache.hadoop.fs.Path hbaseRootDir
+private org.apache.hadoop.fs.Path hbaseRootDir
@@ -425,7 +468,7 @@
 logName
-private java.lang.String logName
+private java.lang.
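
The new methods above -- testNameConflictWhenSplit0 and testNameConflictWhenSplit1 wrapping a private testNameConflictWhenSplit(boolean largeFirst) -- suggest a plain delegation pattern: one parameterized scenario run in both orderings without pulling in a parameterized runner. A hypothetical sketch of that shape (the body is a placeholder, not the HBASE-14949 test):

import org.junit.Test;

public class NameConflictDelegationSketch {
  private void testNameConflictWhenSplit(boolean largeFirst) throws Exception {
    // a real body would write two WAL files whose names collide after archiving,
    // in either size order, then split and replay them (see HBASE-14949)
    System.out.println("largeFirst=" + largeFirst);
  }

  @Test
  public void testNameConflictWhenSplit0() throws Exception {
    testNameConflictWhenSplit(true);   // large file first
  }

  @Test
  public void testNameConflictWhenSplit1() throws Exception {
    testNameConflictWhenSplit(false);  // small file first
  }
}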

[21/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html


[15/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
index 0597160..b444f46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html


[38/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/ProcedureInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ProcedureInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/ProcedureInfo.html
index 8178706..bf877b1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ProcedureInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ProcedureInfo.html
@@ -57,210 +57,211 @@
 049
 050  private long clientAckTime = -1;
 051
-052  public ProcedureInfo(
-053      final long procId,
-054      final String procName,
-055      final String procOwner,
-056      final ProcedureState procState,
-057      final long parentId,
-058      final NonceKey nonceKey,
-059      final ForeignExceptionMessage exception,
-060      final long lastUpdate,
-061      final long startTime,
-062      final byte[] result) {
-063    this.procId = procId;
-064    this.procName = procName;
-065    this.procOwner = procOwner;
-066    this.procState = procState;
-067    this.parentId = parentId;
-068    this.nonceKey = nonceKey;
-069    this.lastUpdate = lastUpdate;
-070    this.startTime = startTime;
-071
-072    // If the procedure is completed, we should treat exception and result differently
-073    this.exception = exception;
-074    this.result = result;
-075  }
-076
-077  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL",
-078      justification="Intentional; calling super class clone doesn't make sense here.")
-079  public ProcedureInfo clone() {
-080    return new ProcedureInfo(procId, procName, procOwner, procState, parentId, nonceKey,
-081      exception, lastUpdate, startTime, result);
-082  }
-083
-084  public long getProcId() {
-085    return procId;
-086  }
-087
-088  public String getProcName() {
-089    return procName;
-090  }
-091
-092  public String getProcOwner() {
-093    return procOwner;
-094  }
-095
-096  public ProcedureState getProcState() {
-097    return procState;
-098  }
-099
-100  public boolean hasParentId() {
-101    return (parentId != -1);
-102  }
-103
-104  public long getParentId() {
-105    return parentId;
-106  }
-107
-108  public NonceKey getNonceKey() {
-109    return nonceKey;
-110  }
-111
-112  public boolean isFailed() {
-113    return exception != null;
-114  }
-115
-116  public IOException getException() {
-117    if (isFailed()) {
-118      return ForeignExceptionUtil.toIOException(exception);
-119    }
-120    return null;
-121  }
-122
-123  @InterfaceAudience.Private
-124  public ForeignExceptionMessage getForeignExceptionMessage() {
-125    return exception;
-126  }
-127
-128  public String getExceptionCause() {
-129    assert isFailed();
-130    return exception.getGenericException().getClassName();
-131  }
-132
-133  public String getExceptionMessage() {
-134    assert isFailed();
-135    return exception.getGenericException().getMessage();
-136  }
-137
-138  public String getExceptionFullMessage() {
-139    assert isFailed();
-140    return getExceptionCause() + " - " + getExceptionMessage();
-141  }
-142
-143  public boolean hasResultData() {
-144    return result != null;
-145  }
-146
-147  public byte[] getResult() {
-148    return result;
-149  }
-150
-151  public long getStartTime() {
-152    return startTime;
-153  }
-154
-155  public long getLastUpdate() {
-156    return lastUpdate;
-157  }
-158
-159  public long executionTime() {
-160    return lastUpdate - startTime;
-161  }
-162
-163  @InterfaceAudience.Private
-164  public boolean hasClientAckTime() {
-165    return clientAckTime > 0;
-166  }
-167
-168  @InterfaceAudience.Private
-169  public long getClientAckTime() {
-170    return clientAckTime;
-171  }
-172
-173  @InterfaceAudience.Private
-174  public void setClientAckTime(final long timestamp) {
-175    this.clientAckTime = timestamp;
-176  }
-177
-178  /**
-179   * @return Convert the current {@link ProcedureInfo} into a Protocol Buffers Procedure
-180   * instance.
-181   */
-182  @InterfaceAudience.Private
-183  public static ProcedureProtos.Procedure convertToProcedureProto(
-184      final ProcedureInfo procInfo) {
-185    ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder();
-186
-187    builder.setClassName(procInfo.getProcName());
-188    builder.setProcId(procInfo.getProcId());
-189    builder.setStartTime(procInfo.getStartTime());
-190    builder.setState(procInfo.getProcState());
-191    builder.setLastUpdate(procInfo.getLastUpdate());
-192
-193    if (procInfo.hasParentId()) {
-194      builder.setParentId(procInfo.getParentId());
-195    }
-196
-197    if (procInfo.getProcOwner() != null) {
-198      builder.setOwner(procInfo.getProcOwner());
-199    }
-200
-201    if (procInfo.isFailed()) {
-202      builder.setException(procInfo.getForeignExceptionMessage());
-203    }
-204
-205    if (procInfo.hasResultData()) {
-20
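
convertToProcedureProto above follows the usual builder conversion idiom: required fields are set unconditionally, optional ones (parent id, owner, exception, result) only when present. A minimal sketch of the idiom, with a hand-rolled Builder standing in for the generated ProcedureProtos.Procedure.Builder:

public class ProcedureProtoSketch {
  static class Builder {
    private final StringBuilder out = new StringBuilder();
    Builder set(String field, Object value) {
      out.append(field).append('=').append(value).append(' ');
      return this;
    }
    String build() { return out.toString().trim(); }
  }

  record Info(long procId, String procName, String procOwner, long parentId) {
    boolean hasParentId() { return parentId != -1; }
  }

  static String convert(Info procInfo) {
    Builder builder = new Builder();
    builder.set("className", procInfo.procName());   // always present
    builder.set("procId", procInfo.procId());
    if (procInfo.hasParentId()) {                    // optional: -1 means "no parent"
      builder.set("parentId", procInfo.parentId());
    }
    if (procInfo.procOwner() != null) {              // optional: owner may be unset
      builder.set("owner", procInfo.procOwner());
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(convert(new Info(42, "CreateTableProcedure", null, -1)));
  }
}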

[25/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index c9f2d82..04fbb3a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-063@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-064@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
-065@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+062@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
+063@org.jamon.annotations.Argument(name 
= "servers", type = "List"),
+064@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+065@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
 066@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-067@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-068@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-069@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
-070@org.jamon.annotations.Argument(name 
= "servers", type = "List")})
+067@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+068@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+069@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+070@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,74 +116,74 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 22, 1
-112public void 
setMetaLocation(ServerName metaLocation)
+111// 21, 1
+112public void 
setFrags(Map frags)
 113{
-114  // 22, 1
-115  m_metaLocation = metaLocation;
-116  m_metaLocation__IsNotDefault = 
true;
+114  // 21, 1
+115  m_frags = frags;
+116  m_frags__IsNotDefault = true;
 117}
-118public ServerName getMetaLocation()
+118public Map 
getFrags()
 119{
-120  return m_metaLocation;
+120  return m_frags;
 121}
-122private ServerName m_metaLocation;
-123public boolean 
getMetaLocation__IsNotDefault()
+122private Map 
m_frags;
+123public boolean 
getFrags__IsNotDefault()
 124{
-125  return 
m_metaLocation__IsNotDefault;
+125  return m_frags__IsNotDefault;
 126}
-127private boolean 
m_metaLocation__IsNotDefault;
-128// 28, 1
-129public void 
setServerManager(ServerManager serverManager)
+127private boolean 
m_frags__IsNotDefault;
+128// 23, 1
+129public void 
setServers(List servers)
 130{
-131  // 28, 1
-132  m_serverManager = serverManager;
-133  m_serverManager__IsNotDefault = 
true;
+131  // 23, 1
+132  m_servers = servers;
+133  m_servers__IsNotDefault = true;
 134}
-135public ServerManager 
getServerManager()
+135public List 
getServers()
 136{
-137  return m_serverManager;
+137  return m_servers;
 138}
-139private ServerManager 
m_serverManager;
-140public boolean 
getServerManager__IsNotDefault()
+139private List 
m_servers;
+140public boolean 
getServers__IsNotDefault()
 141{
-142  return 
m_serverManager__IsNotDefault;
+142  return m_servers__IsNotDefault;
 143}
-144private boolean 
m_serverManager__IsNotDefault;
-145// 21, 1
-146public void 
setFrags(Map frags)
+144private boolean 
m_servers__IsNotDefault;
+145// 22, 1
+146public void 
setMetaLocation(ServerName metaLocation)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 22, 1
+149  m_metaLocation = metaLocation;
+150  m_metaLocation__IsNotDefault = 
true;
 151}
-152public Map 
getFrags()
+152public ServerName getMetaLocation()
 153{
-154  return m_frags;
+154  return m_metaLocation;
 155}
-156private Map 
m_frags;
-157public boolean 
getFrags__IsNot
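
The churn above is largely reordering of generated members; the pattern itself is Jamon's ImplData contract: each optional template argument gets a backing field, a setter that also flips an __IsNotDefault flag, and getters for both, so the renderer can distinguish "never set" from "explicitly set to null". A small sketch of that contract:

public class ImplDataSketch {
  private String m_filter;
  private boolean m_filter__IsNotDefault;

  public void setFilter(String filter) {
    m_filter = filter;
    m_filter__IsNotDefault = true;   // remember that the caller supplied a value
  }
  public String getFilter() { return m_filter; }
  public boolean getFilter__IsNotDefault() { return m_filter__IsNotDefault; }

  public static void main(String[] args) {
    ImplDataSketch d = new ImplDataSketch();
    System.out.println(d.getFilter__IsNotDefault()); // false: renderer uses the default
    d.setFilter(null);                               // explicit null still counts as "set"
    System.out.println(d.getFilter__IsNotDefault()); // true
  }
}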

[45/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 3acde3f..0fe27ea 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2016 The Apache Software Foundation
 
   File: 1683,
- Errors: 12749,
+ Errors: 12747,
  Warnings: 0,
  Infos: 0
   
@@ -1427,7 +1427,7 @@ under the License.
   0
 
 
-  211
+  210
 
   
   
@@ -19193,7 +19193,7 @@ under the License.
   0
 
 
-  70
+  69
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/coc.html
--
diff --git a/coc.html b/coc.html
index 2b7410c..ca221f8 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Code of Conduct Policy
@@ -331,7 +331,7 @@ For flagrant violations requiring a firm response the PMC may opt to skip early
 The Apache Software Foundation. All rights reserved.

-  Last Published: 2016-02-17
+  Last Published: 2016-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 30f262e..fed6d1d 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using Cygwin

@@ -673,7 +673,7 @@ Now your HBase server is running, start coding and build that next
 The Apache Software Foundation. All rights reserved.

-  Last Published: 2016-02-17
+  Last Published: 2016-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 5dd3bea..9a0eca9 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -518,7 +518,7 @@
 The Apache Software Foundation. All rights reserved.

-  Last Published: 2016-02-17
+  Last Published: 2016-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 85f774b..90517a5 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -1617,7 +1617,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-02-17
+  Last Published: 
2016-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 540d6b5..2ec3e4e 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -312,7 +312,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-02-17
+  Last Published: 
2016-02-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 46b4482..df30824 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -786,7 +786,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 

[09/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/distribution-management.html
--
[distribution-management.html, export_control.html and the generated
hbase-annotations/ pages (checkstyle.html, dependencies.html,
dependency-convergence.html, dependency-info.html): regenerated page
boilerplate only. In each file the footer line "Last Published: 2016-02-17"
moves to "Last Published: 2016-02-18"; the dependency-info.html hunk is
truncated in this message.]

[34/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
[Regenerated source-view HTML for HRegion. The substantive change visible in
this excerpt is that the coprocessor pre-call short-circuit in doDelta() now
honors the returnResults flag:

-6975    if (cpResult != null) return cpResult;
+6975    if (cpResult != null) {
+6976      return returnResults? cpResult: null;
+6977    }

The remaining hunks are line-number shifts of otherwise unchanged rendered
source; the diff is truncated in this message.]

[36/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
[Regenerated source-view HTML for Mutation, renumbering the rendered source
around the returnResults/RETURN_RESULTS changes (see the HBASE-15285 addendum
commits below). The visible lines are unchanged context; generic type
parameters were stripped by the HTML rendering and the diff is truncated in
this message.]

[17/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html
--
[Regenerated source-view HTML for WALSplitter, mirroring the HBASE-14949
change shown in full in the source commits below: a new fileBeingSplit field,
@VisibleForTesting annotations on the constructor and splitLogFile(), and the
recovered-edits file naming change. The diff is truncated in this message.]


[50/51] [partial] hbase-site git commit: Published site at d2ba87509b8d193f58183beff4ab76c7edf47e11.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f32f549a/apidocs/org/apache/hadoop/hbase/client/Mutation.html
--
[Regenerated API docs for Mutation covering the row, ts, durability and
familyMap fields and the cellScanner, getFingerprint, toMap, setDurability,
getDurability, getFamilyCellMap and setFamilyCellMap members. The old and new
lines render identically here because the differences are confined to HTML
anchors stripped from this text; the diff is truncated in this message.]
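
Since the rendered docs above name the Mutation API surface without usable
detail, here is a minimal hedged sketch of exercising it, assuming the HBase
1.x client (Put extends Mutation; all calls shown exist on those classes):

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationApiSketch {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Trade durability for latency: skip the write-ahead log for this Put.
    put.setDurability(Durability.SKIP_WAL);
    System.out.println(put.getDurability());    // SKIP_WAL
    System.out.println(put.getFamilyCellMap()); // one entry per column family
  }
}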

hbase git commit: HBASE-15285 ADDENDUM make RETURN_RESULTS attribute name protected to match branch-1.1 and deprecated to warn of 2.0 (Sean Busbey)

Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 34f852ab2 -> d4388ba01


HBASE-15285 ADDENDUM make RETURN_RESULTS attribute name protected to match 
branch-1.1 and deprecated to warn of 2.0 (Sean Busbey)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4388ba0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4388ba0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4388ba0

Branch: refs/heads/branch-1.2
Commit: d4388ba0101928d1d272b2e88dddfd5203400ee8
Parents: 34f852a
Author: Jonathan M Hsieh 
Authored: Thu Feb 18 07:17:10 2016 -0800
Committer: Jonathan M Hsieh 
Committed: Thu Feb 18 07:19:14 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4388ba0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 138a0fe..d11c459 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -83,7 +83,11 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
*/
   private static final String OP_ATTRIBUTE_TTL = "_ttl";
 
-  private static final String RETURN_RESULTS = "_rr_";
+  /**
+   * @deprecated this field is private as of HBase 2.0.
+   */
+  @Deprecated
+  protected static final String RETURN_RESULTS = "_rr_";
 
   protected byte [] row = null;
   protected long ts = HConstants.LATEST_TIMESTAMP;
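
For context, RETURN_RESULTS ("_rr_") is the attribute key under which the
client stores the returnResults flag on a mutation. A minimal sketch of
setting and reading that flag through the public client API, assuming the
HBase 1.x Increment methods shown (the attribute key itself stays internal):

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class ReturnResultsSketch {
  public static void main(String[] args) {
    Increment inc = new Increment(Bytes.toBytes("row1"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
    // Ask the server not to ship the post-increment cell back to the client;
    // internally this is recorded under the "_rr_" attribute.
    inc.setReturnResults(false);
    System.out.println("returnResults=" + inc.isReturnResults()); // false
  }
}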



hbase git commit: HBASE-15285 ADDENDUM make RETURN_RESULTS attribute name protected to match branch-1.1 and deprecated to warn of 2.0 (Sean Busbey)

Repository: hbase
Updated Branches:
  refs/heads/branch-1 7a1c407ea -> 7b4646fe3


HBASE-15285 ADDENDUM make RETURN_RESULTS attribute name protected to match 
branch-1.1 and deprecated to warn of 2.0 (Sean Busbey)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b4646fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b4646fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b4646fe

Branch: refs/heads/branch-1
Commit: 7b4646fe3dc44607d33c7a05112e5e1a7cf5649c
Parents: 7a1c407
Author: Jonathan M Hsieh 
Authored: Thu Feb 18 07:17:10 2016 -0800
Committer: Jonathan M Hsieh 
Committed: Thu Feb 18 07:17:10 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b4646fe/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 138a0fe..d11c459 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -83,7 +83,11 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
*/
   private static final String OP_ATTRIBUTE_TTL = "_ttl";
 
-  private static final String RETURN_RESULTS = "_rr_";
+  /**
+   * @deprecated this field is private as of HBase 2.0.
+   */
+  @Deprecated
+  protected static final String RETURN_RESULTS = "_rr_";
 
   protected byte [] row = null;
   protected long ts = HConstants.LATEST_TIMESTAMP;



hbase git commit: HBASE-14949 addendum fix compilation error on branch-1

Repository: hbase
Updated Branches:
  refs/heads/branch-1 b8f92312b -> 7a1c407ea


HBASE-14949 addendum fix compilation error on branch-1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a1c407e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a1c407e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a1c407e

Branch: refs/heads/branch-1
Commit: 7a1c407ea64337505da21727976c494ba741f8ef
Parents: b8f9231
Author: zhangduo 
Authored: Thu Feb 18 21:04:14 2016 +0800
Committer: zhangduo 
Committed: Thu Feb 18 21:04:14 2016 +0800

--
 .../org/apache/hadoop/hbase/wal/WALSplitter.java| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7a1c407e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 36df1fd..bb23576 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -364,7 +364,7 @@ public class WALSplitter {
   }
   lastFlushedSequenceIds.put(encodedRegionNameAsStr, 
lastFlushedSequenceId);
 }
-if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
+if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
   editsSkipped++;
   continue;
 }
@@ -564,7 +564,7 @@ public class WALSplitter {
 // Append fileBeingSplit to prevent name conflict since we may have 
duplicate wal entries now.
 // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
 // region's replayRecoveredEdits will not delete it
-String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getSequenceId());
+String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum());
 fileName = getTmpRecoveredEditsFileName(fileName + "-" + 
fileBeingSplit.getPath().getName());
 return new Path(dir, fileName);
   }
@@ -1192,9 +1192,9 @@ public class WALSplitter {
   synchronized (regionMaximumEditLogSeqNum) {
 Long currentMaxSeqNum = regionMaximumEditLogSeqNum.get(entry.getKey()
 .getEncodedRegionName());
-if (currentMaxSeqNum == null || entry.getKey().getSequenceId() > 
currentMaxSeqNum) {
+if (currentMaxSeqNum == null || entry.getKey().getLogSeqNum() > 
currentMaxSeqNum) {
   
regionMaximumEditLogSeqNum.put(entry.getKey().getEncodedRegionName(), 
entry.getKey()
-  .getSequenceId());
+  .getLogSeqNum());
 }
   }
 }
@@ -1319,7 +1319,7 @@ public class WALSplitter {
   try (WAL.Reader reader = walFactory.createReader(fs, dst)) {
 WAL.Entry entry = reader.next();
 if (entry != null) {
-  dstMinLogSeqNum = entry.getKey().getSequenceId();
+  dstMinLogSeqNum = entry.getKey().getLogSeqNum();
 }
   } catch (EOFException e) {
 if (LOG.isDebugEnabled()) {
@@ -1540,7 +1540,7 @@ public class WALSplitter {
   }
   Writer w = createWriter(regionedits);
   LOG.debug("Creating writer path=" + regionedits);
-  return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
+  return new WriterAndPath(regionedits, w, entry.getKey().getLogSeqNum());
 }
 
 private void filterCellByStore(Entry logEntry) {
@@ -1560,7 +1560,7 @@ public class WALSplitter {
   Long maxSeqId = maxSeqIdInStores.get(family);
   // Do not skip cell even if maxSeqId is null. Maybe we are in a 
rolling upgrade,
   // or the master was crashed before and we can not get the 
information.
-  if (maxSeqId == null || maxSeqId.longValue() < 
logEntry.getKey().getSequenceId()) {
+  if (maxSeqId == null || maxSeqId.longValue() < 
logEntry.getKey().getLogSeqNum()) {
 keptCells.add(cell);
   }
 }
@@ -1862,7 +1862,7 @@ public class WALSplitter {
 }
 if (maxStoreSequenceIds != null) {
   Long maxStoreSeqId = maxStoreSequenceIds.get(family);
-  if (maxStoreSeqId == null || maxStoreSeqId >= 
entry.getKey().getSequenceId()) {
+  if (maxStoreSeqId == null || maxStoreSeqId >= 
entry.getKey().getLogSeqNum()) {
 // skip current kv if column family doesn't exist anymore or 
already flushed
 skippedCells.add(cell);
 continue;
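
The rename is mechanical, but several of the touched call sites feed the same
bookkeeping: tracking the maximum edit sequence id seen per region. A
self-contained sketch of that pattern (names are illustrative, not the actual
WALSplitter fields):

import java.util.HashMap;
import java.util.Map;

public class MaxSeqTracker {
  private final Map<String, Long> regionMaxSeq = new HashMap<>();

  // Record seqId only if it exceeds the region's current maximum.
  public synchronized void update(String encodedRegionName, long seqId) {
    Long current = regionMaxSeq.get(encodedRegionName);
    if (current == null || seqId > current) {
      regionMaxSeq.put(encodedRegionName, seqId);
    }
  }

  public synchronized Long get(String encodedRegionName) {
    return regionMaxSeq.get(encodedRegionName);
  }

  public static void main(String[] args) {
    MaxSeqTracker tracker = new MaxSeqTracker();
    tracker.update("region-a", 5L);
    tracker.update("region-a", 3L); // ignored: not above the current max
    System.out.println(tracker.get("region-a")); // 5
  }
}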



hbase git commit: HBASE-14949 Resolve name conflict when splitting if there are duplicated WAL entries

Repository: hbase
Updated Branches:
  refs/heads/branch-1 5e7fecc15 -> b8f92312b


HBASE-14949 Resolve name conflict when splitting if there are duplicated WAL 
entries


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8f92312
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8f92312
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8f92312

Branch: refs/heads/branch-1
Commit: b8f92312b3e683c0181c4baac088f520cf7d0656
Parents: 5e7fecc
Author: zhangduo 
Authored: Thu Feb 18 10:31:01 2016 +0800
Committer: zhangduo 
Committed: Thu Feb 18 19:49:36 2016 +0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java|  88 +++-
 .../hbase/regionserver/wal/TestWALReplay.java   | 105 ---
 2 files changed, 154 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8f92312/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 42bfabe..36df1fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -177,6 +177,10 @@ public class WALSplitter {
   // Min batch size when replay WAL edits
   private final int minBatchSize;
 
+  // the file being split currently
+  private FileStatus fileBeingSplit;
+
+  @VisibleForTesting
   WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
   FileSystem fs, LastSequenceId idChecker,
   CoordinatedStateManager csm, RecoveryMode mode) {
@@ -267,6 +271,7 @@ public class WALSplitter {
* log splitting implementation, splits one log file.
* @param logfile should be an actual log file.
*/
+  @VisibleForTesting
   boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) 
throws IOException {
 Preconditions.checkState(status == null);
 Preconditions.checkArgument(logfile.isFile(),
@@ -285,6 +290,7 @@ public class WALSplitter {
 TaskMonitor.get().createStatus(
   "Splitting log file " + logfile.getPath() + "into a temporary 
staging area.");
 Reader in = null;
+this.fileBeingSplit = logfile;
 try {
   long logLength = logfile.getLen();
   LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
@@ -358,7 +364,7 @@ public class WALSplitter {
   }
   lastFlushedSequenceIds.put(encodedRegionNameAsStr, 
lastFlushedSequenceId);
 }
-if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
+if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
   editsSkipped++;
   continue;
 }
@@ -444,7 +450,7 @@ public class WALSplitter {
 finishSplitLogFile(rootdir, oldLogDir, logPath, conf);
   }
 
-  static void finishSplitLogFile(Path rootdir, Path oldLogDir,
+  private static void finishSplitLogFile(Path rootdir, Path oldLogDir,
   Path logPath, Configuration conf) throws IOException {
 List processedLogs = new ArrayList();
 List corruptedLogs = new ArrayList();
@@ -518,12 +524,13 @@ public class WALSplitter {
* @param fs
* @param logEntry
* @param rootDir HBase root dir.
+   * @param fileBeingSplit the file being split currently. Used to generate 
tmp file name.
* @return Path to file into which to dump split log edits.
* @throws IOException
*/
   @SuppressWarnings("deprecation")
-  static Path getRegionSplitEditsPath(final FileSystem fs,
-  final Entry logEntry, final Path rootDir, boolean isCreate)
+  private static Path getRegionSplitEditsPath(final FileSystem fs,
+  final Entry logEntry, final Path rootDir, FileStatus fileBeingSplit)
   throws IOException {
 Path tableDir = FSUtils.getTableDir(rootDir, 
logEntry.getKey().getTablename());
 String encodedRegionName = 
Bytes.toString(logEntry.getKey().getEncodedRegionName());
@@ -551,17 +558,18 @@ public class WALSplitter {
   }
 }
 
-if (isCreate && !fs.exists(dir)) {
-  if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir);
+if (!fs.exists(dir) && !fs.mkdirs(dir)) {
+  LOG.warn("mkdir failed on " + dir);
 }
+// Append fileBeingSplit to prevent name conflict since we may have 
duplicate wal entries now.
 // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
 // region's replayRecoveredEdits will not delete it
-String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum());
-fileName = getTmpRecoveredEditsFileName(fileName);
+String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getSequenceId(

hbase git commit: HBASE-14949 Resolve name conflict when splitting if there are duplicated WAL entries

Repository: hbase
Updated Branches:
  refs/heads/master 6f8c7dca1 -> d2ba87509


HBASE-14949 Resolve name conflict when splitting if there are duplicated WAL 
entries


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d2ba8750
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d2ba8750
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d2ba8750

Branch: refs/heads/master
Commit: d2ba87509b8d193f58183beff4ab76c7edf47e11
Parents: 6f8c7dc
Author: zhangduo 
Authored: Thu Feb 18 10:31:01 2016 +0800
Committer: zhangduo 
Committed: Thu Feb 18 19:48:52 2016 +0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java|  88 +++-
 .../hbase/regionserver/wal/TestWALReplay.java   | 105 ---
 2 files changed, 154 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d2ba8750/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 8abd950..54b82b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -176,6 +176,10 @@ public class WALSplitter {
   // Min batch size when replay WAL edits
   private final int minBatchSize;
 
+  // the file being split currently
+  private FileStatus fileBeingSplit;
+
+  @VisibleForTesting
   WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
   FileSystem fs, LastSequenceId idChecker,
   CoordinatedStateManager csm, RecoveryMode mode) {
@@ -267,6 +271,7 @@ public class WALSplitter {
* log splitting implementation, splits one log file.
* @param logfile should be an actual log file.
*/
+  @VisibleForTesting
   boolean splitLogFile(FileStatus logfile, CancelableProgressable reporter) 
throws IOException {
 Preconditions.checkState(status == null);
 Preconditions.checkArgument(logfile.isFile(),
@@ -285,6 +290,7 @@ public class WALSplitter {
 TaskMonitor.get().createStatus(
   "Splitting log file " + logfile.getPath() + "into a temporary 
staging area.");
 Reader in = null;
+this.fileBeingSplit = logfile;
 try {
   long logLength = logfile.getLen();
   LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
@@ -349,7 +355,7 @@ public class WALSplitter {
   }
   lastFlushedSequenceIds.put(encodedRegionNameAsStr, 
lastFlushedSequenceId);
 }
-if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
+if (lastFlushedSequenceId >= entry.getKey().getSequenceId()) {
   editsSkipped++;
   continue;
 }
@@ -435,7 +441,7 @@ public class WALSplitter {
 finishSplitLogFile(rootdir, oldLogDir, logPath, conf);
   }
 
-  static void finishSplitLogFile(Path rootdir, Path oldLogDir,
+  private static void finishSplitLogFile(Path rootdir, Path oldLogDir,
   Path logPath, Configuration conf) throws IOException {
 List processedLogs = new ArrayList();
 List corruptedLogs = new ArrayList();
@@ -509,12 +515,13 @@ public class WALSplitter {
* @param fs
* @param logEntry
* @param rootDir HBase root dir.
+   * @param fileBeingSplit the file being split currently. Used to generate 
tmp file name.
* @return Path to file into which to dump split log edits.
* @throws IOException
*/
   @SuppressWarnings("deprecation")
-  static Path getRegionSplitEditsPath(final FileSystem fs,
-  final Entry logEntry, final Path rootDir, boolean isCreate)
+  private static Path getRegionSplitEditsPath(final FileSystem fs,
+  final Entry logEntry, final Path rootDir, FileStatus fileBeingSplit)
   throws IOException {
 Path tableDir = FSUtils.getTableDir(rootDir, 
logEntry.getKey().getTablename());
 String encodedRegionName = 
Bytes.toString(logEntry.getKey().getEncodedRegionName());
@@ -542,17 +549,18 @@ public class WALSplitter {
   }
 }
 
-if (isCreate && !fs.exists(dir)) {
-  if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir);
+if (!fs.exists(dir) && !fs.mkdirs(dir)) {
+  LOG.warn("mkdir failed on " + dir);
 }
+// Append fileBeingSplit to prevent name conflict since we may have 
duplicate wal entries now.
 // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
 // region's replayRecoveredEdits will not delete it
-String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum());
-fileName = getTmpRecoveredEditsFileName(fileName);
+String fileName = 
formatRecoveredEditsFileName(logEntry.getKey().getSequenceId());
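
The diff is cut off here, but the naming scheme it introduces is visible
above: the temporary recovered-edits file name now carries the source WAL's
file name, so duplicate sequence ids coming from different WALs can no longer
collide. A hedged sketch of that scheme (helper names follow the patch; the
zero-padded format is an assumption modeled on HBase's recovered-edits file
names):

public class RecoveredEditsName {
  // Suffix the patch keeps so a region's replayRecoveredEdits will not
  // delete the temporary file.
  private static final String RECOVERED_LOG_TMPFILE_SUFFIX = ".temp";

  // Zero-padded sequence id, mirroring formatRecoveredEditsFileName.
  static String formatRecoveredEditsFileName(long seqId) {
    return String.format("%019d", seqId);
  }

  // Append the source WAL's name to disambiguate duplicate sequence ids.
  static String tmpRecoveredEditsName(long seqId, String walFileName) {
    return formatRecoveredEditsFileName(seqId) + "-" + walFileName
        + RECOVERED_LOG_TMPFILE_SUFFIX;
  }

  public static void main(String[] args) {
    System.out.println(tmpRecoveredEditsName(42L, "wal.1455760261010"));
    // -> 0000000000000000042-wal.1455760261010.temp
  }
}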