hbase git commit: HBASE-18426 nightly job should use independent stages to check supported jdks [Forced Update!]

2017-07-28 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18426 015d219c1 -> b7d589574 (forced update)


HBASE-18426 nightly job should use independent stages to check supported jdks

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly invocation
* quiets the "zip test results" step


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b7d58957
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b7d58957
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b7d58957

Branch: refs/heads/HBASE-18426
Commit: b7d589574220adcf9ed14987a1f99af2817762b9
Parents: bdc94b1
Author: Sean Busbey 
Authored: Thu Jul 20 01:01:59 2017 -0500
Committer: Sean Busbey 
Committed: Sat Jul 29 00:48:04 2017 -0500

--
 dev-support/Jenkinsfile| 255 +++-
 dev-support/hbase_nightly_yetus.sh |  86 +++
 2 files changed, 239 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b7d58957/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 26f72d7..43108a3 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -20,10 +20,6 @@ pipeline {
   label 'Hadoop'
 }
   }
-  // work around for YETUS-508, requires maven outside of the dockerfile
-  tools {
-maven 'Maven (latest)'
-  }
   triggers {
 cron('@daily')
   }
@@ -38,8 +34,23 @@ pipeline {
 BASEDIR = "${env.WORKSPACE}/component"
 YETUS_RELEASE = '0.5.0'
 // where we'll write everything from different steps.
-OUTPUT_RELATIVE = 'output'
-OUTPUTDIR = "${env.WORKSPACE}/output"
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on file names.
+ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
+TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
@@ -48,24 +59,9 @@ pipeline {
 booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.')
   }
   stages {
-stage ('yetus check') {
-  environment {
-PROJECT = 'hbase'
-PROJECT_PERSONALITY = 'https://git-wip-us.apache.org/repos/asf?p=hbase.git;a=blob_plain;f=dev-support/hbase-personality.sh;hb=refs/heads/master'
-// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
-AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
-WHITESPACE_IGNORE_LIST = '.*/generated/.*'
-// output from surefire; sadly the archive function in yetus only works on file names.
-ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
-// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
-TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
-BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
-EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-  }
+stage ('yetus install') {
   steps {
-// TODO we can move the yetus install into a different stage and then use stash to deploy it.
 sh
[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index a12c0f2..9c0bc84 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html

Regenerated javadoc for the public static interface HBaseFsck.ErrorReporter. Every -/+ pair in the hunk renders the same declaration, so only the generated HTML anchors differ; the interface itself is unchanged:

    void clear()
    void report(String message)
    void reportError(String message)
    void reportError(ERROR_CODE errorCode, String message)
    void reportError(ERROR_CODE errorCode, String message, HBaseFsck.TableInfo table)
    void reportError(ERROR_CODE errorCode, String message, HBaseFsck.TableInfo table, HBaseFsck.HbckInfo info)
    void reportError(ERROR_CODE errorCode, String message, HBaseFsck.TableInfo table, HBaseFsck.HbckInfo info1, HBaseFsck.HbckInfo info2)
    int summarize()
    void detail(String details)
    ArrayList<HBaseFsck.ErrorReporter.ERROR_CODE> getErrorList()
    void progress()
    void print(String message)
    void resetErrors()
    boolean tableHasErrors(HBaseFsck.TableInfo table)
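
For orientation, here is a minimal sketch of what an implementation of this interface can look like. HBaseFsck's real reporter and its inner types (ERROR_CODE, TableInfo, HbckInfo) are not part of this diff, so the enum and class names below are illustrative stand-ins, not HBase code:

    import java.util.ArrayList;

    // Stand-in for HBaseFsck.ErrorReporter.ERROR_CODE; values taken from the
    // codes visible elsewhere in this digest.
    enum ErrorCode { NOT_IN_HDFS, NOT_DEPLOYED, MULTI_DEPLOYED, UNKNOWN }

    // Minimal reporter following the signatures above: record each code,
    // print messages as they arrive, and summarize at the end of a run.
    class PrintingErrorReporter {
      private final ArrayList<ErrorCode> errorList = new ArrayList<>();

      void clear() { errorList.clear(); }

      void report(String message) { System.out.println(message); }

      void reportError(ErrorCode errorCode, String message) {
        errorList.add(errorCode);            // remembered for summarize()
        System.err.println("ERROR: " + message);
      }

      int summarize() {                      // non-zero means inconsistencies found
        System.out.println(errorList.size() + " inconsistencies detected.");
        return errorList.size();
      }

      ArrayList<ErrorCode> getErrorList() { return errorList; }

      void progress() { System.out.print("."); }

      public static void main(String[] args) {
        PrintingErrorReporter r = new PrintingErrorReporter();
        r.reportError(ErrorCode.NOT_IN_HDFS, "Region found in META, but not in HDFS");
        r.summarize();                       // prints "1 inconsistencies detected."
      }
    }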


[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 3bfa60b..ea6a772 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
Regenerated checkstyle report ("Apache HBase - Checkstyle Results"). Apart from a generated line in the page head, the changes are three rows in the aggregate table:

* summary: 2249 files, 0 infos, 0 warnings, errors 14885 -> 14890
* org/apache/hadoop/hbase/regionserver/CompactSplit.java: 12 -> 11 errors
* org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java: new entry, 6 errors

(minus one, plus six: consistent with the 14885 -> 14890 total). The thousands of remaining +/- lines in the hunk are per-file rows, MetricsRegionWrapperImpl.java through StoreFlushContext.java and beyond, whose counts are unchanged; only their generated row markup shifted to accommodate the new entry (hunk truncated).

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 692e961..334478d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -18,7 +18,7 @@

The visible change is confined to the page's generated JavaScript method-index table (var methods = {"i0":10,"i1":10, ...}): the flags from index i68 onward shift by one position, consistent with a member having been inserted into HRegion's method list (hunk truncated).

[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -293,7944 +293,7962 @@

The regenerated HRegion source view picks up two new counters alongside the existing compaction tallies (compactionsFinished, compactionsFailed, compactionNumFilesCompacted, compactionNumBytesCompacted):

    final AtomicLong compactionsQueued = new AtomicLong(0L);
    final AtomicLong flushesQueued = new AtomicLong(0L);

The rest of the visible code (the WAL/filesystem fields, the busy-wait configuration comments, the row-processor executor, and the replay bookkeeping) is unchanged, renumbered by two lines (hunk truncated).
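
The hunk only introduces the two field declarations; their update sites are elsewhere in HRegion and not shown here. As a hedged sketch of the usual shape of such counters (the method names below are hypothetical, not HRegion's):

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch: monotonically increasing tallies of requested work, safe to bump
    // from any request thread and cheap for a metrics wrapper to read.
    class RegionWorkCounters {
      final AtomicLong compactionsQueued = new AtomicLong(0L);
      final AtomicLong flushesQueued = new AtomicLong(0L);

      // Hypothetical hooks, called when work is put on the respective queue.
      void onCompactionRequested() { compactionsQueued.incrementAndGet(); }
      void onFlushRequested() { flushesQueued.incrementAndGet(); }

      public static void main(String[] args) {
        RegionWorkCounters counters = new RegionWorkCounters();
        counters.onFlushRequested();
        counters.onCompactionRequested();
        System.out.println("queued: flushes=" + counters.flushesQueued.get()
            + " compactions=" + counters.compactionsQueued.get());
      }
    }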

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
index 80168c0..0066ffd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
@@ -60,205 +60,237 @@

The regenerated MetricsRegionWrapperImpl source view adds two fields next to the existing store-file statistics (numStoreFiles, memstoreSize, storeFileSize, max/min/avgStoreFileAge, numReferenceFiles):

    private long maxFlushQueueSize;
    private long maxCompactionQueueSize;

The rest of the hunk is unchanged code shifted down: the constructor still schedules HRegionMetricsWrapperRunnable via executor.scheduleWithFixedDelay(runnable, PERIOD, PERIOD, TimeUnit.SECONDS), and the getTableName/getNamespace/getRegionName accessors plus the counter getters (getNumStoreFiles, getMemstoreSize, getReadRequestCount, getNumCompactionsCompleted, getLastMajorCompactionAge, getNumReferenceFiles, and so on) are untouched (hunk truncated).
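
The constructor visible in the hunk schedules a refresh runnable with scheduleWithFixedDelay; the new max-queue-size fields are presumably filled in by that periodic task. A self-contained sketch of that sampling pattern (QueueSizeSampler and its 45-second period are illustrative choices, not the wrapper's actual code):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.function.LongSupplier;

    // Sketch: periodically sample a queue length and keep its high-water mark,
    // mirroring the scheduleWithFixedDelay call shown in the diff above.
    class QueueSizeSampler {
      private static final long PERIOD = 45L;            // sampling interval, seconds (assumed)

      private volatile long maxFlushQueueSize;           // analogous to the new field
      private final LongSupplier currentFlushQueueSize;  // stand-in for the region

      QueueSizeSampler(LongSupplier currentFlushQueueSize) {
        this.currentFlushQueueSize = currentFlushQueueSize;
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleWithFixedDelay(this::refresh, PERIOD, PERIOD, TimeUnit.SECONDS);
      }

      private void refresh() {
        // Keep the largest queue size seen since the wrapper was created.
        maxFlushQueueSize = Math.max(maxFlushQueueSize, currentFlushQueueSize.getAsLong());
      }

      long getMaxFlushQueueSize() { return maxFlushQueueSize; }
    }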

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 5c95397..860416b 100644
(same regenerated HRegion source hunk as in part 26 above: compactionsQueued and flushesQueued added, surrounding code renumbered by two)

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.html
index 7f3e9e9..d633968 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.html
@@ -38,63 +38,71 @@

The regenerated MetricsRegionSource interface gains four metric keys and their description strings:

    String COMPACTIONS_QUEUED_COUNT = "compactionsQueuedCount";
    String MAX_COMPACTION_QUEUE_SIZE = "maxCompactionQueueSize";
    String FLUSHES_QUEUED_COUNT = "flushesQueuedCount";
    String MAX_FLUSH_QUEUE_SIZE = "maxFlushQueueSize";
    String COMPACTIONS_QUEUED_DESC = "Number of compactions that are queued/running for this region";
    String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region";
    String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region";
    String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region";

The existing constants (compactionsCompletedCount, numBytesCompactedCount, coprocessorExecutionStatistics, replicaid, and their descriptions) and the interface's hooks (close(), updatePut(), updateDelete(), updateGet(long mills), updateScanTime(long mills), updateIncrement(), updateAppend(), getAggregateSource()) are unchanged apart from renumbering (hunk truncated).
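
The interface pairs each metric name with a description string so reporting code never hard-codes either. A small illustration of how a sink might consume the new keys (the RegionMetricsSnapshot class is hypothetical; real HBase code reports through the MetricsRegionAggregateSource returned by getAggregateSource()):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustration: publish gauges under the interface's metric-name constants.
    class RegionMetricsSnapshot {
      static final String COMPACTIONS_QUEUED_COUNT = "compactionsQueuedCount";
      static final String FLUSHES_QUEUED_COUNT = "flushesQueuedCount";

      static Map<String, Long> snapshot(long compactionsQueued, long flushesQueued) {
        Map<String, Long> gauges = new LinkedHashMap<>();
        gauges.put(COMPACTIONS_QUEUED_COUNT, compactionsQueued);
        gauges.put(FLUSHES_QUEUED_COUNT, flushesQueued);
        return gauges;
      }

      public static void main(String[] args) {
        // prints {compactionsQueuedCount=3, flushesQueuedCount=1}
        System.out.println(snapshot(3L, 1L));
      }
    }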

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 5c95397..860416b 100644
(same regenerated HRegion source hunk as in part 26 above: compactionsQueued and flushesQueued added, surrounding code renumbered by two)

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -2492,2617 +2492,2627 @@

The code in this hunk is identical on both sides; ten lines were added earlier in the file, so every generated line anchor shifted by ten. The quoted portion is HBaseFsck's region-consistency dispatch: depending on whether a region is present in hbase:meta (inMeta), present in HDFS (inHdfs), and deployed on region servers (isDeployed, isMultiplyDeployed, deploymentMatchesMeta, shouldBeDeployed), it reports LINGERING_SPLIT_PARENT, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, NOT_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_DEPLOYED, SERVER_DOES_NOT_MATCH_META, or ERROR_CODE.UNKNOWN, and, when the matching shouldFix* option is on, triggers a repair (resetSplitParent, deleteMetaRegion, undeployRegions, tryAssignmentRepair, or HBaseFsckRepair.fixMultiAssignment / waitUntilAssigned) plus setShouldRerun() (hunk truncated).
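
Condensed into plain control flow, the dispatch reads roughly as below; this is a sketch distilled from the code above, and the real method also handles split parents, meta regions, and the shouldFix* repair paths:

    // Sketch of HBaseFsck's consistency dispatch: each combination of where a
    // region is visible (meta, HDFS, region servers) maps to one error code.
    class ConsistencySketch {
      static String classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
          boolean shouldBeDeployed, boolean isMultiplyDeployed, boolean deploymentMatchesMeta) {
        if (inMeta && !inHdfs && !isDeployed) {
          return "NOT_IN_HDFS_OR_DEPLOYED";   // only meta knows about the region
        } else if (inMeta && !inHdfs && isDeployed) {
          return "NOT_IN_HDFS";               // HDFS is ground truth: unassign, clean meta
        } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
          return "NOT_DEPLOYED";              // candidate for assignment repair
        } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
          return "SHOULD_NOT_BE_DEPLOYED";
        } else if (inMeta && inHdfs && isMultiplyDeployed) {
          return "MULTI_DEPLOYED";            // fix multi-assignment
        } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
          return "SERVER_DOES_NOT_MATCH_META";
        }
        return "UNKNOWN";                     // unforeseen combination of states
      }

      public static void main(String[] args) {
        // A region known to meta and HDFS but deployed on several servers:
        System.out.println(classify(true, true, true, true, true, true));  // MULTI_DEPLOYED
      }
    }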

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index c9a18a3..c80f6d8 100644
(same regenerated HBaseFsck source hunk as in part 11 above: unchanged consistency-check code, line anchors shifted by ten)

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -2492,2617 +2492,2627 @@
 2484            return;
 2485          }
 2486        }
-2487        errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488            + descriptiveName + " is a split parent in META, in HDFS, "
-2489            + "and not deployed on any region server. This could be transient, "
-2490            + "consider to run the catalog janitor first!");
-2491        if (shouldFixSplitParents()) {
-2492          setShouldRerun();
-2493          resetSplitParent(hbi);
-2494        }
-2495      } else if (inMeta && !inHdfs && !isDeployed) {
-2496        errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497            + descriptiveName + " found in META, but not in HDFS "
-2498            + "or deployed on any region server.");
-2499        if (shouldFixMeta()) {
-2500          deleteMetaRegion(hbi);
-2501        }
-2502      } else if (inMeta && !inHdfs && isDeployed) {
-2503        errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504            + " found in META, but not in HDFS, " +
-2505            "and deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2506        // We treat HDFS as ground truth.  Any information in meta is transient
-2507        // and equivalent data can be regenerated.  So, lets unassign and remove
-2508        // these problems from META.
-2509        if (shouldFixAssignments()) {
-2510          errors.print("Trying to fix unassigned region...");
-2511          undeployRegions(hbi);
-2512        }
-2513        if (shouldFixMeta()) {
-2514          // wait for it to complete
-2515          deleteMetaRegion(hbi);
-2516        }
-2517      } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518        errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519            + " not deployed on any region server.");
-2520        tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
-2521      } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522        errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523            "Region " + descriptiveName + " should not be deployed according " +
-2524            "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2525        if (shouldFixAssignments()) {
-2526          errors.print("Trying to close the region " + descriptiveName);
-2527          setShouldRerun();
-2528          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2529        }
-2530      } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531        errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532            + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer
-2533            + " but is multiply assigned to region servers " +
-2534            Joiner.on(", ").join(hbi.deployedOn));
-2535        // If we are trying to fix the errors
-2536        if (shouldFixAssignments()) {
-2537          errors.print("Trying to fix assignment error...");
-2538          setShouldRerun();
-2539          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2540        }
-2541      } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542        errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543            + descriptiveName + " listed in hbase:meta on region server " +
-2544            hbi.metaEntry.regionServer + " but found on region server " +
-2545            hbi.deployedOn.get(0));
-2546        // If we are trying to fix the errors
-2547        if (shouldFixAssignments()) {
-2548          errors.print("Trying to fix assignment error...");
-2549          setShouldRerun();
-2550          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2551          HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552        }
-2553      } else {
-2554        errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555            " is in an unforeseen state:" +
-2556            " inMeta=" + inMeta +
-2557            " inHdfs=" + inHdfs +
-2558            " isDeployed=" + isDeployed +
-2559            " isMultiplyDeployed=" + isMultiplyDeployed +
-2560            " deploymentMatchesMeta=" + deploymentMatchesMeta +
-2561            " shouldBeDeployed=" + shouldBeDeployed);
-2562      }
-2563    }
-2564
-2565    /**
-2566     * Checks tables integrity. Goes over all regions and scans the tables.
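The hunk above is HBaseFsck's region-consistency dispatch: three facts (row
present in META, data present in HDFS, region deployed) plus three
qualifiers select a repair action, and the same hunk recurs on several of
the generated pages below. As a compact reference, here is a sketch of that
dispatch; only the conditions and error codes mirror the quoted source, the
branch bodies are stand-in comments, and the enclosing method is
hypothetical.

    // Sketch of the checkRegionConsistency dispatch quoted above.
    void checkRegionConsistencySketch(boolean inMeta, boolean inHdfs,
        boolean isDeployed, boolean isMultiplyDeployed,
        boolean deploymentMatchesMeta, boolean shouldBeDeployed) {
      if (inMeta && !inHdfs && !isDeployed) {
        // NOT_IN_HDFS_OR_DEPLOYED: only a stale META row remains -> delete it.
      } else if (inMeta && !inHdfs && isDeployed) {
        // NOT_IN_HDFS: HDFS is ground truth -> unassign, then delete the META row.
      } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
        // NOT_DEPLOYED: healthy on disk but unassigned -> attempt reassignment.
      } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
        // SHOULD_NOT_BE_DEPLOYED: close the region where it is running.
      } else if (inMeta && inHdfs && isMultiplyDeployed) {
        // MULTI_DEPLOYED: collapse the multiple assignment to one server.
      } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
        // SERVER_DOES_NOT_MATCH_META: fix the assignment, then wait for it.
      } else {
        // UNKNOWN: report every flag so the state can be triaged manually.
      }
    }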

[45/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index a5a0006..76d868a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";

-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends Object
 implements RegionScanner, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
@@ -429,7 +429,7 @@ implements

 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap

@@ -438,7 +438,7 @@ implements

 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided filters and are thus read
 on demand, if on-demand column family loading is enabled.

@@ -449,7 +449,7 @@ implements

 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to scan limits, this will
 contain the row for which we are populating the values.

@@ -460,7 +460,7 @@ implements

 filterClosed
-private boolean filterClosed
+private boolean filterClosed

@@ -469,7 +469,7 @@ implements

 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow

@@ -478,7 +478,7 @@ implements

 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow

@@ -487,7 +487,7 @@ implements

 region
-protected final HRegion region
+protected final HRegion region

@@ -496,7 +496,7 @@ implements

 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator

@@ -505,7 +505,7 @@ implements

 readPt
-private final long readPt
+private final long readPt

@@ -514,7 +514,7 @@ implements

 maxResultSize
-private final long maxResultSize
+private final long maxResultSize

@@ -523,7 +523,7 @@ implements

 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext

@@ -532,7 +532,7 @@ implements

 filter
-private final FilterWrapper filter
+private final FilterWrapper filter

@@ -549,7 +549,7 @@ implements

 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
                   List<KeyValueScanner> additionalScanners,
                   HRegion region)
            throws IOException
@@ -565,7 +565,7 @@ implements

 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
                   List<KeyValueScanner> additionalScanners,
                   HRegion region,
                   long nonceGroup,
@@ -591,7 +591,7 @@ implements

 getRegionInfo
-public HRegionInfo getRegionInfo()
+public HRegionInfo getRegionInfo()
 Specified by:
 getRegionInfo in interface RegionScanner
@@ -606,7 +606,7 @@ implements

 initializeScanners
-protected void initializeScanners(Scan scan,
+protected void initializeScanners(Scan scan,
                                   List<KeyValueScanner> additionalScanners)
                            throws IOException
@@ -621,7 +621,7 @@ implements

 initializeKVHeap
-protected void initializeKVHeap(List<KeyValueScanner> scanners,
+protected void initializeKVHeap(List<KeyValueScanner> scanners,
                                 List<KeyValueScanner> joinedScanners,
                                 HRegion region)
                          throws IOException
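The class description above says RegionScannerImpl combines scanners from
multiple Stores, and storeHeap/joinedHeap are the structures that perform
that merge. The following self-contained sketch shows the heap-merge idea
only; CellScanner and its String keys are simplifications, not HBase's
KeyValueScanner or KeyValueHeap API.

    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Each per-store scanner yields keys in sorted order; the heap always
    // exposes the scanner whose current key is globally smallest.
    interface CellScanner {
      String peek();   // current key, or null when exhausted
      String next();   // return the current key and advance
    }

    final class HeapMergeSketch {
      static void mergeScan(List<CellScanner> scanners) {
        PriorityQueue<CellScanner> heap =
            new PriorityQueue<>(Comparator.comparing(CellScanner::peek));
        for (CellScanner s : scanners) {
          if (s.peek() != null) {
            heap.add(s);
          }
        }
        while (!heap.isEmpty()) {
          CellScanner top = heap.poll();    // smallest current key wins
          System.out.println(top.next());   // emit in globally sorted order
          if (top.peek() != null) {
            heap.add(top);                  // re-seat after advancing
          }
        }
      }
    }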

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -2492,2617 +2492,2627 @@

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = new AtomicLong(0L);
 286  final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int rowLockWaitDuration;
-294  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
-295
-296  // The internal wait duration to acquire a lock before read/update
-297  // from the region. It is not per row. The purpose of this wait time
-298  // is to avoid waiting a long time while the region is busy, so that
-299  // we can release the IPC handler soon enough to improve the
-300  // availability of the region server. It can be adjusted by
-301  // tuning configuration "hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one call, wait longer,
-306  // i.e. waiting for busyWaitDuration * # of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no point to wait longer than the RPC
-311  // purge timeout, when a RPC call will be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite timeout
-319  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int rowLockWaitDuration;
+296  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
+297
+298  // The internal wait duration to acquire a lock before read/update
+299  // from the region. It is not per row. The purpose of this wait time
+300  // is to avoid waiting a long time while the region is busy, so that
+301  // we can release the IPC handler soon enough to improve the
+302  // availability of the region server. It can be adjusted by
+303  // tuning configuration "hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one call, wait longer,
+308  // i.e. waiting for busyWaitDuration * # of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no point to wait longer than the RPC
+313  // purge timeout, when a RPC call will be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite timeout
+321  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to enable on-demand CF loading for
-331   * scan requests to this region. Requests can override it.
-332   */
-333  private boolean isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to ensure that we do not lose any data. So, we
-340  // have to be conservative in how we replay wals. For each store, we calculate
-341  // the maxSeqId up to which
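The comment block above names "hbase.busy.wait.duration" as the knob behind
busyWaitDuration. A minimal, illustrative way to adjust it through the
Configuration API follows; the property name comes from the diff, while the
chosen value is an assumption, not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class BusyWaitTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 30 seconds here is purely illustrative.
        conf.setLong("hbase.busy.wait.duration", 30000L);
        System.out.println(conf.getLong("hbase.busy.wait.duration", -1L));
      }
    }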

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index f355960..13d9b4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, boolean forceFlushAllStores) {
-355    synchronized (regionsInQueue) {
-356      if (!regionsInQueue.containsKey(r)) {
-357        // This entry has no delay so it will be added at the top of the flush
-358        // queue.  It'll come out near immediately.
-359        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-360        this.regionsInQueue.put(r, fqe);
-361        this.flushQueue.add(fqe);
-362      }
-363    }
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region r, long delay, boolean forceFlushAllStores) {
-368    synchronized (regionsInQueue) {
-369      if (!regionsInQueue.containsKey(r)) {
-370        // This entry has some delay
-371        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-372        fqe.requeue(delay);
-373        this.regionsInQueue.put(r, fqe);
-374        this.flushQueue.add(fqe);
-375      }
-376    }
-377  }
-378
-379  public int getFlushQueueSize() {
-380    return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387    lock.writeLock().lock();
-388    try {
-389      for (FlushHandler flushHander : flushHandlers) {
-390        if (flushHander != null) flushHander.interrupt();
-391      }
-392    } finally {
-393      lock.writeLock().unlock();
-394    }
-395  }
-396
-397  synchronized void start(UncaughtExceptionHandler eh) {
-398    ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(
-399      server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400    for (int i = 0; i < flushHandlers.length; i++) {
-401      flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
-402      flusherThreadFactory.newThread(flushHandlers[i]);
-403      flushHandlers[i].start();
-404    }
-405  }
-406
-407  boolean isAlive() {
-408    for (FlushHandler flushHander : flushHandlers) {
-409      if (flushHander != null && flushHander.isAlive()) {
-410        return true;
-411      }
-412    }
-413    return false;
-414  }
-415
-416  void join() {
-417    for (FlushHandler flushHander : flushHandlers) {
-418      if (flushHander != null) {
-419        Threads.shutdown(flushHander.getThread());
-420      }
-421    }
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was successfully flushed, false otherwise. If
-429   * false, there will be accompanying log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final FlushRegionEntry fqe) {
-433    Region region = fqe.region;
-434    if (!region.getRegionInfo().isMetaRegion() &&
-435        isTooManyStoreFiles(region)) {
-436      if (fqe.isMaximumWait(this.blockingWaitTime)) {
-437        LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438          "ms on a compaction to clean up 'too many store files'; waited " +
-439          "long enough... proceeding with flush of " +
-440          region.getRegionInfo().getRegionNameAsString());
-441      } else {
-442        // If this is first time we've been put off, then emit a log message.
-443        if (fqe.getRequeueCount() <= 0) {
-444          // Note: We don't impose blockingStoreFiles constraint on meta regions
-445          LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446            "store files; delaying flush up to " + this.blockingWaitTime + "ms");
-447          if (!this.server.compactSplitThread.requestSplit(region)) {
-448            try {
-449              this.server.compactSplitThread.requestSystemCompaction(
-450                  region, Thread.currentThread().getName());
-451            } catch (IOException e) {
-452              e = e instanceof RemoteException ?
-453                  ((RemoteException)e).unwrapRemoteException() : e;
-454              LOG.error("Cache flush failed for region " +
-455                  Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456            }
-457
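requestFlush() and requestDelayedFlush() above share one pattern: a region
gets at most one pending entry in regionsInQueue, and a delayed entry just
carries a later eligibility time before it pops out of flushQueue. A
self-contained sketch of that pattern, in which FlushTask is a hypothetical
stand-in for FlushRegionEntry:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    final class FlushQueueSketch {
      static final class FlushTask implements Delayed {
        final String region;
        final long readyAtMillis;            // when the entry becomes eligible
        FlushTask(String region, long delayMs) {
          this.region = region;
          this.readyAtMillis = System.currentTimeMillis() + delayMs;
        }
        @Override public long getDelay(TimeUnit unit) {
          return unit.convert(readyAtMillis - System.currentTimeMillis(),
              TimeUnit.MILLISECONDS);
        }
        @Override public int compareTo(Delayed other) {
          return Long.compare(getDelay(TimeUnit.MILLISECONDS),
              other.getDelay(TimeUnit.MILLISECONDS));
        }
      }

      private final Map<String, FlushTask> regionsInQueue = new HashMap<>();
      private final DelayQueue<FlushTask> flushQueue = new DelayQueue<>();

      // delayMs == 0 mirrors requestFlush(); > 0 mirrors requestDelayedFlush().
      synchronized void requestFlush(String region, long delayMs) {
        if (!regionsInQueue.containsKey(region)) {   // one entry per region
          FlushTask task = new FlushTask(region, delayMs);
          regionsInQueue.put(region, task);
          flushQueue.add(task);
        }
      }
    }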

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -2492,2617 +2492,2627 @@

[10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -2492,2617 +2492,2627 @@

[49/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/book.html
--
diff --git a/book.html b/book.html
index 189aea1..b8e8dd8 100644
--- a/book.html
+++ b/book.html
@@ -7207,6 +7207,13 @@ The colon character (:) delimits the column family from the column

+"com.example.www"
+t5
+contents:html = "<html>…"
+
+people:author = "John Doe"
+

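The row added to the conceptual-view table above can be written with the
standard HBase client API. A hedged sketch, assuming the reference guide's
running "webtable" example and a reachable cluster:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WebtablePut {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("webtable"))) {
          Put put = new Put(Bytes.toBytes("com.example.www"));
          put.addColumn(Bytes.toBytes("contents"), Bytes.toBytes("html"),
              Bytes.toBytes("<html>..."));
          put.addColumn(Bytes.toBytes("people"), Bytes.toBytes("author"),
              Bytes.toBytes("John Doe"));
          table.put(put);    // one row, two column families
        }
      }
    }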

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index bd967d8..c38059c 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 https://www.apache.org/ The Apache Software Foundation. All rights reserved.

-  Last Published: 2017-07-27
+  Last Published: 2017-07-28
 
 
 



[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -2492,2617 +2492,2627 @@

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
index f355960..13d9b4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
@@ -360,478 +360,480 @@

[40/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index 3dadb3d..aab3b54 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";

-private class HBaseFsck.TableInfo.IntegrityFixSuggester
+private class HBaseFsck.TableInfo.IntegrityFixSuggester
 extends TableIntegrityErrorHandlerImpl

@@ -267,7 +267,7 @@ extends

 errors
-HBaseFsck.ErrorReporter errors
+HBaseFsck.ErrorReporter errors

@@ -284,7 +284,7 @@ extends

 IntegrityFixSuggester
-IntegrityFixSuggester(HBaseFsck.TableInfo ti,
+IntegrityFixSuggester(HBaseFsck.TableInfo ti,
                       HBaseFsck.ErrorReporter errors)

@@ -302,7 +302,7 @@ extends

 handleRegionStartKeyNotEmpty
-public void handleRegionStartKeyNotEmpty(HBaseFsck.HbckInfo hi)
+public void handleRegionStartKeyNotEmpty(HBaseFsck.HbckInfo hi)
                                   throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
 Callback for handling case where a Table has a first region that does not
@@ -327,7 +327,7 @@ extends

 handleRegionEndKeyNotEmpty
-public void handleRegionEndKeyNotEmpty(byte[] curEndKey)
+public void handleRegionEndKeyNotEmpty(byte[] curEndKey)
                                 throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
 Callback for handling case where a Table has a last region that does not
@@ -351,7 +351,7 @@ extends

 handleDegenerateRegion
-public void handleDegenerateRegion(HBaseFsck.HbckInfo hi)
+public void handleDegenerateRegion(HBaseFsck.HbckInfo hi)
                             throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
 Callback for handling a region that has the same start and end key.
@@ -373,7 +373,7 @@ extends

 handleDuplicateStartKeys
-public void handleDuplicateStartKeys(HBaseFsck.HbckInfo r1,
+public void handleDuplicateStartKeys(HBaseFsck.HbckInfo r1,
                                      HBaseFsck.HbckInfo r2)
                               throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
@@ -398,7 +398,7 @@ extends

 handleSplit
-public void handleSplit(HBaseFsck.HbckInfo r1,
+public void handleSplit(HBaseFsck.HbckInfo r1,
                         HBaseFsck.HbckInfo r2)
                  throws IOException
 Description copied from interface: TableIntegrityErrorHandler
@@ -419,7 +419,7 @@ extends

 handleOverlapInRegionChain
-public void handleOverlapInRegionChain(HBaseFsck.HbckInfo hi1,
+public void handleOverlapInRegionChain(HBaseFsck.HbckInfo hi1,
                                        HBaseFsck.HbckInfo hi2)
                                 throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
@@ -446,7 +446,7 @@ extends

 handleHoleInRegionChain
-public void handleHoleInRegionChain(byte[] holeStart,
+public void handleHoleInRegionChain(byte[] holeStart,
                                     byte[] holeStop)
                              throws IOException
 Description copied from class: TableIntegrityErrorHandlerImpl
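The callbacks above form the repair surface for a table's region chain:
empty first/last keys, degenerate regions, duplicate start keys, overlaps,
and holes. A condensed sketch of that surface follows; the method names
come from the diff, while the simplified byte[] signatures are assumptions
(the real methods take HBaseFsck.HbckInfo arguments).

    // Condensed view of the integrity-callback surface listed above.
    interface RegionChainCallbacks {
      void handleRegionStartKeyNotEmpty(byte[] firstRegionStartKey);
      void handleRegionEndKeyNotEmpty(byte[] lastRegionEndKey);
      void handleDegenerateRegion(byte[] startKey, byte[] endKey); // start == end
      void handleDuplicateStartKeys(byte[] regionA, byte[] regionB);
      void handleOverlapInRegionChain(byte[] overlapA, byte[] overlapB);
      void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop);
    }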

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index 7430542..bb60c8d 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = 

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -2492,2617 +2492,2627 @@

[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index c6e6c57..0806aa7 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20170727145115+00'00')
-/CreationDate (D:20170727145115+00'00')
+/ModDate (D:20170728144910+00'00')
+/CreationDate (D:20170728144910+00'00')
 >>
 endobj
 2 0 obj
@@ -27334,7 +27334,7 @@ endobj
 endobj
 136 0 obj
 << /Limits [(__anchor-top) (adding.new.node)]
-/Names [(__anchor-top) 25 0 R (__indexterm-6954592) 3262 0 R (__indexterm-6956842) 3264 0 R (__indexterm-6958904) 3265 0 R (__indexterm-6960778) 3266 0 R (acid) 891 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3361 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3362 0 R (add.metrics) 3359 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3596 0 R (adding.new.node) 2859 0 R]
+/Names [(__anchor-top) 25 0 R (__indexterm-6955508) 3262 0 R (__indexterm-6957758) 3264 0 R (__indexterm-6959820) 3265 0 R (__indexterm-6961694) 3266 0 R (acid) 891 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3361 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3362 0 R (add.metrics) 3359 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3596 0 R (adding.new.node) 2859 0 R]
 >>
 endobj
 137 0 obj
@@ -94844,7 +94844,7 @@ endobj
 [779 0 R /XYZ 0 841.89 null]
 endobj
 781 0 obj
-<< /Length 29245
+<< /Length 31782
 >>
 stream
 q
@@ -96023,6 +96023,26 @@ f
[... 20 added lines: five 99.76 x 34.56 cell-background rectangles for a
new table row ...]
@@ -97071,13 +97091,202 @@
[... roughly 190 added lines of cell borders and text-show (Tj) operations;
the hex operands, e.g. <70656f706c653a617574686f72203d>, decode to
"com.example.www", "t5", contents:html = "<html>…", and
people:author = "John Doe", the same row added to book.html above ...]
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN

 0.5299 Tw

 BT
-48.24 159.31 Td
+48.24 124.75 Td
 /F1.0 10.5 Tf
 [<43656c6c7320696e2074686973207461626c6520746861742061707065617220746f20626520656d70747920646f206e6f742074616b> 20.0195
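The bracketed Tj operands in this stream are hex-encoded text. A standalone
decoder makes them readable; it assumes single-byte character codes in the
embedded font, which holds for the ASCII cells shown here.

    public final class PdfHexDecode {
      // Decode a PDF hex-string operand such as the Tj arguments above.
      static String decode(String hex) {
        StringBuilder out = new StringBuilder();
        for (int i = 0; i + 1 < hex.length(); i += 2) {
          out.append((char) Integer.parseInt(hex.substring(i, i + 2), 16));
        }
        return out.toString();
      }

      public static void main(String[] args) {
        // Prints: people:author =
        System.out.println(decode("70656f706c653a617574686f72203d"));
      }
    }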

[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final 
ConcurrentHashMapRegionScanner, Long scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the maxSeqId up to which 
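
The hunk above carries the substance of this change: HRegion gains two queued-work
counters, compactionsQueued and flushesQueued, alongside the existing compaction
counters. For reference, a minimal sketch of that counter pattern; the
enqueue/dequeue call sites here are illustrative assumptions, not the exact HBase
code paths:

import java.util.concurrent.atomic.AtomicLong;

// Queued-work counters in the style of the HRegion fields above.
class RegionQueueCounters {
  final AtomicLong compactionsQueued = new AtomicLong(0L);
  final AtomicLong flushesQueued = new AtomicLong(0L);

  // Called when work is enqueued (assumed call sites, for illustration only).
  void onCompactionQueued()  { compactionsQueued.incrementAndGet(); }
  void onFlushQueued()       { flushesQueued.incrementAndGet(); }

  // Called when the queued work actually starts running.
  void onCompactionStarted() { compactionsQueued.decrementAndGet(); }
  void onFlushStarted()      { flushesQueued.decrementAndGet(); }
}

AtomicLong keeps the counts consistent under concurrent submitters without
holding any region-level lock.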

hbase-site git commit: INFRA-10751 Empty commit

2017-07-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a2b2dd19e -> 69d816fe2


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/69d816fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/69d816fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/69d816fe

Branch: refs/heads/asf-site
Commit: 69d816fe2a4e8f9ee02b790e38cb48ff6eb4d42f
Parents: a2b2dd1
Author: jenkins 
Authored: Fri Jul 28 15:07:34 2017 +
Committer: jenkins 
Committed: Fri Jul 28 15:07:34 2017 +

--

--




[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index 1e809ce..91c71d0 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -2068,7 +2068,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 cmp
-static final Comparator<HBaseFsck.HbckInfo> cmp
+static final Comparator<HBaseFsck.HbckInfo> cmp
 
 
 
@@ -3159,7 +3159,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 checkIntegrity
-SortedMap<TableName, HBaseFsck.TableInfo> checkIntegrity()
+SortedMap<TableName, HBaseFsck.TableInfo> checkIntegrity()
  throws IOException
 Checks tables integrity. Goes over all regions and scans 
the tables.
  Collects all the pieces for each table and checks if there are missing,
@@ -3176,7 +3176,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 loadTableInfosForTablesWithNoRegion
-private void loadTableInfosForTablesWithNoRegion()
+private void loadTableInfosForTablesWithNoRegion()
   throws IOException
 Loads table info's for tables that may not have been 
included, since there are no
  regions reported for the table, but table dir is there in hdfs
@@ -3192,7 +3192,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 mergeRegionDirs
-public int mergeRegionDirs(org.apache.hadoop.fs.Path targetRegionDir,
+public int mergeRegionDirs(org.apache.hadoop.fs.Path targetRegionDir,
 HBaseFsck.HbckInfo contained)
 throws IOException
 Merge hdfs data by moving from contained HbckInfo into 
targetRegionDir.
@@ -3210,7 +3210,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 dumpOverlapProblems
-public void dumpOverlapProblems(org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap<byte[], HBaseFsck.HbckInfo> regions)
+public void dumpOverlapProblems(org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap<byte[], HBaseFsck.HbckInfo> regions)
 
 
 
@@ -3219,7 +3219,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 dumpSidelinedRegions
-public void dumpSidelinedRegions(Map<org.apache.hadoop.fs.Path, HBaseFsck.HbckInfo> regions)
+public void dumpSidelinedRegions(Map<org.apache.hadoop.fs.Path, HBaseFsck.HbckInfo> regions)
 
 
 
@@ -3228,7 +3228,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 getOverlapGroups
-public org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap<byte[], HBaseFsck.HbckInfo> getOverlapGroups(TableName table)
+public org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap<byte[], HBaseFsck.HbckInfo> getOverlapGroups(TableName table)
 
 
 
@@ -3237,7 +3237,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 getTables
-HTableDescriptor[] getTables(AtomicInteger numSkipped)
+HTableDescriptor[] getTables(AtomicInteger numSkipped)
 Return a list of user-space table names whose metadata have 
not been
  modified in the last few milliseconds specified by timelag
  if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER,
@@ -3257,7 +3257,7 @@ implements 
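
The getTables javadoc above describes a "timelag" filter: only tables whose
metadata has not been modified in the last timelag milliseconds are returned.
A hedged sketch of that filter, with the per-table modification times
simplified to a plain map:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class TimelagFilter {
  // Returns the names of tables untouched for at least timelagMs milliseconds.
  static List<String> tablesOlderThan(Map<String, Long> lastModified, long timelagMs) {
    long cutoff = System.currentTimeMillis() - timelagMs;
    List<String> out = new ArrayList<>();
    for (Map.Entry<String, Long> e : lastModified.entrySet()) {
      if (e.getValue() <= cutoff) {
        out.add(e.getKey());
      }
    }
    return out;
  }
}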

[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
index 7dabb5e..782b6f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
@@ -352,369 +352,376 @@
 344ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
 345  ? longCompactions : 
shortCompactions;
 346pool.execute(new CompactionRunner(s, 
r, compaction, pool, user));
-347if (LOG.isDebugEnabled()) {
-348  String type = (pool == 
shortCompactions) ? "Small " : "Large ";
-349  LOG.debug(type + "Compaction 
requested: " + (selectNow ? compaction.toString() : "system")
-350  + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
-351}
-352return selectNow ? 
compaction.getRequest() : null;
-353  }
-354
-355  private CompactionContext 
selectCompaction(final Region r, final Store s,
-356  int priority, CompactionRequest 
request, User user) throws IOException {
-357CompactionContext compaction = 
s.requestCompaction(priority, request, user);
-358if (compaction == null) {
-359  if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
-360LOG.debug("Not compacting " + 
r.getRegionInfo().getRegionNameAsString() +
-361" because compaction request 
was cancelled");
-362  }
-363  return null;
-364}
-365assert compaction.hasSelection();
-366if (priority != Store.NO_PRIORITY) 
{
-367  
compaction.getRequest().setPriority(priority);
-368}
-369return compaction;
-370  }
-371
-372  /**
-373   * Only interrupt once it's done with a 
run through the work loop.
-374   */
-375  void interruptIfNecessary() {
-376splits.shutdown();
-377longCompactions.shutdown();
-378shortCompactions.shutdown();
-379  }
-380
-381  private void waitFor(ThreadPoolExecutor 
t, String name) {
-382boolean done = false;
-383while (!done) {
-384  try {
-385done = t.awaitTermination(60, 
TimeUnit.SECONDS);
-386LOG.info("Waiting for " + name + 
" to finish...");
-387if (!done) {
-388  t.shutdownNow();
-389}
-390  } catch (InterruptedException ie) 
{
-391LOG.warn("Interrupted waiting for 
" + name + " to finish...");
-392  }
-393}
-394  }
-395
-396  void join() {
-397waitFor(splits, "Split Thread");
-398waitFor(longCompactions, "Large 
Compaction Thread");
-399waitFor(shortCompactions, "Small 
Compaction Thread");
-400  }
-401
-402  /**
-403   * Returns the current size of the 
queue containing regions that are
-404   * processed.
-405   *
-406   * @return The current size of the 
regions queue.
-407   */
-408  public int getCompactionQueueSize() {
-409return 
longCompactions.getQueue().size() + shortCompactions.getQueue().size();
-410  }
-411
-412  public int 
getLargeCompactionQueueSize() {
-413return 
longCompactions.getQueue().size();
-414  }
-415
+347
((HRegion)r).incrementCompactionsQueuedCount();
+348if (LOG.isDebugEnabled()) {
+349  String type = (pool == 
shortCompactions) ? "Small " : "Large ";
+350  LOG.debug(type + "Compaction 
requested: " + (selectNow ? compaction.toString() : "system")
+351  + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
+352}
+353return selectNow ? 
compaction.getRequest() : null;
+354  }
+355
+356  private CompactionContext 
selectCompaction(final Region r, final Store s,
+357  int priority, CompactionRequest 
request, User user) throws IOException {
+358CompactionContext compaction = 
s.requestCompaction(priority, request, user);
+359if (compaction == null) {
+360  if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
+361LOG.debug("Not compacting " + 
r.getRegionInfo().getRegionNameAsString() +
+362" because compaction request 
was cancelled");
+363  }
+364  return null;
+365}
+366assert compaction.hasSelection();
+367if (priority != Store.NO_PRIORITY) 
{
+368  
compaction.getRequest().setPriority(priority);
+369}
+370return compaction;
+371  }
+372
+373  /**
+374   * Only interrupt once it's done with a 
run through the work loop.
+375   */
+376  void interruptIfNecessary() {
+377splits.shutdown();
+378longCompactions.shutdown();
+379shortCompactions.shutdown();
+380  }
+381
+382  private void waitFor(ThreadPoolExecutor 
t, String name) {
+383boolean done = false;
+384while (!done) {
+385  try {
+386done = t.awaitTermination(60, 
TimeUnit.SECONDS);
+387LOG.info("Waiting for " + name + 
" to 

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index 4086524..d58b526 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
@@ -107,7 +107,7 @@
 
 
 
-static interface MemStoreFlusher.FlushQueueEntry
+static interface MemStoreFlusher.FlushQueueEntry
 extends Delayed
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index c64bc30..6d5e7e6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class MemStoreFlusher.FlushRegionEntry
+static class MemStoreFlusher.FlushRegionEntry
 extends Object
 implements MemStoreFlusher.FlushQueueEntry
 Datastructure used in the flush queue.  Holds region and 
retry count.
@@ -261,7 +261,7 @@ implements 
 
 region
-private final Region region
+private final Region region
 
 
 
@@ -270,7 +270,7 @@ implements 
 
 createTime
-private final long createTime
+private final long createTime
 
 
 
@@ -279,7 +279,7 @@ implements 
 
 whenToExpire
-private long whenToExpire
+private long whenToExpire
 
 
 
@@ -288,7 +288,7 @@ implements 
 
 requeueCount
-private int requeueCount
+private int requeueCount
 
 
 
@@ -297,7 +297,7 @@ implements 
 
 forceFlushAllStores
-private boolean forceFlushAllStores
+private boolean forceFlushAllStores
 
 
 
@@ -314,7 +314,7 @@ implements 
 
 FlushRegionEntry
-FlushRegionEntry(Region r,
+FlushRegionEntry(Region r,
  boolean forceFlushAllStores)
 
 
@@ -332,7 +332,7 @@ implements 
 
 isMaximumWait
-public boolean isMaximumWait(long maximumWait)
+public boolean isMaximumWait(long maximumWait)
 
 Parameters:
 maximumWait - 
@@ -347,7 +347,7 @@ implements 
 
 getRequeueCount
-public int getRequeueCount()
+public int getRequeueCount()
 
 Returns:
 Count of times requeue(long)
 was called; i.e this is
@@ -361,7 +361,7 @@ implements 
 
 isForceFlushAllStores
-public boolean isForceFlushAllStores()
+public boolean isForceFlushAllStores()
 
 Returns:
 whether we need to flush all stores.
@@ -374,7 +374,7 @@ implements 
 
 requeue
-public MemStoreFlusher.FlushRegionEntry requeue(long when)
+public MemStoreFlusher.FlushRegionEntry requeue(long when)
 
 Parameters:
 when - When to expire, when to come up out of the queue.
@@ -391,7 +391,7 @@ implements 
 
 getDelay
-public long getDelay(TimeUnit unit)
+public long getDelay(TimeUnit unit)
 
 Specified by:
 getDelay in interface Delayed
@@ -404,7 +404,7 @@ implements 
 
 compareTo
-public int compareTo(Delayed other)
+public int compareTo(Delayed other)
 
 Specified by:
 compareTo in interface Comparable

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
index 7dabb5e..782b6f3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
@@ -352,369 +352,376 @@
 344ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
 345  ? longCompactions : 
shortCompactions;
 346pool.execute(new CompactionRunner(s, 
r, compaction, pool, user));
-347if (LOG.isDebugEnabled()) {
-348  String type = (pool == 
shortCompactions) ? "Small " : "Large ";
-349  LOG.debug(type + "Compaction 
requested: " + (selectNow ? compaction.toString() : "system")
-350  + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
-351}
-352return selectNow ? 
compaction.getRequest() : null;
-353  }
-354
-355  private CompactionContext 
selectCompaction(final Region r, final Store s,
-356  int priority, CompactionRequest 
request, User user) throws IOException {
-357CompactionContext compaction = 
s.requestCompaction(priority, request, user);
-358if (compaction == null) {
-359  if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
-360LOG.debug("Not compacting " + 
r.getRegionInfo().getRegionNameAsString() +
-361" because compaction request 
was cancelled");
-362  }
-363  return null;
-364}
-365assert compaction.hasSelection();
-366if (priority != Store.NO_PRIORITY) 
{
-367  
compaction.getRequest().setPriority(priority);
-368}
-369return compaction;
-370  }
-371
-372  /**
-373   * Only interrupt once it's done with a 
run through the work loop.
-374   */
-375  void interruptIfNecessary() {
-376splits.shutdown();
-377longCompactions.shutdown();
-378shortCompactions.shutdown();
-379  }
-380
-381  private void waitFor(ThreadPoolExecutor 
t, String name) {
-382boolean done = false;
-383while (!done) {
-384  try {
-385done = t.awaitTermination(60, 
TimeUnit.SECONDS);
-386LOG.info("Waiting for " + name + 
" to finish...");
-387if (!done) {
-388  t.shutdownNow();
-389}
-390  } catch (InterruptedException ie) 
{
-391LOG.warn("Interrupted waiting for 
" + name + " to finish...");
-392  }
-393}
-394  }
-395
-396  void join() {
-397waitFor(splits, "Split Thread");
-398waitFor(longCompactions, "Large 
Compaction Thread");
-399waitFor(shortCompactions, "Small 
Compaction Thread");
-400  }
-401
-402  /**
-403   * Returns the current size of the 
queue containing regions that are
-404   * processed.
-405   *
-406   * @return The current size of the 
regions queue.
-407   */
-408  public int getCompactionQueueSize() {
-409return 
longCompactions.getQueue().size() + shortCompactions.getQueue().size();
-410  }
-411
-412  public int 
getLargeCompactionQueueSize() {
-413return 
longCompactions.getQueue().size();
-414  }
-415
+347
((HRegion)r).incrementCompactionsQueuedCount();
+348if (LOG.isDebugEnabled()) {
+349  String type = (pool == 
shortCompactions) ? "Small " : "Large ";
+350  LOG.debug(type + "Compaction 
requested: " + (selectNow ? compaction.toString() : "system")
+351  + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
+352}
+353return selectNow ? 
compaction.getRequest() : null;
+354  }
+355
+356  private CompactionContext 
selectCompaction(final Region r, final Store s,
+357  int priority, CompactionRequest 
request, User user) throws IOException {
+358CompactionContext compaction = 
s.requestCompaction(priority, request, user);
+359if (compaction == null) {
+360  if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
+361LOG.debug("Not compacting " + 
r.getRegionInfo().getRegionNameAsString() +
+362" because compaction request 
was cancelled");
+363  }
+364  return null;
+365}
+366assert compaction.hasSelection();
+367if (priority != Store.NO_PRIORITY) 
{
+368  
compaction.getRequest().setPriority(priority);
+369}
+370return compaction;
+371  }
+372
+373  /**
+374   * Only interrupt once it's done with a 
run through the work loop.
+375   */
+376  void interruptIfNecessary() {
+377splits.shutdown();
+378longCompactions.shutdown();
+379shortCompactions.shutdown();
+380  }
+381
+382  private void waitFor(ThreadPoolExecutor 
t, String name) {
+383boolean done = false;
+384while (!done) {
+385  try {
+386done = t.awaitTermination(60, 
TimeUnit.SECONDS);

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ee3302912 -> a2b2dd19e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495} else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502} else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517} else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521} else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530} else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
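
The chain above classifies each region against a matrix of booleans (inMeta,
inHdfs, isDeployed, and so on) and reports the matching ERROR_CODE. A compact
sketch of that decision style; the code names come from the diff, the
reporting is simplified:

class RegionConsistencyCheck {
  // Returns the error-code name for a few of the states handled above.
  static String classify(boolean inMeta, boolean inHdfs, boolean isDeployed) {
    if (inMeta && !inHdfs && !isDeployed) return "NOT_IN_HDFS_OR_DEPLOYED";
    if (inMeta && !inHdfs && isDeployed)  return "NOT_IN_HDFS";
    if (inMeta && inHdfs && !isDeployed)  return "NOT_DEPLOYED";
    return "UNKNOWN"; // the real check handles many more combinations
  }
}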

[46/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
index 519f159..02c046b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class CompactSplit.CompactionRunner
+private class CompactSplit.CompactionRunner
 extends Object
 implements Runnable, Comparable<CompactSplit.CompactionRunner>
 
@@ -250,7 +250,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 store
-private final Store store
+private final Store store
 
 
 
@@ -259,7 +259,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 region
-private final HRegion region
+private final HRegion region
 
 
 
@@ -268,7 +268,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 compaction
-private CompactionContext compaction
+private CompactionContext compaction
 
 
 
@@ -277,7 +277,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 queuedPriority
-private int queuedPriority
+private int queuedPriority
 
 
 
@@ -286,7 +286,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 parent
-private ThreadPoolExecutor parent
+private ThreadPoolExecutor parent
 
 
 
@@ -295,7 +295,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 user
-private User user
+private User user
 
 
 
@@ -304,7 +304,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 time
-private long time
+private long time
 
 
 
@@ -321,7 +321,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 CompactionRunner
-public CompactionRunner(Store store,
+public CompactionRunner(Store store,
 Region region,
 CompactionContext compaction,
 ThreadPoolExecutor parent,
@@ -342,7 +342,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 toString
-public String toString()
+public String toString()
 
 Overrides:
 toString in class Object
@@ -355,7 +355,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 doCompaction
-private void doCompaction(User user)
+private void doCompaction(User user)
 
 
 
@@ -364,7 +364,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 run
-public void run()
+public void run()
 
 Specified by:
 run in interface Runnable
@@ -377,7 +377,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 formatStackTrace
-private String formatStackTrace(Exception ex)
+private String formatStackTrace(Exception ex)

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the maxSeqId up to which the 

[47/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 2ab94b5..339f9cb 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2249,
- Errors: 14885,
+ Errors: 14890,
  Warnings: 0,
  Infos: 0
   
@@ -12137,7 +12137,7 @@ under the License.
   0
 
 
-  0
+  6
 
   
   
@@ -26543,7 +26543,7 @@ under the License.
   0
 
 
-  12
+  11
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/coc.html
--
diff --git a/coc.html b/coc.html
index 91ad662..f1b2856 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-27
+  Last Published: 
2017-07-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 2e02fb0..8b9055b 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Installing Apache HBase (TM) on Windows using Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-27
+  Last Published: 
2017-07-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index a06b240..79d528d 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Project Dependencies
 
@@ -527,7 +527,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-27
+  Last Published: 
2017-07-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index dfcb2e4..e43f09d 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Reactor Dependency Convergence
 
@@ -724,7 +724,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-27
+  Last Published: 
2017-07-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index c2cf899..3ccac00 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-27
+  Last Published: 
2017-07-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 1e493fe..2b3f7fe 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
Apache HBase – Project Dependency Management
 
@@ -900,7 +900,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
  

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
index f355960..13d9b4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, 
boolean forceFlushAllStores) {
-355synchronized (regionsInQueue) {
-356  if (!regionsInQueue.containsKey(r)) 
{
-357// This entry has no delay so it 
will be added at the top of the flush
-358// queue.  It'll come out near 
immediately.
-359FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-360this.regionsInQueue.put(r, 
fqe);
-361this.flushQueue.add(fqe);
-362  }
-363}
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region 
r, long delay, boolean forceFlushAllStores) {
-368synchronized (regionsInQueue) {
-369  if (!regionsInQueue.containsKey(r)) 
{
-370// This entry has some delay
-371FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-372fqe.requeue(delay);
-373this.regionsInQueue.put(r, 
fqe);
-374this.flushQueue.add(fqe);
-375  }
-376}
-377  }
-378
-379  public int getFlushQueueSize() {
-380return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a 
run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387lock.writeLock().lock();
-388try {
-389  for (FlushHandler flushHander : 
flushHandlers) {
-390if (flushHander != null) 
flushHander.interrupt();
-391  }
-392} finally {
-393  lock.writeLock().unlock();
-394}
-395  }
-396
-397  synchronized void 
start(UncaughtExceptionHandler eh) {
-398ThreadFactory flusherThreadFactory = 
Threads.newDaemonThreadFactory(
-399
server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400for (int i = 0; i < flushHandlers.length; i++) {
-401  flushHandlers[i] = new 
FlushHandler("MemStoreFlusher." + i);
-402  
flusherThreadFactory.newThread(flushHandlers[i]);
-403  flushHandlers[i].start();
-404}
-405  }
-406
-407  boolean isAlive() {
-408for (FlushHandler flushHander : 
flushHandlers) {
-409  if (flushHander != null && flushHander.isAlive()) {
-410return true;
-411  }
-412}
-413return false;
-414  }
-415
-416  void join() {
-417for (FlushHandler flushHander : 
flushHandlers) {
-418  if (flushHander != null) {
-419
Threads.shutdown(flushHander.getThread());
-420  }
-421}
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file 
count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was 
successfully flushed, false otherwise. If
-429   * false, there will be accompanying 
log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final 
FlushRegionEntry fqe) {
-433Region region = fqe.region;
-434if (!region.getRegionInfo().isMetaRegion() &&
-435isTooManyStoreFiles(region)) {
-436  if 
(fqe.isMaximumWait(this.blockingWaitTime)) {
-437LOG.info("Waited " + 
(EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438  "ms on a compaction to clean up 
'too many store files'; waited " +
-439  "long enough... proceeding with 
flush of " +
-440  
region.getRegionInfo().getRegionNameAsString());
-441  } else {
-442// If this is first time we've 
been put off, then emit a log message.
-443if (fqe.getRequeueCount() <= 0) {
-444  // Note: We don't impose 
blockingStoreFiles constraint on meta regions
-445  LOG.warn("Region " + 
region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446"store files; delaying flush 
up to " + this.blockingWaitTime + "ms");
-447  if 
(!this.server.compactSplitThread.requestSplit(region)) {
-448try {
-449  
this.server.compactSplitThread.requestSystemCompaction(
-450  region, 
Thread.currentThread().getName());
-451} catch (IOException e) {
-452  e = e instanceof 
RemoteException ?
-453  
((RemoteException)e).unwrapRemoteException() : e;
-454  LOG.error("Cache flush 
failed for region " +
-455
Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456}
-457  }
-458}
-459
-460// Put back on the queue.  Have 
it come back out of 
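
requestFlush() and requestDelayedFlush() above share one pattern: the
regionsInQueue map dedupes requests while flushQueue (a DelayQueue) orders
them, the only difference being whether requeue(delay) runs before insertion.
A sketch of that pattern, reusing the FlushEntry sketch shown earlier and
reducing Region to a String key:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.DelayQueue;

class FlushRequester {
  private final Map<String, FlushEntry> regionsInQueue = new HashMap<>();
  private final DelayQueue<FlushEntry> flushQueue = new DelayQueue<>();

  void requestFlush(String region) { requestDelayedFlush(region, 0L); }

  void requestDelayedFlush(String region, long delayMs) {
    synchronized (regionsInQueue) {
      if (!regionsInQueue.containsKey(region)) { // already queued: no-op
        FlushEntry fqe = new FlushEntry().requeue(delayMs);
        regionsInQueue.put(region, fqe);
        flushQueue.add(fqe);
      }
    }
  }
  // A consumer taking from flushQueue must also remove the map entry,
  // or later requests for the region would be silently dropped.
}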

[42/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
index b695472..96f9250 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -168,6 +168,14 @@ implements 
 private long
+maxCompactionQueueSize
+
+
+private long
+maxFlushQueueSize
+
+
+private long
 maxStoreFileAge
 
 
@@ -262,93 +270,109 @@ implements 
 long
-getMaxStoreFileAge()
+getMaxCompactionQueueSize()
 
 
 long
+getMaxFlushQueueSize()
+
+
+long
+getMaxStoreFileAge()
+
+
+long
 getMemstoreSize()
 Get the size of the memstore on this region server.
 
 
-
+
 long
 getMinStoreFileAge()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getNamespace()
 Get the name of the namespace this table is in.
 
 
-
+
 long
 getNumBytesCompacted()
 
-
+
 long
 getNumCompactionsCompleted()
 
-
+
 long
 getNumCompactionsFailed()
 Returns the total number of compactions that have been 
reported as failed on this region.
 
 
-
+
+long
+getNumCompactionsQueued()
+
+
 long
 getNumFilesCompacted()
 
-
+
+long
+getNumFlushesQueued()
+
+
 long
 getNumReferenceFiles()
 
-
+
 long
 getNumStoreFiles()
 Get the number of store files hosted on this region 
server.
 
 
-
+
 long
 getNumStores()
 Get the number of stores hosted on this region server.
 
 
-
+
 long
 getReadRequestCount()
 Get the total number of read requests that have been issued 
against this region
 
 
-
+
 int
 getRegionHashCode()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getRegionName()
 Get the name of the region.
 
 
-
+
 int
 getReplicaId()
 Get the replica id of this region.
 
 
-
+
 long
 getStoreFileSize()
 Get the total size of the store files this region server is 
serving from.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getTableName()
 Get the name of the table the region belongs to.
 
 
-
+
 long
 getWriteRequestCount()
 Get the total number of mutations that have been issued 
against this region.
@@ -501,13 +525,31 @@ implements numReferenceFiles
 
 
+
+
+
+
+
+maxFlushQueueSize
+private long maxFlushQueueSize
+
+
+
+
+
+
+
+maxCompactionQueueSize
+private long maxCompactionQueueSize
+
+
 
 
 
 
 
 regionMetricsUpdateTask
-private ScheduledFuture<?> regionMetricsUpdateTask
+private ScheduledFuture<?> regionMetricsUpdateTask
 
 
 
@@ -524,7 +566,7 @@ implements 
 
 MetricsRegionWrapperImpl
-public MetricsRegionWrapperImpl(HRegion region)
+public MetricsRegionWrapperImpl(HRegion region)
 
 
 
@@ -541,7 +583,7 @@ implements 
 
 getTableName
-public String getTableName()
+public String getTableName()
 Description copied from 
interface:MetricsRegionWrapper
 Get the name of the table the region belongs to.
 
@@ -558,7 +600,7 @@ implements 
 
 getNamespace
-public String getNamespace()
+public String getNamespace()
 Description copied from 
interface:MetricsRegionWrapper
 Get the name of the namespace this table is in.
 
@@ -575,7 +617,7 @@ implements 
 
 getRegionName
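
The wrapper above grows maxCompactionQueueSize/maxFlushQueueSize fields and
getters, maintained by the pre-existing regionMetricsUpdateTask. A sketch of
keeping such high-water marks with a periodic sampling task; the 45-second
period and the supplier wiring are assumptions:

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

class QueueSizeMetrics {
  private volatile long maxCompactionQueueSize;
  private volatile long maxFlushQueueSize;
  private final ScheduledFuture<?> regionMetricsUpdateTask;

  QueueSizeMetrics(ScheduledExecutorService scheduler,
                   LongSupplier compactionsQueued, LongSupplier flushesQueued) {
    // Sample the live counters and keep the running maximum; safe because
    // only this single scheduled task ever writes the fields.
    regionMetricsUpdateTask = scheduler.scheduleWithFixedDelay(() -> {
      maxCompactionQueueSize = Math.max(maxCompactionQueueSize, compactionsQueued.getAsLong());
      maxFlushQueueSize = Math.max(maxFlushQueueSize, flushesQueued.getAsLong());
    }, 0, 45, TimeUnit.SECONDS);
  }

  long getMaxCompactionQueueSize() { return maxCompactionQueueSize; }
  long getMaxFlushQueueSize()      { return maxFlushQueueSize; }

  void stop() { regionMetricsUpdateTask.cancel(false); }
}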

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the maxSeqId up to which the store 
was 
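
The substantive change in this hunk is the pair of new counters, compactionsQueued and flushesQueued, added alongside the existing compaction metrics. As a rough, hypothetical sketch (the class and method names below are illustrative, not HRegion's actual API), such queued-work gauges typically follow an increment-on-enqueue, decrement-on-completion pattern and are sampled by a metrics wrapper:

    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical sketch of the queued-work gauge pattern; not HBase code.
    final class QueuedWorkGauge {
      private final AtomicLong queued = new AtomicLong(0L);

      void onEnqueued() { queued.incrementAndGet(); }  // request accepted into a queue
      void onFinished() { queued.decrementAndGet(); }  // worker picked it up and completed

      long current() { return queued.get(); }          // sampled by a metrics wrapper

      public static void main(String[] args) {
        QueuedWorkGauge gauge = new QueuedWorkGauge();
        gauge.onEnqueued();
        gauge.onEnqueued();
        gauge.onFinished();
        System.out.println("queued=" + gauge.current()); // prints queued=1
      }
    }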

[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/a2b2dd19
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/a2b2dd19
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/a2b2dd19

Branch: refs/heads/asf-site
Commit: a2b2dd19e3835158d7972644b66a73da4046de0a
Parents: ee33029
Author: jenkins 
Authored: Fri Jul 28 15:07:12 2017 +
Committer: jenkins 
Committed: Fri Jul 28 15:07:12 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf | 7791 ++--
 book.html | 7 +
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html | 33242 +
 checkstyle.rss | 6 +-
 coc.html | 4 +-
 cygwin.html | 4 +-
 dependencies.html | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html | 4 +-
 dependency-management.html | 4 +-
 devapidocs/constant-values.html | 62 +-
 devapidocs/index-all.html | 46 +
 .../hadoop/hbase/backup/package-tree.html | 4 +-
 .../hbase/classification/package-tree.html | 6 +-
 .../hadoop/hbase/client/package-tree.html | 24 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 8 +-
 .../apache/hadoop/hbase/ipc/package-tree.html | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html | 4 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html | 4 +-
 .../hbase/master/procedure/package-tree.html | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html | 10 +-
 .../hadoop/hbase/procedure2/package-tree.html | 6 +-
 .../hadoop/hbase/quotas/package-tree.html | 8 +-
 .../CompactSplit.CompactionRunner.html | 28 +-
 .../regionserver/CompactSplit.Rejection.html | 6 +-
 .../hadoop/hbase/regionserver/CompactSplit.html | 40 +-
 .../regionserver/HRegion.BatchOperation.html | 26 +-
 .../regionserver/HRegion.FlushResultImpl.html | 24 +-
 .../regionserver/HRegion.MutationBatch.html | 20 +-
 .../HRegion.ObservedExceptionsInBatch.html | 22 +-
 .../HRegion.PrepareFlushResult.html | 26 +-
 .../regionserver/HRegion.RegionScannerImpl.html | 92 +-
 .../hbase/regionserver/HRegion.ReplayBatch.html | 18 +-
 .../regionserver/HRegion.RowLockContext.html | 28 +-
 .../hbase/regionserver/HRegion.RowLockImpl.html | 16 +-
 .../hbase/regionserver/HRegion.WriteState.html | 26 +-
 .../hadoop/hbase/regionserver/HRegion.html | 1213 +-
 .../MemStoreFlusher.FlushQueueEntry.html | 2 +-
 .../MemStoreFlusher.FlushRegionEntry.html | 32 +-
 .../MemStoreFlusher.WakeupFlushThread.html | 10 +-
 .../hbase/regionserver/MemStoreFlusher.html | 36 +-
 .../hbase/regionserver/MetricsRegionSource.html | 174 +-
 .../regionserver/MetricsRegionSourceImpl.html | 6 +-
 .../regionserver/MetricsRegionWrapper.html | 112 +-
 ...apperImpl.HRegionMetricsWrapperRunnable.html | 6 +-
 .../regionserver/MetricsRegionWrapperImpl.html | 190 +-
 .../hadoop/hbase/regionserver/package-tree.html | 20 +-
 .../regionserver/querymatcher/package-tree.html | 4 +-
 .../hadoop/hbase/rest/model/package-tree.html | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html | 76 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html | 30 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html | 6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html | 56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html | 14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html | 18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html | 10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html | 42 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html | 22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html | 20 +-
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html | 38 +-
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html | 12 +-
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html | 12 +-
 .../util/HBaseFsck.WorkItemOverlapMerge.html | 10 +-
 .../hbase/util/HBaseFsck.WorkItemRegion.html | 16 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.html | 152

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
index 7dabb5e..782b6f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html
@@ -352,369 +352,376 @@
 344    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
 345      ? longCompactions : shortCompactions;
 346    pool.execute(new CompactionRunner(s, r, compaction, pool, user));
-347    if (LOG.isDebugEnabled()) {
-348      String type = (pool == shortCompactions) ? "Small " : "Large ";
-349      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
-350          + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
-351    }
-352    return selectNow ? compaction.getRequest() : null;
-353  }
-354
-355  private CompactionContext selectCompaction(final Region r, final Store s,
-356      int priority, CompactionRequest request, User user) throws IOException {
-357    CompactionContext compaction = s.requestCompaction(priority, request, user);
-358    if (compaction == null) {
-359      if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
-360        LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() +
-361            " because compaction request was cancelled");
-362      }
-363      return null;
-364    }
-365    assert compaction.hasSelection();
-366    if (priority != Store.NO_PRIORITY) {
-367      compaction.getRequest().setPriority(priority);
-368    }
-369    return compaction;
-370  }
-371
-372  /**
-373   * Only interrupt once it's done with a run through the work loop.
-374   */
-375  void interruptIfNecessary() {
-376    splits.shutdown();
-377    longCompactions.shutdown();
-378    shortCompactions.shutdown();
-379  }
-380
-381  private void waitFor(ThreadPoolExecutor t, String name) {
-382    boolean done = false;
-383    while (!done) {
-384      try {
-385        done = t.awaitTermination(60, TimeUnit.SECONDS);
-386        LOG.info("Waiting for " + name + " to finish...");
-387        if (!done) {
-388          t.shutdownNow();
-389        }
-390      } catch (InterruptedException ie) {
-391        LOG.warn("Interrupted waiting for " + name + " to finish...");
-392      }
-393    }
-394  }
-395
-396  void join() {
-397    waitFor(splits, "Split Thread");
-398    waitFor(longCompactions, "Large Compaction Thread");
-399    waitFor(shortCompactions, "Small Compaction Thread");
-400  }
-401
-402  /**
-403   * Returns the current size of the queue containing regions that are
-404   * processed.
-405   *
-406   * @return The current size of the regions queue.
-407   */
-408  public int getCompactionQueueSize() {
-409    return longCompactions.getQueue().size() + shortCompactions.getQueue().size();
-410  }
-411
-412  public int getLargeCompactionQueueSize() {
-413    return longCompactions.getQueue().size();
-414  }
-415
+347    ((HRegion)r).incrementCompactionsQueuedCount();
+348    if (LOG.isDebugEnabled()) {
+349      String type = (pool == shortCompactions) ? "Small " : "Large ";
+350      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
+351          + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
+352    }
+353    return selectNow ? compaction.getRequest() : null;
+354  }
+355
+356  private CompactionContext selectCompaction(final Region r, final Store s,
+357      int priority, CompactionRequest request, User user) throws IOException {
+358    CompactionContext compaction = s.requestCompaction(priority, request, user);
+359    if (compaction == null) {
+360      if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
+361        LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() +
+362            " because compaction request was cancelled");
+363      }
+364      return null;
+365    }
+366    assert compaction.hasSelection();
+367    if (priority != Store.NO_PRIORITY) {
+368      compaction.getRequest().setPriority(priority);
+369    }
+370    return compaction;
+371  }
+372
+373  /**
+374   * Only interrupt once it's done with a run through the work loop.
+375   */
+376  void interruptIfNecessary() {
+377    splits.shutdown();
+378    longCompactions.shutdown();
+379    shortCompactions.shutdown();
+380  }
+381
+382  private void waitFor(ThreadPoolExecutor t, String name) {
+383    boolean done = false;
+384    while (!done) {
+385      try {
+386        done =
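
The only functional change in this hunk is the added ((HRegion)r).incrementCompactionsQueuedCount() call ahead of the debug log; the surrounding context shows how CompactSplit routes each request to the long or short pool by throttling on the selected request size. A minimal sketch of that routing decision follows, assuming an illustrative 2 GB throttle point (in HBase the real value comes from configuration, and the class below is not HBase code):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    // Illustrative sketch of the two-pool routing visible above: large
    // compactions go to a dedicated "long" pool so small ones are not stuck
    // behind them. The 2 GB throttle point is an assumed example value.
    final class CompactionRouterSketch {
      private final ThreadPoolExecutor longCompactions =
          (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
      private final ThreadPoolExecutor shortCompactions =
          (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
      private final long throttlePointBytes = 2L * 1024 * 1024 * 1024; // assumed

      void submit(long requestSizeBytes, Runnable compaction) {
        // Mirrors: (selectNow && s.throttleCompaction(size)) ? longCompactions : shortCompactions
        ThreadPoolExecutor pool =
            requestSizeBytes > throttlePointBytes ? longCompactions : shortCompactions;
        pool.execute(compaction);
      }

      public static void main(String[] args) {
        CompactionRouterSketch router = new CompactionRouterSketch();
        router.submit(3L * 1024 * 1024 * 1024, () -> System.out.println("long pool"));
        router.submit(64L * 1024 * 1024, () -> System.out.println("short pool"));
        router.longCompactions.shutdown();
        router.shortCompactions.shutdown();
      }
    }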

[04/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -2492,2617 +2492,2627 @@
 2484          return;
 2485        }
 2486      }
-2487      errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488          + descriptiveName + " is a split parent in META, in HDFS, "
-2489          + "and not deployed on any region server. This could be transient, "
-2490          + "consider to run the catalog janitor first!");
-2491      if (shouldFixSplitParents()) {
-2492        setShouldRerun();
-2493        resetSplitParent(hbi);
-2494      }
-2495    } else if (inMeta && !inHdfs && !isDeployed) {
-2496      errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497          + descriptiveName + " found in META, but not in HDFS "
-2498          + "or deployed on any region server.");
-2499      if (shouldFixMeta()) {
-2500        deleteMetaRegion(hbi);
-2501      }
-2502    } else if (inMeta && !inHdfs && isDeployed) {
-2503      errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504          + " found in META, but not in HDFS, " +
-2505          "and deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2506      // We treat HDFS as ground truth.  Any information in meta is transient
-2507      // and equivalent data can be regenerated.  So, lets unassign and remove
-2508      // these problems from META.
-2509      if (shouldFixAssignments()) {
-2510        errors.print("Trying to fix unassigned region...");
-2511        undeployRegions(hbi);
-2512      }
-2513      if (shouldFixMeta()) {
-2514        // wait for it to complete
-2515        deleteMetaRegion(hbi);
-2516      }
-2517    } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518      errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519          + " not deployed on any region server.");
-2520      tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
-2521    } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522      errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523          "Region " + descriptiveName + " should not be deployed according " +
-2524          "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2525      if (shouldFixAssignments()) {
-2526        errors.print("Trying to close the region " + descriptiveName);
-2527        setShouldRerun();
-2528        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2529      }
-2530    } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531      errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532          + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer
-2533          + " but is multiply assigned to region servers " +
-2534          Joiner.on(", ").join(hbi.deployedOn));
-2535      // If we are trying to fix the errors
-2536      if (shouldFixAssignments()) {
-2537        errors.print("Trying to fix assignment error...");
-2538        setShouldRerun();
-2539        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2540      }
-2541    } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542      errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543          + descriptiveName + " listed in hbase:meta on region server " +
-2544          hbi.metaEntry.regionServer + " but found on region server " +
-2545          hbi.deployedOn.get(0));
-2546      // If we are trying to fix the errors
-2547      if (shouldFixAssignments()) {
-2548        errors.print("Trying to fix assignment error...");
-2549        setShouldRerun();
-2550        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2551        HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552      }
-2553    } else {
-2554      errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555          " is in an unforeseen state:" +
-2556          " inMeta=" + inMeta +
-2557          " inHdfs=" + inHdfs +
-2558          " isDeployed=" + isDeployed +
-2559          " isMultiplyDeployed=" + isMultiplyDeployed +
-2560          " deploymentMatchesMeta=" + deploymentMatchesMeta +
-2561          " shouldBeDeployed=" + shouldBeDeployed);
-2562    }
-2563  }
-2564
-2565  /**
-2566
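
This block is functionally identical on both sides of the diff (the hunk only renumbers it); it is a dispatch over a handful of booleans describing where a region is known: hbase:meta, HDFS, and actual deployment. Condensed into a sketch, the branch order shown above is as follows (the LINGERING_SPLIT_PARENT branch sits just before these, the ERROR_CODE names are the ones in the fragment, and the repair actions are only summarized in comments; this class is not HBase code):

    // Condensed sketch of the consistency dispatch above.
    final class RegionStateSketch {
      static String classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
          boolean shouldBeDeployed, boolean isMultiplyDeployed, boolean deploymentMatchesMeta) {
        if (inMeta && !inHdfs && !isDeployed) return "NOT_IN_HDFS_OR_DEPLOYED"; // delete meta entry
        if (inMeta && !inHdfs && isDeployed) return "NOT_IN_HDFS"; // undeploy, then delete meta entry
        if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) return "NOT_DEPLOYED"; // assignment repair
        if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) return "SHOULD_NOT_BE_DEPLOYED"; // close region
        if (inMeta && inHdfs && isMultiplyDeployed) return "MULTI_DEPLOYED"; // fix multi-assignment
        if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) return "SERVER_DOES_NOT_MATCH_META"; // reassign
        return "UNKNOWN"; // unforeseen combination; reported with all flags
      }

      public static void main(String[] args) {
        // Region listed in meta and present in HDFS but not deployed anywhere:
        System.out.println(classify(true, true, false, true, false, false)); // NOT_DEPLOYED
      }
    }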

[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -293,7944 +293,7962 @@

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -2492,2617 +2492,2627 @@

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -2492,2617 +2492,2627 @@

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
@@ -293,7944 +293,7962 @@

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -2492,2617 +2492,2627 @@

[17/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -2492,2617 +2492,2627 @@

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -2492,2617 +2492,2627 @@

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index f355960..13d9b4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, boolean forceFlushAllStores) {
-355    synchronized (regionsInQueue) {
-356      if (!regionsInQueue.containsKey(r)) {
-357        // This entry has no delay so it will be added at the top of the flush
-358        // queue.  It'll come out near immediately.
-359        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-360        this.regionsInQueue.put(r, fqe);
-361        this.flushQueue.add(fqe);
-362      }
-363    }
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region r, long delay, boolean forceFlushAllStores) {
-368    synchronized (regionsInQueue) {
-369      if (!regionsInQueue.containsKey(r)) {
-370        // This entry has some delay
-371        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-372        fqe.requeue(delay);
-373        this.regionsInQueue.put(r, fqe);
-374        this.flushQueue.add(fqe);
-375      }
-376    }
-377  }
-378
-379  public int getFlushQueueSize() {
-380    return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387    lock.writeLock().lock();
-388    try {
-389      for (FlushHandler flushHander : flushHandlers) {
-390        if (flushHander != null) flushHander.interrupt();
-391      }
-392    } finally {
-393      lock.writeLock().unlock();
-394    }
-395  }
-396
-397  synchronized void start(UncaughtExceptionHandler eh) {
-398    ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(
-399        server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400    for (int i = 0; i < flushHandlers.length; i++) {
-401      flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
-402      flusherThreadFactory.newThread(flushHandlers[i]);
-403      flushHandlers[i].start();
-404    }
-405  }
-406
-407  boolean isAlive() {
-408    for (FlushHandler flushHander : flushHandlers) {
-409      if (flushHander != null && flushHander.isAlive()) {
-410        return true;
-411      }
-412    }
-413    return false;
-414  }
-415
-416  void join() {
-417    for (FlushHandler flushHander : flushHandlers) {
-418      if (flushHander != null) {
-419        Threads.shutdown(flushHander.getThread());
-420      }
-421    }
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was successfully flushed, false otherwise. If
-429   * false, there will be accompanying log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final FlushRegionEntry fqe) {
-433    Region region = fqe.region;
-434    if (!region.getRegionInfo().isMetaRegion() &&
-435        isTooManyStoreFiles(region)) {
-436      if (fqe.isMaximumWait(this.blockingWaitTime)) {
-437        LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438          "ms on a compaction to clean up 'too many store files'; waited " +
-439          "long enough... proceeding with flush of " +
-440          region.getRegionInfo().getRegionNameAsString());
-441      } else {
-442        // If this is first time we've been put off, then emit a log message.
-443        if (fqe.getRequeueCount() <= 0) {
-444          // Note: We don't impose blockingStoreFiles constraint on meta regions
-445          LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446            "store files; delaying flush up to " + this.blockingWaitTime + "ms");
-447          if (!this.server.compactSplitThread.requestSplit(region)) {
-448            try {
-449              this.server.compactSplitThread.requestSystemCompaction(
-450                  region, Thread.currentThread().getName());
-451            } catch (IOException e) {
-452              e = e instanceof RemoteException ?
-453                  ((RemoteException)e).unwrapRemoteException() : e;
-454              LOG.error("Cache flush failed for region " +
-455                Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456            }
-457          }
-458        }
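
Two details in this hunk are worth noting: requestFlush and requestDelayedFlush use the regionsInQueue map so a region is queued at most once, and the "too many store files" path requeues the entry with a delay instead of flushing. A simplified, self-contained sketch of that de-duplicated delay-queue pattern follows (the Entry type stands in for FlushRegionEntry; none of these names are HBase API):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    // Simplified sketch of the de-duplicated delay-queue pattern above:
    // one entry per region, optionally requeued with a delay.
    final class FlushQueueSketch {
      static final class Entry implements Delayed {
        final String region;
        volatile long whenNanos;  // time the entry becomes eligible

        Entry(String region, long delayMs) {
          this.region = region;
          requeue(delayMs);
        }
        void requeue(long delayMs) {
          whenNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMs);
        }
        @Override public long getDelay(TimeUnit unit) {
          return unit.convert(whenNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
        }
        @Override public int compareTo(Delayed other) {
          return Long.compare(getDelay(TimeUnit.NANOSECONDS), other.getDelay(TimeUnit.NANOSECONDS));
        }
      }

      private final Map<String, Entry> regionsInQueue = new HashMap<>();
      private final DelayQueue<Entry> flushQueue = new DelayQueue<>();

      // Mirrors requestFlush/requestDelayedFlush: queue a region at most once.
      synchronized void request(String region, long delayMs) {
        if (!regionsInQueue.containsKey(region)) {
          Entry e = new Entry(region, delayMs);
          regionsInQueue.put(region, e);
          flushQueue.add(e);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        FlushQueueSketch q = new FlushQueueSketch();
        q.request("region-a", 0);  // eligible immediately
        q.request("region-a", 0);  // ignored: already queued
        System.out.println(q.flushQueue.take().region); // prints region-a
      }
    }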

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -2492,2617 +2492,2627 @@
 2484          return;
 2485        }
 2486      }
-2487      errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488          + descriptiveName + " is a split parent in META, in HDFS, "
-2489          + "and not deployed on any region server. This could be transient, "
-2490          + "consider to run the catalog janitor first!");
-2491      if (shouldFixSplitParents()) {
-2492        setShouldRerun();
-2493        resetSplitParent(hbi);
-2494      }
-2495    } else if (inMeta && !inHdfs && !isDeployed) {
-2496      errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497          + descriptiveName + " found in META, but not in HDFS "
-2498          + "or deployed on any region server.");
-2499      if (shouldFixMeta()) {
-2500        deleteMetaRegion(hbi);
-2501      }
-2502    } else if (inMeta && !inHdfs && isDeployed) {
-2503      errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504          + " found in META, but not in HDFS, " +
-2505          "and deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2506      // We treat HDFS as ground truth.  Any information in meta is transient
-2507      // and equivalent data can be regenerated.  So, lets unassign and remove
-2508      // these problems from META.
-2509      if (shouldFixAssignments()) {
-2510        errors.print("Trying to fix unassigned region...");
-2511        undeployRegions(hbi);
-2512      }
-2513      if (shouldFixMeta()) {
-2514        // wait for it to complete
-2515        deleteMetaRegion(hbi);
-2516      }
-2517    } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518      errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519          + " not deployed on any region server.");
-2520      tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
-2521    } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522      errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523          "Region " + descriptiveName + " should not be deployed according " +
-2524          "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2525      if (shouldFixAssignments()) {
-2526        errors.print("Trying to close the region " + descriptiveName);
-2527        setShouldRerun();
-2528        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2529      }
-2530    } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531      errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532          + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer
-2533          + " but is multiply assigned to region servers " +
-2534          Joiner.on(", ").join(hbi.deployedOn));
-2535      // If we are trying to fix the errors
-2536      if (shouldFixAssignments()) {
-2537        errors.print("Trying to fix assignment error...");
-2538        setShouldRerun();
-2539        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2540      }
-2541    } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542      errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543          + descriptiveName + " listed in hbase:meta on region server " +
-2544          hbi.metaEntry.regionServer + " but found on region server " +
-2545          hbi.deployedOn.get(0));
-2546      // If we are trying to fix the errors
-2547      if (shouldFixAssignments()) {
-2548        errors.print("Trying to fix assignment error...");
-2549        setShouldRerun();
-2550        HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2551        HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552      }
-2553    } else {
-2554      errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555          " is in an unforeseen state:" +
-2556          " inMeta=" + inMeta +
-2557          " inHdfs=" + inHdfs +
-2558          " isDeployed=" + isDeployed +
-2559          " isMultiplyDeployed=" + isMultiplyDeployed +
-2560          " deploymentMatchesMeta=" + deploymentMatchesMeta +
-2561          " shouldBeDeployed=" + shouldBeDeployed);
-2562    }
-2563  }
-2564
-2565  /**
-2566   * Checks tables integrity. Goes over all 
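
For orientation, the hbck logic in the hunk above is a state table over a handful of booleans describing where a region is known: hbase:meta, HDFS, and the region servers. A condensed, self-contained Java sketch of that classification follows; the RegionConsistencySketch/Finding names and the classify() helper are illustrative stand-ins, not HBase API:

    // Illustrative only: a condensed model of the state table hbck walks above.
    public class RegionConsistencySketch {

      enum Finding {
        LINGERING_SPLIT_PARENT, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS,
        NOT_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_DEPLOYED,
        SERVER_DOES_NOT_MATCH_META, UNKNOWN
      }

      static Finding classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
          boolean shouldBeDeployed, boolean isMultiplyDeployed,
          boolean deploymentMatchesMeta, boolean isSplitParent) {
        if (inMeta && inHdfs && isSplitParent && !isDeployed) {
          return Finding.LINGERING_SPLIT_PARENT;     // catalog janitor may clean this up
        } else if (inMeta && !inHdfs && !isDeployed) {
          return Finding.NOT_IN_HDFS_OR_DEPLOYED;    // fix: delete the meta row
        } else if (inMeta && !inHdfs && isDeployed) {
          return Finding.NOT_IN_HDFS;                // HDFS is ground truth: unassign, clean meta
        } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
          return Finding.NOT_DEPLOYED;               // fix: assign the region
        } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
          return Finding.SHOULD_NOT_BE_DEPLOYED;     // fix: close the region
        } else if (inMeta && inHdfs && isMultiplyDeployed) {
          return Finding.MULTI_DEPLOYED;             // fix: resolve the double assignment
        } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
          return Finding.SERVER_DOES_NOT_MATCH_META; // fix: reassign so meta matches reality
        }
        return Finding.UNKNOWN;                      // unforeseen flag combination
      }
    }

Read this way, the branches make the repair policy clear: every !inHdfs combination resolves to a meta-side or assignment-side repair, because HDFS is treated as the ground truth.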

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index f355960..13d9b4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, boolean forceFlushAllStores) {
-355    synchronized (regionsInQueue) {
-356      if (!regionsInQueue.containsKey(r)) {
-357        // This entry has no delay so it will be added at the top of the flush
-358        // queue.  It'll come out near immediately.
-359        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-360        this.regionsInQueue.put(r, fqe);
-361        this.flushQueue.add(fqe);
-362      }
-363    }
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region r, long delay, boolean forceFlushAllStores) {
-368    synchronized (regionsInQueue) {
-369      if (!regionsInQueue.containsKey(r)) {
-370        // This entry has some delay
-371        FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-372        fqe.requeue(delay);
-373        this.regionsInQueue.put(r, fqe);
-374        this.flushQueue.add(fqe);
-375      }
-376    }
-377  }
-378
-379  public int getFlushQueueSize() {
-380    return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387    lock.writeLock().lock();
-388    try {
-389      for (FlushHandler flushHander : flushHandlers) {
-390        if (flushHander != null) flushHander.interrupt();
-391      }
-392    } finally {
-393      lock.writeLock().unlock();
-394    }
-395  }
-396
-397  synchronized void start(UncaughtExceptionHandler eh) {
-398    ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(
-399        server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400    for (int i = 0; i < flushHandlers.length; i++) {
-401      flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
-402      flusherThreadFactory.newThread(flushHandlers[i]);
-403      flushHandlers[i].start();
-404    }
-405  }
-406
-407  boolean isAlive() {
-408    for (FlushHandler flushHander : flushHandlers) {
-409      if (flushHander != null && flushHander.isAlive()) {
-410        return true;
-411      }
-412    }
-413    return false;
-414  }
-415
-416  void join() {
-417    for (FlushHandler flushHander : flushHandlers) {
-418      if (flushHander != null) {
-419        Threads.shutdown(flushHander.getThread());
-420      }
-421    }
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was successfully flushed, false otherwise. If
-429   * false, there will be accompanying log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final FlushRegionEntry fqe) {
-433    Region region = fqe.region;
-434    if (!region.getRegionInfo().isMetaRegion() &&
-435        isTooManyStoreFiles(region)) {
-436      if (fqe.isMaximumWait(this.blockingWaitTime)) {
-437        LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438          "ms on a compaction to clean up 'too many store files'; waited " +
-439          "long enough... proceeding with flush of " +
-440          region.getRegionInfo().getRegionNameAsString());
-441      } else {
-442        // If this is first time we've been put off, then emit a log message.
-443        if (fqe.getRequeueCount() <= 0) {
-444          // Note: We don't impose blockingStoreFiles constraint on meta regions
-445          LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446            "store files; delaying flush up to " + this.blockingWaitTime + "ms");
-447          if (!this.server.compactSplitThread.requestSplit(region)) {
-448            try {
-449              this.server.compactSplitThread.requestSystemCompaction(
-450                  region, Thread.currentThread().getName());
-451            } catch (IOException e) {
-452              e = e instanceof RemoteException ?
-453                      ((RemoteException)e).unwrapRemoteException() : e;
-454              LOG.error("Cache flush failed for region " +
-455                Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456            }
-457          }
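
The two request methods in the hunk above share one idea: regionsInQueue deduplicates requests so each region has at most one pending entry, while a delay queue orders those entries by their delay. A minimal sketch of that pattern under stated assumptions; Entry and the generic region type R are placeholders, not the HBase FlushRegionEntry/Region classes:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    // Sketch of the per-region dedup + delay-queue pattern used above.
    class FlushQueueSketch<R> {
      private final Map<R, Entry<R>> regionsInQueue = new HashMap<>();
      private final DelayQueue<Entry<R>> flushQueue = new DelayQueue<>();

      void requestFlush(R region, long delayMs) {
        synchronized (regionsInQueue) {
          if (!regionsInQueue.containsKey(region)) { // dedup: one pending entry per region
            Entry<R> e = new Entry<>(region, delayMs); // delayMs == 0 comes out immediately
            regionsInQueue.put(region, e);
            flushQueue.add(e);
          }
        }
      }

      static final class Entry<R> implements Delayed {
        final R region;
        final long whenNanos;
        Entry(R region, long delayMs) {
          this.region = region;
          this.whenNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMs);
        }
        @Override public long getDelay(TimeUnit unit) {
          return unit.convert(whenNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
        }
        @Override public int compareTo(Delayed o) {
          return Long.compare(getDelay(TimeUnit.NANOSECONDS), o.getDelay(TimeUnit.NANOSECONDS));
        }
      }
    }

The synchronized check-then-put is what keeps a hot region from flooding the queue: repeated requests while an entry is pending are simply dropped.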

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = new AtomicLong(0L);
 286  final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int rowLockWaitDuration;
-294  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
-295
-296  // The internal wait duration to acquire a lock before read/update
-297  // from the region. It is not per row. The purpose of this wait time
-298  // is to avoid waiting a long time while the region is busy, so that
-299  // we can release the IPC handler soon enough to improve the
-300  // availability of the region server. It can be adjusted by
-301  // tuning configuration "hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one call, wait longer,
-306  // i.e. waiting for busyWaitDuration * # of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no point to wait longer than the RPC
-311  // purge timeout, when a RPC call will be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite timeout
-319  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int rowLockWaitDuration;
+296  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
+297
+298  // The internal wait duration to acquire a lock before read/update
+299  // from the region. It is not per row. The purpose of this wait time
+300  // is to avoid waiting a long time while the region is busy, so that
+301  // we can release the IPC handler soon enough to improve the
+302  // availability of the region server. It can be adjusted by
+303  // tuning configuration "hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one call, wait longer,
+308  // i.e. waiting for busyWaitDuration * # of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no point to wait longer than the RPC
+313  // purge timeout, when a RPC call will be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite timeout
+321  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to enable on-demand CF loading for
-331   * scan requests to this region. Requests can override it.
-332   */
-333  private boolean isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to ensure that we do not lose any data. So, we
-340  // have to be conservative in how we replay wals. For each store, we calculate
-341  // the maxSeqId up to which the store was flushed. And, skip the edits which
-342  // are equal to or 
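
The substance of this hunk is the two added fields, compactionsQueued and flushesQueued, which give visibility into queued-or-running work per region. A small hedged sketch of the counter lifecycle they imply; the on*() hook names are hypothetical, not HRegion methods:

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch of the counter lifecycle: bump on enqueue, drop when the work
    // completes, so the current value reflects queued-or-running work.
    class QueueCounterSketch {
      final AtomicLong compactionsQueued = new AtomicLong(0L);
      final AtomicLong flushesQueued = new AtomicLong(0L);

      void onCompactionRequested() { compactionsQueued.incrementAndGet(); }
      void onCompactionFinished()  { compactionsQueued.decrementAndGet(); }
      void onFlushRequested()      { flushesQueued.incrementAndGet(); }
      void onFlushFinished()       { flushesQueued.decrementAndGet(); }

      long currentCompactionBacklog() { return compactionsQueued.get(); }
    }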

hbase git commit: HBASE-15134 Add visibility into Flush and Compaction queues

2017-07-28 Thread achouhan
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 19496f15d -> 1ecaa1d2a


HBASE-15134 Add visibility into Flush and Compaction queues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ecaa1d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ecaa1d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ecaa1d2

Branch: refs/heads/branch-1.4
Commit: 1ecaa1d2a7ce6d96084f7558d52663bce8bdd2c3
Parents: 19496f1
Author: Abhishek Singh Chouhan 
Authored: Fri Jul 28 13:17:32 2017 +0530
Committer: Abhishek Singh Chouhan 
Committed: Fri Jul 28 13:24:28 2017 +0530

--
 .../hbase/regionserver/MetricsRegionSource.java |  8 +
 .../regionserver/MetricsRegionWrapper.java  | 24 +++
 .../regionserver/MetricsRegionSourceImpl.java   | 19 +++-
 .../TestMetricsRegionSourceImpl.java| 20 
 .../hbase/regionserver/CompactSplitThread.java  |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 20 +++-
 .../hbase/regionserver/MemStoreFlusher.java |  2 ++
 .../regionserver/MetricsRegionWrapperImpl.java  | 32 
 .../regionserver/MetricsRegionWrapperStub.java  | 20 
 .../hbase/regionserver/TestMetricsRegion.java   | 12 
 10 files changed, 163 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ecaa1d2/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index decf841..d5738cf 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -30,11 +30,19 @@ public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {
   String COMPACTIONS_COMPLETED_COUNT = "compactionsCompletedCount";
   String COMPACTIONS_FAILED_COUNT = "compactionsFailedCount";
   String LAST_MAJOR_COMPACTION_AGE = "lastMajorCompactionAge";
+  String COMPACTIONS_QUEUED_COUNT = "compactionsQueuedCount";
+  String MAX_COMPACTION_QUEUE_SIZE = "maxCompactionQueueSize";
   String NUM_BYTES_COMPACTED_COUNT = "numBytesCompactedCount";
   String NUM_FILES_COMPACTED_COUNT = "numFilesCompactedCount";
+  String FLUSHES_QUEUED_COUNT = "flushesQueuedCount";
+  String MAX_FLUSH_QUEUE_SIZE = "maxFlushQueueSize";
   String COMPACTIONS_COMPLETED_DESC = "Number of compactions that have completed.";
   String COMPACTIONS_FAILED_DESC = "Number of compactions that have failed.";
   String LAST_MAJOR_COMPACTION_DESC = "Age of the last major compaction in milliseconds.";
+  String COMPACTIONS_QUEUED_DESC = "Number of compactions that are queued/running for this region";
+  String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region";
+  String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region";
+  String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region";
   String  NUM_BYTES_COMPACTED_DESC =
       "Sum of filesize on all files entering a finished, successful or aborted, compaction";
   String NUM_FILES_COMPACTED_DESC =
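
The message truncates before MetricsRegionSourceImpl, which is where these name/description pairs get reported. A hedged sketch of how such constants are typically surfaced through Hadoop metrics2; the RegionMetricsReportSketch class, its prefix field, and snapshot() are simplified stand-ins for the real source impl, while Interns.info and MetricsRecordBuilder.addCounter are existing metrics2 calls:

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.metrics2.lib.Interns;

    // Sketch of the reporting side: each constant pair becomes one counter
    // in the per-region metrics record.
    class RegionMetricsReportSketch {
      static final String COMPACTIONS_QUEUED_COUNT = "compactionsQueuedCount";
      static final String COMPACTIONS_QUEUED_DESC =
          "Number of compactions that are queued/running for this region";

      private final String prefix;  // per-region name prefix, illustrative
      private final long compactionsQueued;

      RegionMetricsReportSketch(String prefix, long compactionsQueued) {
        this.prefix = prefix;
        this.compactionsQueued = compactionsQueued;
      }

      void snapshot(MetricsRecordBuilder mrb) {
        // Interned MetricsInfo avoids re-allocating name/description pairs per snapshot.
        mrb.addCounter(Interns.info(prefix + COMPACTIONS_QUEUED_COUNT, COMPACTIONS_QUEUED_DESC),
            compactionsQueued);
      }
    }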

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ecaa1d2/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 9b7acd3..9a725cd 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -112,6 +112,30 @@ public interface MetricsRegionWrapper {
    */
   long getNumCompactionsFailed();
 
+  /**
+   * @return the total number of compactions that are currently queued(or being executed) at point in
+   *  time
+   */
+  long getNumCompactionsQueued();
+
+  /**
+   * @return the total number of flushes currently queued(being executed) for this region at point in
+   *  time
+   */
+  long getNumFlushesQueued();
+
+  /**
+   * @return the max number of compactions queued for this region
+   * Note that this metric is updated periodically and hence might miss some data points
+   */
+  long getMaxCompactionQueueSize();
+
+  /**
+   * @return the max 
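
The javadoc's caveat that the max "is updated periodically and hence might miss some data points" suggests sampling rather than per-event tracking. A minimal sketch of that trade-off, assuming a scheduled sampler; MaxQueueSampler and its constructor arguments are illustrative, not MetricsRegionWrapperImpl internals:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.LongSupplier;

    // A periodic task samples the live queue size and keeps a running max;
    // spikes between two samples can be missed, which is the caveat above.
    class MaxQueueSampler {
      private final AtomicLong maxCompactionQueueSize = new AtomicLong(0L);

      MaxQueueSampler(LongSupplier liveQueueSize, long periodSeconds) {
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        ses.scheduleWithFixedDelay(
            () -> maxCompactionQueueSize.accumulateAndGet(liveQueueSize.getAsLong(), Math::max),
            periodSeconds, periodSeconds, TimeUnit.SECONDS);
      }

      long getMaxCompactionQueueSize() {
        return maxCompactionQueueSize.get();
      }
    }

The alternative, updating the max on every enqueue, would be exact but puts an atomic read-modify-write on the hot path; sampling keeps the cost constant regardless of request rate.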

hbase git commit: HBASE-15134 Add visibility into Flush and Compaction queues

2017-07-28 Thread achouhan
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0053cb967 -> 927803710


HBASE-15134 Add visibility into Flush and Compaction queues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/92780371
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/92780371
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/92780371

Branch: refs/heads/branch-1
Commit: 92780371080a341d0b6f98307a0ea176db327c5a
Parents: 0053cb9
Author: Abhishek Singh Chouhan 
Authored: Fri Jul 28 13:17:32 2017 +0530
Committer: Abhishek Singh Chouhan 
Committed: Fri Jul 28 13:21:04 2017 +0530

--
 .../hbase/regionserver/MetricsRegionSource.java |  8 +
 .../regionserver/MetricsRegionWrapper.java  | 24 +++
 .../regionserver/MetricsRegionSourceImpl.java   | 19 +++-
 .../TestMetricsRegionSourceImpl.java| 20 
 .../hbase/regionserver/CompactSplitThread.java  |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 20 +++-
 .../hbase/regionserver/MemStoreFlusher.java |  2 ++
 .../regionserver/MetricsRegionWrapperImpl.java  | 32 
 .../regionserver/MetricsRegionWrapperStub.java  | 20 
 .../hbase/regionserver/TestMetricsRegion.java   | 12 
 10 files changed, 163 insertions(+), 3 deletions(-)
--



hbase git commit: HBASE-15134 Add visibility into Flush and Compaction queues

2017-07-28 Thread achouhan
Repository: hbase
Updated Branches:
  refs/heads/branch-2 34a1ae875 -> 12b9a151e


HBASE-15134 Add visibility into Flush and Compaction queues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12b9a151
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12b9a151
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12b9a151

Branch: refs/heads/branch-2
Commit: 12b9a151e6338297b253ca2e005eda22b1f2da4e
Parents: 34a1ae8
Author: Abhishek Singh Chouhan 
Authored: Thu Jul 27 20:41:13 2017 +0530
Committer: Abhishek Singh Chouhan 
Committed: Fri Jul 28 13:04:52 2017 +0530

--
 .../hbase/regionserver/MetricsRegionSource.java |  8 +
 .../regionserver/MetricsRegionWrapper.java  | 24 +++
 .../regionserver/MetricsRegionSourceImpl.java   | 19 +++-
 .../TestMetricsRegionSourceImpl.java| 20 
 .../hadoop/hbase/regionserver/CompactSplit.java |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 20 +++-
 .../hbase/regionserver/MemStoreFlusher.java |  2 ++
 .../regionserver/MetricsRegionWrapperImpl.java  | 32 
 .../regionserver/MetricsRegionWrapperStub.java  | 20 
 .../hbase/regionserver/TestMetricsRegion.java   | 12 
 10 files changed, 163 insertions(+), 3 deletions(-)
--



http://git-wip-us.apache.org/repos/asf/hbase/blob/12b9a151/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index cfc0742..fcc600d 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -117,6 +117,30 @@ public interface MetricsRegionWrapper {
    */
   long getNumCompactionsFailed();
 
+  /**
+   * @return the total number of compactions that are currently queued(or being executed) at point in
+   *  time
+   */
+  long getNumCompactionsQueued();
+
+  /**
+   * @return the total number of flushes currently queued(being executed) for this region at point in
+   *  time
+   */
+  long getNumFlushesQueued();
+
+  /**
+   * @return the max number of compactions queued for this region
+   * Note that this metric is updated periodically and hence might miss some data points
+   */
+  long getMaxCompactionQueueSize();
+
+  /**
+   * @return the max number 

hbase git commit: HBASE-15134 Add visibility into Flush and Compaction queues

2017-07-28 Thread achouhan
Repository: hbase
Updated Branches:
  refs/heads/master c5d3de0cd -> 2d06a06ba


HBASE-15134 Add visibility into Flush and Compaction queues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d06a06b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d06a06b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d06a06b

Branch: refs/heads/master
Commit: 2d06a06ba4bbd2f64e28be5973eb1d447114bedc
Parents: c5d3de0
Author: Abhishek Singh Chouhan 
Authored: Thu Jul 27 20:41:13 2017 +0530
Committer: Abhishek Singh Chouhan 
Committed: Fri Jul 28 12:59:09 2017 +0530

--
 .../hbase/regionserver/MetricsRegionSource.java |  8 +
 .../regionserver/MetricsRegionWrapper.java  | 24 +++
 .../regionserver/MetricsRegionSourceImpl.java   | 19 +++-
 .../TestMetricsRegionSourceImpl.java| 20 
 .../hadoop/hbase/regionserver/CompactSplit.java |  9 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 20 +++-
 .../hbase/regionserver/MemStoreFlusher.java |  2 ++
 .../regionserver/MetricsRegionWrapperImpl.java  | 32 
 .../regionserver/MetricsRegionWrapperStub.java  | 20 
 .../hbase/regionserver/TestMetricsRegion.java   | 12 
 10 files changed, 163 insertions(+), 3 deletions(-)
--



hbase git commit: HBASE-17131 Avoid livelock caused by HRegion#processRowsWithLocks

2017-07-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 2e7f55fe3 -> 670e9431d


HBASE-17131 Avoid livelock caused by HRegion#processRowsWithLocks


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/670e9431
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/670e9431
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/670e9431

Branch: refs/heads/branch-1.2
Commit: 670e9431d40d35df4802bc0445012271ee904efc
Parents: 2e7f55f
Author: Chia-Ping Tsai 
Authored: Fri Jul 28 14:07:53 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Jul 28 14:07:53 2017 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |  28 ++--
 .../hbase/client/TestFromClientSide3.java   | 139 ++-
 2 files changed, 152 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/670e9431/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3abca47..719336e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7003,28 +7003,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     MultiVersionConcurrencyControl.WriteEntry writeEntry = null;
-    boolean locked;
+    boolean locked = false;
     boolean walSyncSuccessful = false;
-    List<RowLock> acquiredRowLocks;
+    List<RowLock> acquiredRowLocks = null;
     long addedSize = 0;
     List<Mutation> mutations = new ArrayList<Mutation>();
     Collection<byte[]> rowsToLock = processor.getRowsToLock();
     long mvccNum = 0;
     WALKey walKey = null;
     try {
-      // 2. Acquire the row lock(s)
-      acquiredRowLocks = new ArrayList<RowLock>(rowsToLock.size());
-      for (byte[] row : rowsToLock) {
-        // Attempt to lock all involved rows, throw if any lock times out
-        // use a writer lock for mixed reads and writes
-        acquiredRowLocks.add(getRowLock(row));
-      }
-      // 3. Region lock
-      lock(this.updatesLock.readLock(), acquiredRowLocks.size() == 0 ? 1 : acquiredRowLocks.size());
-      locked = true;
-
-      long now = EnvironmentEdgeManager.currentTime();
       try {
+        // 2. Acquire the row lock(s)
+        acquiredRowLocks = new ArrayList<RowLock>(rowsToLock.size());
+        for (byte[] row : rowsToLock) {
+          // Attempt to lock all involved rows, throw if any lock times out
+          // use a writer lock for mixed reads and writes
+          acquiredRowLocks.add(getRowLock(row));
+        }
+        // 3. Region lock
+        lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size());
+        locked = true;
+
+        long now = EnvironmentEdgeManager.currentTime();
         // 4. Let the processor scan the rows, generate mutations and add
         //    waledits
         doProcessRowWithTimeout(
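
The shape of the fix is worth spelling out: moving row-lock acquisition inside the try block puts partially acquired locks under the same cleanup path that releases them, so a caller that times out on lock N+1 can no longer strand locks 0..N and livelock later callers. A minimal generic sketch of the pattern, using java.util.concurrent locks as stand-ins for HBase's RowLock handling:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Lock;

    // Sketch: acquire inside try, record only locks actually held, release in finally.
    class AcquireInsideTrySketch {
      void processWithLocks(List<Lock> rowLocks) throws InterruptedException {
        List<Lock> acquired = new ArrayList<>(rowLocks.size());
        try {
          for (Lock l : rowLocks) {
            if (!l.tryLock(30, TimeUnit.SECONDS)) {
              throw new InterruptedException("timed out waiting for row lock");
            }
            acquired.add(l);  // only record locks we actually hold
          }
          // ... scan rows, build mutations, sync the WAL ...
        } finally {
          for (Lock l : acquired) {
            l.unlock();       // partial acquisitions are released too
          }
        }
      }
    }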

http://git-wip-us.apache.org/repos/asf/hbase/blob/670e9431/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 09c7e86..08ccc42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -28,16 +29,36 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
-
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import static junit.framework.Assert.assertFalse;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import 
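
The message truncates before the new test body, but the added imports (CountDownLatch, ExecutorService, TimeUnit, assertFalse) point at a bounded-wait concurrency harness. A speculative sketch of that test shape, assuming a placeholder operation; this is not the actual TestFromClientSide3 code:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Sketch: run the operation that used to strand row locks on a worker
    // thread, then assert it completes within a bound instead of livelocking.
    class LivelockTestSketch {
      static boolean completesWithin(Runnable operation, long timeoutSeconds)
          throws InterruptedException {
        CountDownLatch done = new CountDownLatch(1);
        ExecutorService pool = Executors.newSingleThreadExecutor();
        try {
          pool.execute(() -> {
            operation.run();
            done.countDown();  // only reached if the operation returns
          });
          return done.await(timeoutSeconds, TimeUnit.SECONDS);
        } finally {
          pool.shutdownNow();
        }
      }
    }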

hbase git commit: HBASE-17131 Avoid livelock caused by HRegion#processRowsWithLocks

2017-07-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 488ae4be6 -> f18f916f0


HBASE-17131 Avoid livelock caused by HRegion#processRowsWithLocks


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f18f916f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f18f916f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f18f916f

Branch: refs/heads/branch-1.3
Commit: f18f916f050cf4dc106543d3dc7c6d2f78077661
Parents: 488ae4b
Author: Chia-Ping Tsai 
Authored: Fri Jul 28 03:06:41 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Jul 28 14:07:01 2017 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |  28 ++--
 .../hbase/client/TestFromClientSide3.java   | 139 ++-
 2 files changed, 152 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f18f916f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c2dae91..42a2389 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7203,28 +7203,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     MultiVersionConcurrencyControl.WriteEntry writeEntry = null;
-    boolean locked;
+    boolean locked = false;
     boolean walSyncSuccessful = false;
-    List<RowLock> acquiredRowLocks;
+    List<RowLock> acquiredRowLocks = null;
     long addedSize = 0;
     List<Mutation> mutations = new ArrayList<Mutation>();
     Collection<byte[]> rowsToLock = processor.getRowsToLock();
     long mvccNum = 0;
     WALKey walKey = null;
     try {
-      // 2. Acquire the row lock(s)
-      acquiredRowLocks = new ArrayList<RowLock>(rowsToLock.size());
-      for (byte[] row : rowsToLock) {
-        // Attempt to lock all involved rows, throw if any lock times out
-        // use a writer lock for mixed reads and writes
-        acquiredRowLocks.add(getRowLockInternal(row, false));
-      }
-      // 3. Region lock
-      lock(this.updatesLock.readLock(), acquiredRowLocks.size() == 0 ? 1 : acquiredRowLocks.size());
-      locked = true;
-
-      long now = EnvironmentEdgeManager.currentTime();
       try {
+        // 2. Acquire the row lock(s)
+        acquiredRowLocks = new ArrayList<RowLock>(rowsToLock.size());
+        for (byte[] row : rowsToLock) {
+          // Attempt to lock all involved rows, throw if any lock times out
+          // use a writer lock for mixed reads and writes
+          acquiredRowLocks.add(getRowLockInternal(row, false));
+        }
+        // 3. Region lock
+        lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size());
+        locked = true;
+
+        long now = EnvironmentEdgeManager.currentTime();
         // 4. Let the processor scan the rows, generate mutations and add
         //    waledits
         doProcessRowWithTimeout(
