hbase git commit: HBASE-18913 TestShell fails because NoMethodError: undefined method parseColumn

2017-10-02 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0db82daa9 -> 2348b7fe4


HBASE-18913 TestShell fails because NoMethodError: undefined method parseColumn


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2348b7fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2348b7fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2348b7fe

Branch: refs/heads/branch-2
Commit: 2348b7fe4514a93f5b0cef2a76f65c98258fa7aa
Parents: 0db82da
Author: Guanghao Zhang 
Authored: Tue Oct 3 09:16:35 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Oct 3 11:52:46 2017 +0800

--
 hbase-shell/src/main/ruby/hbase/table.rb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2348b7fe/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index 33c80ae..320ec7c 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -722,7 +722,7 @@ EOF
 
     # Returns family and (when has it) qualifier for a column name
     def parse_column_name(column)
-      split = org.apache.hadoop.hbase.KeyValue.parseColumn(column.to_java_bytes)
+      split = org.apache.hadoop.hbase.CellUtil.parseColumn(column.to_java_bytes)
       set_converter(split) if split.length > 1
       [split[0], split.length > 1 ? split[1] : nil]
     end
@@ -793,7 +793,7 @@ EOF
     # 2. register the CONVERTER information based on column spec - "cf:qualifier"
     def set_converter(column)
       family = String.from_java_bytes(column[0])
-      parts = org.apache.hadoop.hbase.KeyValue.parseColumn(column[1])
+      parts = org.apache.hadoop.hbase.CellUtil.parseColumn(column[1])
       if parts.length > 1
         @converters["#{family}:#{String.from_java_bytes(parts[0])}"] = String.from_java_bytes(parts[1])
         column[1] = parts[0]
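
For readers tracking the API change: a minimal Java sketch (not part of the patch) of what the shell now delegates to. It assumes an HBase 2.x client with the CellUtil.parseColumn(byte[]) overload the patch switches to; the class and column names are made up.

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ParseColumnExample {
      public static void main(String[] args) {
        // "cf:q1" -> [family, qualifier]; a bare family name yields a single-element array,
        // which is exactly the shape parse_column_name/set_converter rely on in table.rb.
        byte[][] parts = CellUtil.parseColumn(Bytes.toBytes("cf:q1"));
        System.out.println("family    = " + Bytes.toString(parts[0]));
        if (parts.length > 1) {
          System.out.println("qualifier = " + Bytes.toString(parts[1]));
        }
      }
    }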



hbase git commit: HBASE-18913 TestShell fails because NoMethodError: undefined method parseColumn

2017-10-02 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 214d21994 -> afa03a207


HBASE-18913 TestShell fails because NoMethodError: undefined method parseColumn


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/afa03a20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/afa03a20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/afa03a20

Branch: refs/heads/master
Commit: afa03a207ea5ff33439bc5a3bc67e7c59cc26298
Parents: 214d219
Author: Guanghao Zhang 
Authored: Tue Oct 3 09:16:35 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Oct 3 11:47:14 2017 +0800

--
 hbase-shell/src/main/ruby/hbase/table.rb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/afa03a20/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index 33c80ae..320ec7c 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -722,7 +722,7 @@ EOF
 
     # Returns family and (when has it) qualifier for a column name
    def parse_column_name(column)
-      split = org.apache.hadoop.hbase.KeyValue.parseColumn(column.to_java_bytes)
+      split = org.apache.hadoop.hbase.CellUtil.parseColumn(column.to_java_bytes)
       set_converter(split) if split.length > 1
       [split[0], split.length > 1 ? split[1] : nil]
     end
@@ -793,7 +793,7 @@ EOF
     # 2. register the CONVERTER information based on column spec - "cf:qualifier"
     def set_converter(column)
       family = String.from_java_bytes(column[0])
-      parts = org.apache.hadoop.hbase.KeyValue.parseColumn(column[1])
+      parts = org.apache.hadoop.hbase.CellUtil.parseColumn(column[1])
       if parts.length > 1
         @converters["#{family}:#{String.from_java_bytes(parts[0])}"] = String.from_java_bytes(parts[1])
         column[1] = parts[0]



hbase git commit: HBASE-18814 Make ScanMetrics enabled and add counter into the MapReduce Job over snapshot

2017-10-02 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 c3697bd27 -> 5c1f2d668


HBASE-18814 Make ScanMetrics enabled and add counter  into the MapReduce Job over snapshot

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c1f2d66
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c1f2d66
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c1f2d66

Branch: refs/heads/branch-1.4
Commit: 5c1f2d668213c57ff474ea37cafe322c98f79756
Parents: c3697bd
Author: libisthanks 
Authored: Thu Sep 14 09:58:34 2017 +0800
Committer: Ashu Pachauri 
Committed: Mon Oct 2 18:29:25 2017 -0700

--
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java | 1 +
 .../apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c1f2d66/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 4fab6a2..df118fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -86,6 +86,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
         resultSize += CellUtil.estimatedSerializedSizeOf(cell);
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
+      this.scanMetrics.countOfRowsScanned.incrementAndGet();
     }
 
     return result;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c1f2d66/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index a8d387a..dab56c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -205,6 +205,7 @@ public class TableSnapshotInputFormatImpl {
       scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
       // disable caching of data blocks
       scan.setCacheBlocks(false);
+      scan.setScanMetricsEnabled(true);
 
       scanner =
           new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
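
The flag the patch flips and the counters it now bumps can also be exercised from a plain client scan. A hedged sketch (not from the patch): ScanMetrics and its countOfRowsScanned/countOfBytesInResults fields are the ones used above, but the ResultScanner#getScanMetrics accessor and the table name here are assumptions that depend on client version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

    public class ScanMetricsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("test_table"))) {
          Scan scan = new Scan();
          scan.setCacheBlocks(false);
          scan.setScanMetricsEnabled(true);  // the same flag the patch turns on for snapshot scans
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
              // process rows
            }
            ScanMetrics metrics = scanner.getScanMetrics();  // assumed accessor; see note above
            System.out.println("rows scanned:     " + metrics.countOfRowsScanned.get());
            System.out.println("bytes in results: " + metrics.countOfBytesInResults.get());
          }
        }
      }
    }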



hbase git commit: HBASE-18814 Make ScanMetrics enabled and add counter into the MapReduce Job over snapshot

2017-10-02 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 61173522d -> 1c7321f9d


HBASE-18814 Make ScanMetrics enabled and add counter  into the MapReduce Job over snapshot

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c7321f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c7321f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c7321f9

Branch: refs/heads/branch-1
Commit: 1c7321f9d5e639a27564863463e91ae4f5b5b0d8
Parents: 6117352
Author: libisthanks 
Authored: Thu Sep 14 09:58:34 2017 +0800
Committer: Ashu Pachauri 
Committed: Mon Oct 2 18:25:52 2017 -0700

--
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java | 1 +
 .../apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c7321f9/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 4fab6a2..df118fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -86,6 +86,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
         resultSize += CellUtil.estimatedSerializedSizeOf(cell);
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
+      this.scanMetrics.countOfRowsScanned.incrementAndGet();
     }
 
     return result;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1c7321f9/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index a8d387a..dab56c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -205,6 +205,7 @@ public class TableSnapshotInputFormatImpl {
       scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
       // disable caching of data blocks
       scan.setCacheBlocks(false);
+      scan.setScanMetricsEnabled(true);
 
       scanner =
           new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);



hbase git commit: HBASE-18814 Make ScanMetrics enabled and add counter into the MapReduce Job over snapshot

2017-10-02 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 176d8bd2c -> 0db82daa9


HBASE-18814 Make ScanMetrics enabled and add counter  into the MapReduce Job over snapshot

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0db82daa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0db82daa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0db82daa

Branch: refs/heads/branch-2
Commit: 0db82daa91955afc5b836ae2d92d23ca0da30453
Parents: 176d8bd
Author: libisthanks 
Authored: Thu Sep 14 09:58:34 2017 +0800
Committer: Ashu Pachauri 
Committed: Mon Oct 2 18:25:34 2017 -0700

--
 .../apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java | 1 +
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0db82daa/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 5098b30..bcaa448 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -216,6 +216,7 @@ public class TableSnapshotInputFormatImpl {
       scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
       // disable caching of data blocks
       scan.setCacheBlocks(false);
+      scan.setScanMetricsEnabled(true);
 
       scanner =
           new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0db82daa/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 141fcdd..6871717 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -86,6 +86,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
         resultSize += CellUtil.estimatedSerializedSizeOf(cell);
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
+      this.scanMetrics.countOfRowsScanned.incrementAndGet();
     }
 
     return result;
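
In the MapReduce-over-snapshot path this patch targets, the Scan handed to TableSnapshotInputFormat is the one that now gets setScanMetricsEnabled(true), which is what lets scan counters surface in the job. A hedged driver sketch, not from the patch: the snapshot name, restore directory and mapper are illustrative, and the TableMapReduceUtil.initTableSnapshotMapperJob overload shown should be checked against your client version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class SnapshotRowCount {
      // Counts rows read from the snapshot; the scan metrics enabled by this patch
      // show up alongside this user counter in the job's counter output.
      static class RowCountMapper extends TableMapper<NullWritable, NullWritable> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context) {
          context.getCounter("snapshot_scan", "rows").increment(1);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "count rows in snapshot");
        job.setJarByClass(SnapshotRowCount.class);
        Scan scan = new Scan();  // the input format enables scan metrics on this scan internally
        TableMapReduceUtil.initTableSnapshotMapperJob(
            "my_snapshot",                          // hypothetical snapshot name
            scan, RowCountMapper.class,
            NullWritable.class, NullWritable.class,
            job, true, new Path("/tmp/snapshot-restore"));  // hypothetical restore dir
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }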



hbase git commit: HBASE-18814 Make ScanMetrics enabled and add counter into the MapReduce Job over snapshot

2017-10-02 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/master 4093cc029 -> 214d21994


HBASE-18814 Make ScanMetrics enabled and add counter  into the MapReduce Job over snapshot

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/214d2199
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/214d2199
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/214d2199

Branch: refs/heads/master
Commit: 214d21994e5e0c64ca9f463b51a91279deffcef5
Parents: 4093cc0
Author: libisthanks 
Authored: Thu Sep 14 09:58:34 2017 +0800
Committer: Ashu Pachauri 
Committed: Mon Oct 2 18:25:18 2017 -0700

--
 .../apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java | 1 +
 .../org/apache/hadoop/hbase/client/ClientSideRegionScanner.java | 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/214d2199/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 5098b30..bcaa448 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -216,6 +216,7 @@ public class TableSnapshotInputFormatImpl {
       scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
       // disable caching of data blocks
       scan.setCacheBlocks(false);
+      scan.setScanMetricsEnabled(true);
 
       scanner =
           new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/214d2199/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index 141fcdd..6871717 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -86,6 +86,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
         resultSize += CellUtil.estimatedSerializedSizeOf(cell);
       }
       this.scanMetrics.countOfBytesInResults.addAndGet(resultSize);
+      this.scanMetrics.countOfRowsScanned.incrementAndGet();
     }
 
     return result;



[1/2] hbase git commit: HBASE-18894: null pointer exception in list_regions in shell command

2017-10-02 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 24e40f3e8 -> 176d8bd2c
  refs/heads/master 38eaf47fa -> 4093cc029


HBASE-18894: null pointer exception in list_regions in shell command

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4093cc02
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4093cc02
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4093cc02

Branch: refs/heads/master
Commit: 4093cc0291aa7f00ec75a0bae48841ac85eb1fa5
Parents: 38eaf47
Author: Yi Liang 
Authored: Thu Sep 28 13:07:27 2017 -0700
Committer: Mike Drob 
Committed: Mon Oct 2 16:50:29 2017 -0500

--
 hbase-shell/src/main/ruby/shell/commands/list_regions.rb | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4093cc02/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index 5feb926..bcc0c4a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -77,6 +77,7 @@ EOF
   raise "#{cols} must be an array of strings. Possible values are SERVER_NAME, REGION_NAME, START_KEY, END_KEY, SIZE, REQ, LOCALITY."
 end
 
+error = false
 admin_instance = admin.instance_variable_get('@admin')
 conn_instance = admin_instance.getConnection
 cluster_status = admin_instance.getClusterStatus
@@ -105,6 +106,12 @@ EOF
 region_load_map = cluster_status.getLoad(server_name).getRegionsLoad
 region_load = region_load_map.get(hregion_info.getRegionName)
 
+if region_load.nil?
+  puts "Can not find region: #{hregion_info.getRegionName} , it may be disabled or in transition\n"
+  error = true
+  break
+end
+
 # Ignore regions which exceed our locality threshold
 next unless accept_region_for_locality? region_load.getDataLocality, locality_threshold
 result_hash = {}
@@ -157,12 +164,14 @@ EOF
 
 @end_time = Time.now
 
+return if error
+
 size_hash.each do |param, length|
   printf(" %#{length}s |", param)
 end
 printf("\n")
 
-size_hash.each do |_param, length|
+size_hash.each_value do |length|
   str = '-' * length
   printf(" %#{length}s |", str)
 end
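
The same guard, sketched against the Java client API the shell wraps (ClusterStatus -> ServerLoad -> RegionLoad, as of the 1.x/2.0-alpha line). Not from the patch; the helper name is made up and HRegionInfo/getClusterStatus are the pre-2.0-final spellings.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.RegionLoad;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public class RegionLoadLookup {
      /**
       * Returns the RegionLoad for a region, or null when the region is absent from the
       * server's load map -- typically because it is disabled or in transition, the exact
       * case that used to blow up list_regions with a NullPointerException.
       */
      static RegionLoad loadFor(Admin admin, ServerName server, HRegionInfo region) throws IOException {
        ClusterStatus status = admin.getClusterStatus();
        Map<byte[], RegionLoad> regionsLoad = status.getLoad(server).getRegionsLoad();
        RegionLoad load = regionsLoad.get(region.getRegionName());
        if (load == null) {
          System.out.println("Can not find region: " + region.getRegionNameAsString()
              + ", it may be disabled or in transition");
        }
        return load;
      }
    }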



[2/2] hbase git commit: HBASE-18894: null pointer exception in list_regions in shell command

2017-10-02 Thread mdrob
HBASE-18894: null pointer exception in list_regions in shell command

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/176d8bd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/176d8bd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/176d8bd2

Branch: refs/heads/branch-2
Commit: 176d8bd2c7dcfc7ecb619bd48eb0c4faf94a688c
Parents: 24e40f3
Author: Yi Liang 
Authored: Thu Sep 28 13:07:27 2017 -0700
Committer: Mike Drob 
Committed: Mon Oct 2 16:51:34 2017 -0500

--
 hbase-shell/src/main/ruby/shell/commands/list_regions.rb | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/176d8bd2/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index 5feb926..bcc0c4a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -77,6 +77,7 @@ EOF
   raise "#{cols} must be an array of strings. Possible values are SERVER_NAME, REGION_NAME, START_KEY, END_KEY, SIZE, REQ, LOCALITY."
 end
 
+error = false
 admin_instance = admin.instance_variable_get('@admin')
 conn_instance = admin_instance.getConnection
 cluster_status = admin_instance.getClusterStatus
@@ -105,6 +106,12 @@ EOF
 region_load_map = cluster_status.getLoad(server_name).getRegionsLoad
 region_load = region_load_map.get(hregion_info.getRegionName)
 
+if region_load.nil?
+  puts "Can not find region: #{hregion_info.getRegionName} , it may be disabled or in transition\n"
+  error = true
+  break
+end
+
 # Ignore regions which exceed our locality threshold
 next unless accept_region_for_locality? region_load.getDataLocality, locality_threshold
 result_hash = {}
@@ -157,12 +164,14 @@ EOF
 
 @end_time = Time.now
 
+return if error
+
 size_hash.each do |param, length|
   printf(" %#{length}s |", param)
 end
 printf("\n")
 
-size_hash.each do |_param, length|
+size_hash.each_value do |length|
   str = '-' * length
   printf(" %#{length}s |", str)
 end



hbase git commit: HBASE-18105 [AMv2] Split/Merge need cleanup; currently they diverge and do not fully embrace AMv2 world (Yi Liang)

2017-10-02 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e047f518e -> 24e40f3e8


HBASE-18105 [AMv2] Split/Merge need cleanup; currently they diverge and do not fully embrace AMv2 world (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24e40f3e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24e40f3e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24e40f3e

Branch: refs/heads/branch-2
Commit: 24e40f3e85e12f0eb74b8377c6470d35407282e6
Parents: e047f51
Author: Michael Stack 
Authored: Mon Oct 2 11:38:11 2017 -0700
Committer: Michael Stack 
Committed: Mon Oct 2 11:38:57 2017 -0700

--
 .../src/main/protobuf/MasterProcedure.proto |  4 +--
 .../src/main/protobuf/RegionServerStatus.proto  |  6 ++--
 .../hbase/coprocessor/MasterObserver.java   | 12 +++
 .../hbase/master/MasterCoprocessorHost.java | 12 +++
 .../master/assignment/AssignmentManager.java|  2 --
 .../assignment/SplitTableRegionProcedure.java   | 29 -
 .../hbase/coprocessor/TestMasterObserver.java   |  4 +--
 .../TestMergeTableRegionsProcedure.java | 29 +
 .../TestSplitTableRegionProcedure.java  | 33 
 .../MasterProcedureTestingUtility.java  |  2 +-
 .../TestSplitTransactionOnCluster.java  |  2 +-
 11 files changed, 99 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/24e40f3e/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 2cdebb1..626530f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -286,9 +286,9 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION = 2;
   SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 3;
   SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 4;
-  SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 5;
+  SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META = 5;
   SPLIT_TABLE_REGION_UPDATE_META = 6;
-  SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 7;
+  SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 7;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 8;
   SPLIT_TABLE_REGION_POST_OPERATION = 9;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/24e40f3e/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 1cd4376..f83bb20 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -105,11 +105,13 @@ message RegionStateTransition {
 READY_TO_SPLIT = 3;
 READY_TO_MERGE = 4;
 
-SPLIT_PONR = 5;
-MERGE_PONR = 6;
 
+/** We used to have PONR enums for split and merge in here occupying
+ positions 5 and 6 but they have since been removed. Do not reuse these
+ indices */
 SPLIT = 7;
 MERGED = 8;
+
 SPLIT_REVERTED = 9;
 MERGE_REVERTED = 10;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/24e40f3e/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index bfa88e6..85da610 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -696,25 +696,25 @@ public interface MasterObserver {
   final RegionInfo regionInfoB) throws IOException {}
 
   /**
-   * This will be called before PONR step as part of split transaction. Calling
+   * This will be called before update META step as part of split transaction. Calling
    * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rollback the split
* @param ctx the environment to interact with the framework and master
* @param splitKey
* @param metaEntries
*/
-  default void preSplitRegionBeforePONRAction(
+  default void preSplitRegionBeforeMETAAction(
   final ObserverContext ctx,
   final byte[] splitKey,
   final List metaEntries) throws IOException {}
 
 
   /**
-   * This will be called after PONR step as part of split transaction
+   * This will be called after update META step as part of split transaction
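
For coprocessor authors tracking the rename, a minimal sketch of overriding the renamed hook. Hedged: the generic parameters (ObserverContext<MasterCoprocessorEnvironment>, List<Mutation>) are the usual MasterObserver signature but are stripped from the diff above, the observer class name is made up, and how the observer gets registered with the master varies across the 2.0 alphas, so only the override is shown.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitLoggingObserver implements MasterObserver {
      // Formerly preSplitRegionBeforePONRAction: fires just before the split is made
      // durable in hbase:meta, and bypass() here still rolls the split back.
      @Override
      public void preSplitRegionBeforeMETAAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
          byte[] splitKey, List<Mutation> metaEntries) throws IOException {
        System.out.println("about to update hbase:meta for split at "
            + Bytes.toStringBinary(splitKey));
      }
    }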

hbase git commit: HBASE-18105 [AMv2] Split/Merge need cleanup; currently they diverge and do not fully embrace AMv2 world (Yi Liang)

2017-10-02 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master d35d8376a -> 38eaf47fa


HBASE-18105 [AMv2] Split/Merge need cleanup; currently they diverge and do not fully embrace AMv2 world (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38eaf47f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38eaf47f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38eaf47f

Branch: refs/heads/master
Commit: 38eaf47fa7dd4b0b3b822e73c9e644370a5cacb6
Parents: d35d837
Author: Michael Stack 
Authored: Mon Oct 2 11:38:11 2017 -0700
Committer: Michael Stack 
Committed: Mon Oct 2 11:38:11 2017 -0700

--
 .../src/main/protobuf/MasterProcedure.proto |  4 +--
 .../src/main/protobuf/RegionServerStatus.proto  |  6 ++--
 .../hbase/coprocessor/MasterObserver.java   | 12 +++
 .../hbase/master/MasterCoprocessorHost.java | 12 +++
 .../master/assignment/AssignmentManager.java|  2 --
 .../assignment/SplitTableRegionProcedure.java   | 29 -
 .../hbase/coprocessor/TestMasterObserver.java   |  4 +--
 .../TestMergeTableRegionsProcedure.java | 29 +
 .../TestSplitTableRegionProcedure.java  | 33 
 .../MasterProcedureTestingUtility.java  |  2 +-
 .../TestSplitTransactionOnCluster.java  |  2 +-
 11 files changed, 99 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38eaf47f/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 2cdebb1..626530f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -286,9 +286,9 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION = 2;
   SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 3;
   SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 4;
-  SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 5;
+  SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META = 5;
   SPLIT_TABLE_REGION_UPDATE_META = 6;
-  SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 7;
+  SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 7;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 8;
   SPLIT_TABLE_REGION_POST_OPERATION = 9;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/38eaf47f/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 1cd4376..f83bb20 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -105,11 +105,13 @@ message RegionStateTransition {
 READY_TO_SPLIT = 3;
 READY_TO_MERGE = 4;
 
-SPLIT_PONR = 5;
-MERGE_PONR = 6;
 
+/** We used to have PONR enums for split and merge in here occupying
+ positions 5 and 6 but they have since been removed. Do not reuse these
+ indices */
 SPLIT = 7;
 MERGED = 8;
+
 SPLIT_REVERTED = 9;
 MERGE_REVERTED = 10;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/38eaf47f/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index bfa88e6..85da610 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -696,25 +696,25 @@ public interface MasterObserver {
   final RegionInfo regionInfoB) throws IOException {}
 
   /**
-   * This will be called before PONR step as part of split transaction. Calling
+   * This will be called before update META step as part of split transaction. Calling
    * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rollback the split
* @param ctx the environment to interact with the framework and master
* @param splitKey
* @param metaEntries
*/
-  default void preSplitRegionBeforePONRAction(
+  default void preSplitRegionBeforeMETAAction(
   final ObserverContext ctx,
   final byte[] splitKey,
   final List metaEntries) throws IOException {}
 
 
   /**
-   * This will be called after PONR step as part of split transaction
+   * This will be called after update META step as part of split transaction

[51/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/3332caca
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/3332caca
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/3332caca

Branch: refs/heads/asf-site
Commit: 3332cacabbcd2dc8ee5da890584b607567436d06
Parents: 98cfcf4
Author: jenkins 
Authored: Mon Oct 2 15:13:07 2017 +
Committer: jenkins 
Committed: Mon Oct 2 15:13:07 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 apidocs/deprecated-list.html|   128 +-
 apidocs/index-all.html  |29 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |   192 +-
 apidocs/org/apache/hadoop/hbase/ServerLoad.html |12 +-
 .../hbase/class-use/HTableDescriptor.html   |19 +-
 .../hadoop/hbase/client/RegionLoadStats.html|35 +-
 .../hadoop/hbase/client/TableDescriptor.html| 8 +-
 .../hbase/client/TableDescriptorBuilder.html| 8 +-
 .../class-use/TableDescriptorBuilder.html   | 2 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |   634 +-
 .../org/apache/hadoop/hbase/RegionLoad.html | 2 +-
 .../org/apache/hadoop/hbase/ServerLoad.html | 6 +-
 .../hadoop/hbase/client/RegionLoadStats.html|33 +-
 .../hadoop/hbase/client/TableDescriptor.html| 2 +-
 .../hbase/client/TableDescriptorBuilder.html| 8 +-
 .../backoff/ExponentialClientBackoffPolicy.html | 2 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 10856 -
 checkstyle.rss  |36 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 6 +-
 devapidocs/allclasses-noframe.html  | 6 +-
 devapidocs/constant-values.html |26 +-
 devapidocs/deprecated-list.html |   188 +-
 devapidocs/index-all.html   |   283 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |   192 +-
 .../org/apache/hadoop/hbase/ScheduledChore.html | 2 +-
 .../org/apache/hadoop/hbase/ServerLoad.html |12 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |88 +-
 .../hbase/class-use/HTableDescriptor.html   |19 +-
 .../hadoop/hbase/class-use/ScheduledChore.html  | 2 +-
 .../hbase/client/ImmutableHTableDescriptor.html | 2 +-
 .../hadoop/hbase/client/RegionLoadStats.html|35 +-
 .../hadoop/hbase/client/TableDescriptor.html| 8 +-
 ...riptorBuilder.ModifyableTableDescriptor.html |18 +-
 .../hbase/client/TableDescriptorBuilder.html| 8 +-
 .../ServerStatistics.RegionStatistics.html  | 8 +-
 ...riptorBuilder.ModifyableTableDescriptor.html | 2 +-
 .../class-use/TableDescriptorBuilder.html   | 2 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/HFile.Reader.html |16 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html | 8 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |10 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.html  |20 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 4 +-
 .../hadoop/hbase/io/util/MemorySizeUtil.html|16 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../balancer/FavoredStochasticBalancer.html | 2 +-
 ...lancer.CostFromRegionLoadAsRateFunction.html | 2 +-
 ...cLoadBalancer.LocalityBasedCostFunction.html | 4 +-
 ...icLoadBalancer.MemStoreSizeCostFunction.html |   382 +
 ...icLoadBalancer.MemstoreSizeCostFunction.html |   382 -
 ...StochasticLoadBalancer.MoveCostFunction.html | 4 +-
 .../master/balancer/StochasticLoadBalancer.html | 2 +-
 .../balancer/class-use/BalancerRegionLoad.html  | 2 +-
 ...lancer.CostFromRegionLoadAsRateFunction.html | 2 +-
 ...LoadBalancer.CostFromRegionLoadFunction.html | 2 +-
 .../StochasticLoadBalancer.CostFunction.html| 2 +-
 ...icLoadBalancer.MemStoreSizeCostFunction.html |   125 +
 ...icLoadBalancer.MemstoreSizeCostFunction.html |   125 -
 .../hbase/master/balancer/package-frame.html| 2 +-
 .../hbase/master/balan

hbase-site git commit: INFRA-10751 Empty commit

2017-10-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3332cacab -> 84574d8c9


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/84574d8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/84574d8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/84574d8c

Branch: refs/heads/asf-site
Commit: 84574d8c9dc41cf4c715f2a7d038dcbcb51bb4cb
Parents: 3332cac
Author: jenkins 
Authored: Mon Oct 2 15:13:50 2017 +
Committer: jenkins 
Committed: Mon Oct 2 15:13:50 2017 +

--

--




[25/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
[diff of the regenerated Javadoc source page for HRegion (index a0961f7..4a7f4ae); the visible changes track the Memstore* -> MemStore* renames in HRegion and its PrepareFlushResult inner class (MemstoreSize, addAndGetMemstoreSize, decrMemstoreSize, checkNegativeMemstoreDataSize, isSloppyMemstore, getMaxMemstoreTS); raw HTML diff omitted]

[35/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreSize.html
--
[new generated page "Uses of Class org.apache.hadoop.hbase.regionserver.MemStoreSize" (index 000..3e2e760), listing the fields and methods in org.apache.hadoop.hbase.regionserver that take or return the renamed MemStoreSize type; raw HTML diff omitted]

[38/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
--
[regenerated Javadoc for the MemStore interface (index b044150..8737db3): add, upsert, size, getFlushableSize and getSnapshotSize now reference MemStoreSize instead of MemstoreSize; raw HTML diff omitted]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
--
[regenerated Javadoc for MemStoreFlusher (index 68f5f27..247b137): getBiggestMemstoreOfRegionReplica renamed to getBiggestMemStoreOfRegionReplica; raw HTML diff omitted]

[33/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
[regenerated Javadoc for the org.apache.hadoop.hbase.regionserver package tree (index ede8048..0bb2d1c): MemstoreSize and HRegionServer.PeriodicMemstoreFlusher appear under their new MemStoreSize/PeriodicMemStoreFlusher names and the enum listing is reordered; raw HTML diff omitted]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
[regenerated package-use page (index 8184f79..8614e9a): the MemstoreSize entry ("Wraps the data size part and total heap space occupied by the memstore.") is now listed as MemStoreSize; raw HTML diff omitted]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
[regenerated querymatcher package tree (index 9b7ac1f..9d29552); raw HTML diff omitted]

[40/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/FlushRequester.html
--
[regenerated Javadoc for FlushRequester (index 0b5ac55..d4074db): setGlobalMemstoreLimit renamed to setGlobalMemStoreLimit; raw HTML diff omitted]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
[regenerated Javadoc for HMobStore (index 817f1d7..8fd3c00): the methods-inherited-from-HStore list picks up the getMaxMemStoreTS, getMemStoreFlushSize and related Memstore* -> MemStore* renames; raw HTML diff omitted]
[48/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index de6ebc4..23be424 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 2051
 0
 0
-13712
+13709
 
 Files
 
@@ -5217,7 +5217,7 @@
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 0
 0
-3
+1
 
 org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 0
@@ -5357,7 +5357,7 @@
 org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
 0
 0
-6
+7
 
 org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
 0
@@ -5432,7 +5432,7 @@
 org/apache/hadoop/hbase/regionserver/SegmentFactory.java
 0
 0
-24
+22
 
 org/apache/hadoop/hbase/regionserver/SegmentScanner.java
 0
@@ -8263,7 +8263,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports";>UnusedImports
 
 processJavadoc: "true"
-108
+107
  Error
 
 indentation
@@ -8274,19 +8274,19 @@
 caseIndent: "2"
 basicOffset: "2"
 lineWrappingIndentation: "2"
-3768
+3767
  Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation
 
 offset: "2"
-764
+770
  Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription
-3279
+3273
  Error
 
 misc
@@ -8314,7 +8314,7 @@
 
 whitespace
 http://checkstyle.sourceforge.net/config_whitespace.html#FileTabCharacter";>FileTabCharacter
-8
+7
  Error
 
 
@@ -10847,109 +10847,109 @@
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-631
+649
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-644
+662
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-659
+677
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-689
+707
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-703
+721
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-707
+725
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-724
+742
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 110).
-761
+779
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 104).
-762
+780
 
  Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-768
+786
 
  Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-775
+793
 
  Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-781
+799
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-798
+816
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-799
+817
 
  Error
 indentation
 Indentation
 'lambda arguments' have incorrect indentation level 20, expected level 
should be 14.
-825
+843
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-825
+843
 
  Error
 indentation
 Indentation
 'lambda arguments' have incorrect indentation level 20, expected level 
should be 14.
-826
+844
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 111).
-826
+844
 
 org/apache/hadoop/hbase/HealthChecker.java
 
@@ -14646,7 +14646,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 43 has parse error. Missed HTML close tag 
'TableName'. Sometimes it means that close tag missed for one of previous 
tags.
 179
 
@@ -19104,7 +19104,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 64 has parse error. Missed HTML close tag 
'code'. Sometimes it means that close tag missed for one of previous tags.
 2113
 
@@ -26046,7 +26046,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 37 has parse error. Details: no viable 
alternative at input '

[49/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index 3f18200..c678b81 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -571,321 +571,339 @@
 563  }
 564
 565  /**
-566   * @return true if the read-replicas 
memstore replication is enabled.
-567   */
-568  @Override
-569  public boolean 
hasRegionMemstoreReplication() {
-570return 
delegatee.hasRegionMemstoreReplication();
-571  }
-572
-573  /**
-574   * Enable or Disable the memstore 
replication from the primary region to the replicas.
-575   * The replication will be used only 
for meta operations (e.g. flush, compaction, ...)
-576   *
-577   * @param memstoreReplication true if 
the new data written to the primary region
-578   * 
should be replicated.
-579   *false if 
the secondaries can tollerate to have new
-580   *  
data only when the primary flushes the memstore.
-581   */
-582  public HTableDescriptor 
setRegionMemstoreReplication(boolean memstoreReplication) {
-583
getDelegateeForModification().setRegionMemstoreReplication(memstoreReplication);
-584return this;
-585  }
-586
-587  public HTableDescriptor setPriority(int 
priority) {
-588
getDelegateeForModification().setPriority(priority);
-589return this;
-590  }
-591
-592  @Override
-593  public int getPriority() {
-594return delegatee.getPriority();
-595  }
-596
-597  /**
-598   * Returns all the column family names 
of the current table. The map of
-599   * HTableDescriptor contains mapping of 
family name to HColumnDescriptors.
-600   * This returns all the keys of the 
family map which represents the column
-601   * family names of the table.
-602   *
-603   * @return Immutable sorted set of the 
keys of the families.
-604   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0
-605   * (HBASE-18008;).
-606   * Use {@link 
#getColumnFamilyNames()}.
-607   */
-608  @Deprecated
-609  public Set 
getFamiliesKeys() {
-610return 
delegatee.getColumnFamilyNames();
-611  }
-612
-613  /**
-614   * Returns the count of the column 
families of the table.
-615   *
-616   * @return Count of column families of 
the table
-617   */
-618  @Override
-619  public int getColumnFamilyCount() {
-620return 
delegatee.getColumnFamilyCount();
-621  }
-622
-623  /**
-624   * Returns an array all the {@link 
HColumnDescriptor} of the column families
-625   * of the table.
-626   *
-627   * @return Array of all the 
HColumnDescriptors of the current table
-628   *
-629   * @see #getFamilies()
-630   */
-631  @Deprecated
-632  @Override
-633  public HColumnDescriptor[] 
getColumnFamilies() {
-634return 
Stream.of(delegatee.getColumnFamilies())
-635
.map(this::toHColumnDescriptor)
-636.toArray(size -> new 
HColumnDescriptor[size]);
-637  }
-638
-639  /**
-640   * Returns the HColumnDescriptor for a 
specific column family with name as
-641   * specified by the parameter column.
-642   * @param column Column family name
-643   * @return Column descriptor for the 
passed family name or the family on
-644   * passed in column.
-645   * @deprecated Use {@link 
#getColumnFamily(byte[])}.
-646   */
-647  @Deprecated
-648  public HColumnDescriptor 
getFamily(final byte[] column) {
-649return 
toHColumnDescriptor(delegatee.getColumnFamily(column));
-650  }
-651
-652
-653  /**
-654   * Removes the HColumnDescriptor with 
name specified by the parameter column
-655   * from the table descriptor
-656   *
-657   * @param column Name of the column 
family to be removed.
-658   * @return Column descriptor for the 
passed family name or the family on
-659   * passed in column.
-660   */
-661  public HColumnDescriptor 
removeFamily(final byte [] column) {
-662return 
toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column));
-663  }
-664
-665  /**
-666   * Return a HColumnDescriptor for user 
to keep the compatibility as much as possible.
-667   * @param desc read-only 
ColumnFamilyDescriptor
-668   * @return The older implementation of 
ColumnFamilyDescriptor
-669   */
-670  protected HColumnDescriptor 
toHColumnDescriptor(ColumnFamilyDescriptor desc) {
-671if (desc == null) {
-672  return null;
-673} else if (desc instanceof 
ModifyableColumnFamilyDescriptor) {
-674  return new 
HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
-675} else if (desc instanceof 
HColumnDescriptor) {
-676  return (HColumnDescriptor) desc;
-677 

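The removed HTableDescriptor source above shows two recurring patterns: a fluent setter (setRegionMemstoreReplication) that forwards to a delegatee descriptor, and a deprecated accessor (getFamiliesKeys) that simply returns the result of its replacement (getColumnFamilyNames). Below is a minimal sketch of that delegate-and-deprecate shape; the Delegatee type is a stand-in, not HBase's actual descriptor implementation, and only the public method names and their described semantics come from the quoted Javadoc.

```java
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

// Sketch of the delegate-and-deprecate pattern from the quoted HTableDescriptor source.
// "Delegatee" is an invented stand-in type.
class TableDescriptorSketch {
  static class Delegatee {
    private boolean regionMemStoreReplication = true;
    private final Set<byte[]> columnFamilyNames = new LinkedHashSet<>();

    void setRegionMemstoreReplication(boolean enabled) { this.regionMemStoreReplication = enabled; }
    boolean hasRegionMemstoreReplication() { return regionMemStoreReplication; }
    Set<byte[]> getColumnFamilyNames() { return Collections.unmodifiableSet(columnFamilyNames); }
  }

  private final Delegatee delegatee = new Delegatee();

  /** When true, new data written to the primary region is also replicated to replica memstores. */
  TableDescriptorSketch setRegionMemstoreReplication(boolean memstoreReplication) {
    delegatee.setRegionMemstoreReplication(memstoreReplication);
    return this; // fluent, matching the quoted source
  }

  /** Deprecated name kept for compatibility; forwards to the replacement accessor. */
  @Deprecated
  Set<byte[]> getFamiliesKeys() {
    return delegatee.getColumnFamilyNames();
  }
}
```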
[36/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentFactory.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentFactory.html
index 9b0b1ed..0f0e694 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentFactory.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class SegmentFactory
+public final class SegmentFactory
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 A singleton store segment factory.
  Generate concrete store segments.
@@ -203,9 +203,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ImmutableSegment
-createImmutableSegmentByFlattening(CSLMImmutableSegment segment,
+createImmutableSegmentByFlattening(CSLMImmutableSegment segment,
   CompactingMemStore.IndexType idxType,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 ImmutableSegment
@@ -264,7 +264,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 instance
-private static SegmentFactory instance
+private static SegmentFactory instance
 
 
 
@@ -281,7 +281,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SegmentFactory
-private SegmentFactory()
+private SegmentFactory()
 
 
 
@@ -298,7 +298,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 instance
-public static SegmentFactory instance()
+public static SegmentFactory instance()
 
 
 
@@ -307,7 +307,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createCompositeImmutableSegment
-public CompositeImmutableSegment createCompositeImmutableSegment(CellComparator comparator,
+public CompositeImmutableSegment createCompositeImmutableSegment(CellComparator comparator,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List segments)
 
 
@@ -317,7 +317,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createImmutableSegmentByCompaction
-public ImmutableSegment createImmutableSegmentByCompaction(org.apache.hadoop.conf.Configuration conf,
+public ImmutableSegment createImmutableSegmentByCompaction(org.apache.hadoop.conf.Configuration conf,
CellComparator comparator,
MemStoreSegmentsIterator iterator,
int numOfCells,
@@ -335,7 +335,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createImmutableSegment
-public ImmutableSegment createImmutableSegment(CellComparator comparator)
+public ImmutableSegment createImmutableSegment(CellComparator comparator)
 
 
 
@@ -344,7 +344,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createImmutableSegment
-public ImmutableSegment createImmutableSegment(MutableSegment segment)
+public ImmutableSegment createImmutableSegment(MutableSegment segment)
 
 
 
@@ -353,7 +353,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createMutableSegment
-public MutableSegment createMutableSegment(org.apache.hadoop.conf.Configuration conf,
+public MutableSegment createMutableSegment(org.apache.hadoop.conf.Configuration conf,
CellComparator comparator)
 
 
@@ -363,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createImmutableSegmentByMerge
-public ImmutableSegment createImmutableSegmentByMerge(org.apache.hadoop.conf.Configuration conf,
+public ImmutableSegment createImmutableSegmentByMerge(org.apache.hadoop.conf.Configuration conf,
   CellComparator comparator,
   MemStoreSegmentsIterator iterator,
   int numOfCells,
@@ -376,15 +376,15 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 
 
 
 createImmutableSegmentByFlattening
-public ImmutableSegment createImmutableSegmentByFlattening(CSLMImmutableSegment segment,
+public ImmutableSegment createImmutableSegmentByFlattening(CSLMImmutableSegment segment,
CompactingMemStore.IndexType idxType,
- 

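The SegmentFactory page above ("A singleton store segment factory. Generate concrete store segments.") shows a private constructor plus a static instance() accessor, with the factory methods now taking MemStoreSize instead of MemstoreSize. A minimal, self-contained sketch of that eager-singleton shape follows; the segment type is a stub rather than HBase's MutableSegment, and the HBase-specific parameters are omitted.

```java
// Eager-singleton shape implied by "private SegmentFactory()" and
// "public static SegmentFactory instance()" in the page above.
// MutableSegmentStub is a stand-in, not HBase's MutableSegment.
final class SegmentFactorySketch {
  private static final SegmentFactorySketch INSTANCE = new SegmentFactorySketch();

  private SegmentFactorySketch() {
    // no state; exists only to funnel all segment creation through one place
  }

  public static SegmentFactorySketch instance() {
    return INSTANCE;
  }

  static final class MutableSegmentStub { }

  /** Loosely mirrors createMutableSegment(conf, comparator) without HBase types. */
  public MutableSegmentStub createMutableSegment() {
    return new MutableSegmentStub();
  }

  public static void main(String[] args) {
    MutableSegmentStub segment = SegmentFactorySketch.instance().createMutableSegment();
    System.out.println("created " + segment);
  }
}
```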
[43/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 336bf14..36fc445 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -547,25 +547,25 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationState
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.RegionLocateType
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
 org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
-org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationState
 org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
+org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
 org.apache.hadoop.hbase.client.TableState.State
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
 org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.Durability
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
 org.apache.hadoop.hbase.client.RequestController.ReturnCode
-org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.Scan.ReadType
+org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.CompactType
+org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
+org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.SnapshotType
+org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
index 5bdab95..bbf68db 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
@@ -104,8 +104,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.executor.ExecutorType
 org.apache.hadoop.hbase.executor.EventType
+org.apache.hadoop.hbase.executor.ExecutorType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 32f9a9f..8af9c7d 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -175,14 +175,14 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
+org.apache.hadoop.hbase.filter.

[47/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 716b231..c174cdd 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2017 The Apache Software Foundation
 
   File: 2051,
- Errors: 13712,
+ Errors: 13709,
  Warnings: 0,
  Infos: 0
   
@@ -4470,20 +4470,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.MemstoreSize.java";>org/apache/hadoop/hbase/regionserver/MemstoreSize.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.RetryCounterFactory.java";>org/apache/hadoop/hbase/util/RetryCounterFactory.java
 
 
@@ -7783,7 +7769,7 @@ under the License.
   0
 
 
-  24
+  22
 
   
   
@@ -16953,7 +16939,7 @@ under the License.
   0
 
 
-  6
+  7
 
   
   
@@ -19464,6 +19450,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.MemStoreSize.java";>org/apache/hadoop/hbase/regionserver/MemStoreSize.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.http.HttpConfig.java";>org/apache/hadoop/hbase/http/HttpConfig.java
 
 
@@ -19767,7 +19767,7 @@ under the License.
   0
 
 
-  3
+  1
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/coc.html
--
diff --git a/coc.html b/coc.html
index 5378638..78eef01 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index c27fcb1..cf16711 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index b1992ef..007d790 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index d4f8208..c804bc3 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7

[42/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
deleted file mode 100644
index 683a311..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
+++ /dev/null
@@ -1,382 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-StochasticLoadBalancer.MemstoreSizeCostFunction (Apache HBase 
3.0.0-SNAPSHOT API)
-
-
-
-
-
-org.apache.hadoop.hbase.master.balancer
-Class StochasticLoadBalancer.MemstoreSizeCostFunction
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFunction
-
-
-org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFromRegionLoadFunction
-
-
-org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFromRegionLoadAsRateFunction
-
-
-org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.MemstoreSizeCostFunction
-
-
-
-
-
-
-
-
-
-
-
-
-
-Enclosing class:
-StochasticLoadBalancer
-
-
-
-static class StochasticLoadBalancer.MemstoreSizeCostFunction
-extends StochasticLoadBalancer.CostFromRegionLoadAsRateFunction
-Compute the cost of total memstore size.  The more 
unbalanced the higher the
- computed cost will be.  This uses a rolling average of regionload.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields 
-
-Modifier and Type
-Field and Description
-
-
-private static float
-DEFAULT_MEMSTORE_SIZE_COST 
-
-
-private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
-MEMSTORE_SIZE_COST_KEY 
-
-
-
-
-
-
-Fields inherited from 
class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFunction
-cluster
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors 
-
-Constructor and Description
-
-
-MemstoreSizeCostFunction(org.apache.hadoop.conf.Configuration conf) 
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods Instance Methods Concrete Methods 
-
-Modifier and Type
-Method and Description
-
-
-protected double
-getCostFromRl(BalancerRegionLoad rl) 
-
-
-
-
-
-
-Methods inherited from 
class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFromRegionLoadAsRateFunction
-getRegionLoadCost
-
-
-
-
-
-Methods inherited from 
class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFromRegionLoadFunction
-cost,
 setClusterStatus,
 setLoads
-
-
-
-
-
-Methods inherited from 
class org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CostFunction
-costFromArray,
 getMultiplier,
 init,
 isNeeded,
 postAction,
 regionMoved,
 scale,
 setMultiplier
-
-
-
-
-
-Methods inherited from class java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=t

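The deleted page above documented StochasticLoadBalancer.MemstoreSizeCostFunction: "Compute the cost of total memstore size. The more unbalanced the higher the computed cost will be. This uses a rolling average of regionload." The sketch below illustrates only that idea: cost grows with the spread of per-server memstore sizes and is 0 when they are equal. It is not the balancer's actual formula; the normalization used here is an assumption made for the example.

```java
// Illustrative only: cost rises with imbalance of per-server memstore sizes.
// This is not the StochasticLoadBalancer's exact computation.
final class MemStoreCostSketch {
  /** Returns a value in [0,1]; 0 when all servers carry equal memstore load. */
  static double cost(long[] memStoreSizePerServer) {
    long total = 0;
    for (long s : memStoreSizePerServer) total += s;
    if (memStoreSizePerServer.length == 0 || total == 0) return 0.0;
    double mean = (double) total / memStoreSizePerServer.length;
    double sumAbsDev = 0;
    for (long s : memStoreSizePerServer) sumAbsDev += Math.abs(s - mean);
    // Normalize by the worst case (all memstore load concentrated on one server).
    double worst = 2.0 * total * (memStoreSizePerServer.length - 1) / memStoreSizePerServer.length;
    return worst == 0 ? 0.0 : sumAbsDev / worst;
  }

  public static void main(String[] args) {
    System.out.println(cost(new long[] {100, 100, 100})); // 0.0: balanced
    System.out.println(cost(new long[] {300, 0, 0}));     // 1.0: fully skewed
  }
}
```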
[20/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegativeMemStoreDataSiz

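The HRegion fragment above shows the renamed accounting helpers addAndGetMemStoreSize, decrMemStoreSize and checkNegativeMemStoreDataSize operating on an atomic memstore data size. A stand-alone sketch of that same pattern follows; MemStoreSizeStub replaces HBase's MemStoreSize, and the RegionServerAccounting hook seen in the diff is omitted.

```java
import java.util.concurrent.atomic.AtomicLong;

// Stand-alone stub mirroring the accounting pattern in the HRegion diff above;
// MemStoreSizeStub replaces HBase's MemStoreSize.
final class RegionMemStoreAccountingSketch {
  static final class MemStoreSizeStub {
    private final long dataSize;
    MemStoreSizeStub(long dataSize) { this.dataSize = dataSize; }
    long getDataSize() { return dataSize; }
  }

  private final AtomicLong memStoreDataSize = new AtomicLong();

  long addAndGetMemStoreSize(MemStoreSizeStub delta) {
    long size = memStoreDataSize.addAndGet(delta.getDataSize());
    checkNegativeMemStoreDataSize(size, delta.getDataSize());
    return size;
  }

  void decrMemStoreSize(MemStoreSizeStub delta) {
    long size = memStoreDataSize.addAndGet(-delta.getDataSize());
    checkNegativeMemStoreDataSize(size, -delta.getDataSize());
  }

  private void checkNegativeMemStoreDataSize(long currentDataSize, long delta) {
    if (currentDataSize < 0) {
      throw new IllegalStateException(
          "Negative memstore data size " + currentDataSize + " after applying delta " + delta);
    }
  }
}
```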
[24/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegativeMemSt

[39/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
new file mode 100644
index 000..5027080
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -0,0 +1,372 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+HRegionServer.PeriodicMemStoreFlusher (Apache HBase 3.0.0-SNAPSHOT 
API)
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver
+Class 
HRegionServer.PeriodicMemStoreFlusher
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.ScheduledChore
+
+
+org.apache.hadoop.hbase.regionserver.HRegionServer.PeriodicMemStoreFlusher
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable
+
+
+Enclosing class:
+HRegionServer
+
+
+
+static class HRegionServer.PeriodicMemStoreFlusher
+extends ScheduledChore
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+(package private) static int
+MIN_DELAY_TIME 
+
+
+(package private) static int
+RANGE_OF_DELAY 
+
+
+(package private) HRegionServer
+server 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+PeriodicMemStoreFlusher(int cacheFlushInterval,
+   HRegionServer server) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+protected void
+chore()
+The task to execute on each scheduled execution of the 
Chore
+
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.ScheduledChore
+cancel,
 cancel,
 choreForTesting,
 cleanup,
 getInitialDelay,
 getName,
 getPeriod,
 getStopper,
 getTimeUnit,
 initialChore,
 isInitialChoreComplete,
 isScheduled,
 run,
 toString,
 triggerNow
+
+
+
+
+
+Methods inherited from class java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-";
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+

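The new page above documents HRegionServer.PeriodicMemStoreFlusher, a ScheduledChore with MIN_DELAY_TIME and RANGE_OF_DELAY fields and a chore() override. The sketch below conveys the same "periodic task with randomized delay" idea using a plain ScheduledExecutorService, since the ScheduledChore constructor is not shown here; the delay constants and the flush decision are illustrative, not HBase's actual values or logic.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

// Sketch of a periodic memstore-flush chore; uses a plain executor rather than
// org.apache.hadoop.hbase.ScheduledChore. RANGE_OF_DELAY / MIN_DELAY_TIME echo
// the fields listed in the new page, with illustrative values.
final class PeriodicFlusherSketch implements Runnable {
  static final int RANGE_OF_DELAY = 5 * 60 * 1000; // illustrative: up to 5 minutes of jitter
  static final int MIN_DELAY_TIME = 0;

  @Override
  public void run() {
    // The real chore walks online regions and requests flushes for regions whose
    // memstores have been idle too long; here we only compute the jittered delay.
    long delayMs = MIN_DELAY_TIME + ThreadLocalRandom.current().nextInt(RANGE_OF_DELAY);
    System.out.println("would schedule flush after " + delayMs + " ms of jitter");
  }

  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    pool.scheduleAtFixedRate(new PeriodicFlusherSketch(), 0, 10, TimeUnit.SECONDS);
    Thread.sleep(1_000);
    pool.shutdownNow();
  }
}
```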
[50/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index 3aacd19..f4b62dc 100644
--- a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":41,"i45":42,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42};
+var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":41,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42,"i73":42,"i74":42};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -627,24 +627,33 @@ implements 
 boolean
 hasRegionMemstoreReplication()
+Deprecated. 
+As of release 2.0.0, this 
will be removed in HBase 3.0.0.
+ Use hasRegionMemStoreReplication()
 instead
+
+
+
+
+boolean
+hasRegionMemStoreReplication()
 Deprecated. 
  
 
-
+
 boolean
 hasSerialReplicationScope()
 Deprecated. 
 Return true if there are at least one cf whose replication 
scope is serial.
 
 
-
+
 boolean
 isCompactionEnabled()
 Deprecated. 
 Check if the compaction enable flag of the table is 
true.
 
 
-
+
 boolean
 isMetaRegion()
 Deprecated. 
@@ -652,83 +661,83 @@ implements 
+
 boolean
 isMetaTable()
 Deprecated. 
 Checks if the table is a hbase:meta table
 
 
-
+
 boolean
 isNormalizationEnabled()
 Deprecated. 
 Check if normalization enable flag of the table is 
true.
 
 
-
+
 boolean
 isReadOnly()
 Deprecated. 
 Check if the readOnly flag of the table is set.
 
 
-
+
 boolean
 isRootRegion()
 Deprecated. 
 This is vestigial API.
 
 
-
+
 HTableDescriptor
 modifyFamily(HColumnDescriptor family)
 Deprecated. 
 Modifies the existing column family.
 
 
-
+
 static HTableDescriptor
 parseFrom(byte[] bytes)
 Deprecated. 
  
 
-
+
 void
 remove(byte[] key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 remove(Bytes key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 remove(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 removeConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key)
 Deprecated. 
 Remove a config setting represented by the key from the 
map
 
 
-
+
 void
 removeCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className)
 Deprecated. 
 Remove a coprocessor from those set on the table
 
 
-
+
 HColumnDescriptor
 removeFamily(byte[] column)
 Deprecated. 
@@ -736,14 +745,14 @@ implements 
+
 HTableDescriptor
 setCompactionEnabled(boolean isEnable)
 Deprecated. 
 Setting the table compaction enable flag.
 
 
-
+
 HTableDescriptor
 setConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String value)
@@ -751,14 +760,14 @@ implements Setter for storing a configuration setting in map.
 
 
-
+
 HTableDescriptor
 setDurability(Durability durability)
 Deprecated. 
 Sets the Durability setting for the 
table.
 
 
-
+
 HTableDescriptor
 setFlushPolicyClassName(http://docs.oracle.com/javase/8/docs/api/ja

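The method table above deprecates hasRegionMemstoreReplication() ("As of release 2.0.0, this will be removed in HBase 3.0.0. Use hasRegionMemStoreReplication() instead") while adding the camel-cased replacement alongside it. A small stub showing that keep-the-old-spelling-as-a-deprecated-delegate pattern follows; the class is illustrative, not HTableDescriptor itself.

```java
// Stub showing the deprecate-and-delegate rename pattern from the diff above.
class ReplicationFlagSketch {
  private boolean regionMemStoreReplication = true;

  /** New, consistently camel-cased accessor. */
  public boolean hasRegionMemStoreReplication() {
    return regionMemStoreReplication;
  }

  /**
   * Old spelling kept for compatibility.
   * @deprecated As of 2.0.0, to be removed in 3.0.0; use {@link #hasRegionMemStoreReplication()}.
   */
  @Deprecated
  public boolean hasRegionMemstoreReplication() {
    return hasRegionMemStoreReplication();
  }
}
```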
[37/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 8c0d0fb..9f0073f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -744,11 +744,11 @@ implements 
 long
-getMemstoreLimit() 
+getMemStoreLimit() 
 
 
 long
-getMemstoreSize()
+getMemStoreSize()
 Get the size of the memstore on this region server.
 
 
@@ -1816,16 +1816,16 @@ implements 
+
 
 
 
 
-getMemstoreLimit
-public long getMemstoreLimit()
+getMemStoreLimit
+public long getMemStoreLimit()
 
 Specified by:
-getMemstoreLimit in
 interface MetricsRegionServerWrapper
+getMemStoreLimit in
 interface MetricsRegionServerWrapper
 
 
 
@@ -2264,18 +2264,18 @@ implements 
+
 
 
 
 
-getMemstoreSize
-public long getMemstoreSize()
-Description copied from 
interface: MetricsRegionServerWrapper
+getMemStoreSize
+public long getMemStoreSize()
+Description copied from 
interface: MetricsRegionServerWrapper
 Get the size of the memstore on this region server.
 
 Specified by:
-getMemstoreSize in
 interface MetricsRegionServerWrapper
+getMemStoreSize in
 interface MetricsRegionServerWrapper
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
index 731d60d..c915862 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
@@ -154,7 +154,7 @@ var activeTableTab = "activeTableTab";
 
 
 long
-getMemstoreSize()
+getMemStoreSize()
 Get the size of the memstore on this region server.
 
 
@@ -327,13 +327,13 @@ var activeTableTab = "activeTableTab";
 Get the number of store files hosted on this region 
server.
 
 
-
+
 
 
 
 
-getMemstoreSize
-long getMemstoreSize()
+getMemStoreSize
+long getMemStoreSize()
 Get the size of the memstore on this region server.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
index be575b6..2abf080 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.html
@@ -282,7 +282,7 @@ implements 
 long
-getMemstoreSize()
+getMemStoreSize()
 Get the size of the memstore on this region server.
 
 
@@ -658,18 +658,18 @@ implements 
+
 
 
 
 
-getMemstoreSize
-public long getMemstoreSize()
-Description copied from 
interface: MetricsRegionWrapper
+getMemStoreSize
+public long getMemStoreSize()
+Description copied from 
interface: MetricsRegionWrapper
 Get the size of the memstore on this region server.
 
 Specified by:
-getMemstoreSize in
 interface MetricsRegionWrapper
+getMemStoreSize in
 interface MetricsRegionWrapper
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
index 9a9a6a6..444b88b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 long
-getMemstoresSize(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table)
+getMemStoresSize(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table)
 Get the memory store size against this table
 
 
@@ -207,13 +207,13 @@ var activeTableTab = "activeTableTab";
 Get the total number of requests that have been issued 
against this table
 
 
-
+
 
 
 
 
-getMemstoresSize
-long getMemstoresSize(http://docs.oracle.com/javase/8/docs/api/java/lang/S

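The metrics diffs above rename getMemstoreSize()/getMemstoreLimit() to getMemStoreSize()/getMemStoreLimit() on the region and region-server wrapper interfaces and their implementations. A stand-alone stub of the renamed accessor pair follows; the interface and implementation names are invented for illustration and are not the real MetricsRegionServerWrapper types.

```java
// Invented stub types echoing the renamed metrics accessors above.
interface RegionServerMetricsSketch {
  /** Size of all memstores on this region server, in bytes. */
  long getMemStoreSize();

  /** Configured upper bound for global memstore usage, in bytes. */
  long getMemStoreLimit();
}

final class RegionServerMetricsSketchImpl implements RegionServerMetricsSketch {
  private final long memStoreSizeBytes;
  private final long memStoreLimitBytes;

  RegionServerMetricsSketchImpl(long memStoreSizeBytes, long memStoreLimitBytes) {
    this.memStoreSizeBytes = memStoreSizeBytes;
    this.memStoreLimitBytes = memStoreLimitBytes;
  }

  @Override public long getMemStoreSize() { return memStoreSizeBytes; }
  @Override public long getMemStoreLimit() { return memStoreLimitBytes; }
}
```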
[32/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index 3f18200..c678b81 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -571,321 +571,339 @@
 563  }
 564
 565  /**
-566   * @return true if the read-replicas 
memstore replication is enabled.
-567   */
-568  @Override
-569  public boolean 
hasRegionMemstoreReplication() {
-570return 
delegatee.hasRegionMemstoreReplication();
-571  }
-572
-573  /**
-574   * Enable or Disable the memstore 
replication from the primary region to the replicas.
-575   * The replication will be used only 
for meta operations (e.g. flush, compaction, ...)
-576   *
-577   * @param memstoreReplication true if 
the new data written to the primary region
-578   * 
should be replicated.
-579   *false if 
the secondaries can tollerate to have new
-580   *  
data only when the primary flushes the memstore.
-581   */
-582  public HTableDescriptor 
setRegionMemstoreReplication(boolean memstoreReplication) {
-583
getDelegateeForModification().setRegionMemstoreReplication(memstoreReplication);
-584return this;
-585  }
-586
-587  public HTableDescriptor setPriority(int 
priority) {
-588
getDelegateeForModification().setPriority(priority);
-589return this;
-590  }
-591
-592  @Override
-593  public int getPriority() {
-594return delegatee.getPriority();
-595  }
-596
-597  /**
-598   * Returns all the column family names 
of the current table. The map of
-599   * HTableDescriptor contains mapping of 
family name to HColumnDescriptors.
-600   * This returns all the keys of the 
family map which represents the column
-601   * family names of the table.
-602   *
-603   * @return Immutable sorted set of the 
keys of the families.
-604   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0
-605   * (HBASE-18008;).
-606   * Use {@link 
#getColumnFamilyNames()}.
-607   */
-608  @Deprecated
-609  public Set 
getFamiliesKeys() {
-610return 
delegatee.getColumnFamilyNames();
-611  }
-612
-613  /**
-614   * Returns the count of the column 
families of the table.
-615   *
-616   * @return Count of column families of 
the table
-617   */
-618  @Override
-619  public int getColumnFamilyCount() {
-620return 
delegatee.getColumnFamilyCount();
-621  }
-622
-623  /**
-624   * Returns an array all the {@link 
HColumnDescriptor} of the column families
-625   * of the table.
-626   *
-627   * @return Array of all the 
HColumnDescriptors of the current table
-628   *
-629   * @see #getFamilies()
-630   */
-631  @Deprecated
-632  @Override
-633  public HColumnDescriptor[] 
getColumnFamilies() {
-634return 
Stream.of(delegatee.getColumnFamilies())
-635
.map(this::toHColumnDescriptor)
-636.toArray(size -> new 
HColumnDescriptor[size]);
-637  }
-638
-639  /**
-640   * Returns the HColumnDescriptor for a 
specific column family with name as
-641   * specified by the parameter column.
-642   * @param column Column family name
-643   * @return Column descriptor for the 
passed family name or the family on
-644   * passed in column.
-645   * @deprecated Use {@link 
#getColumnFamily(byte[])}.
-646   */
-647  @Deprecated
-648  public HColumnDescriptor 
getFamily(final byte[] column) {
-649return 
toHColumnDescriptor(delegatee.getColumnFamily(column));
-650  }
-651
-652
-653  /**
-654   * Removes the HColumnDescriptor with 
name specified by the parameter column
-655   * from the table descriptor
-656   *
-657   * @param column Name of the column 
family to be removed.
-658   * @return Column descriptor for the 
passed family name or the family on
-659   * passed in column.
-660   */
-661  public HColumnDescriptor 
removeFamily(final byte [] column) {
-662return 
toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column));
-663  }
-664
-665  /**
-666   * Return a HColumnDescriptor for user 
to keep the compatibility as much as possible.
-667   * @param desc read-only 
ColumnFamilyDescriptor
-668   * @return The older implementation of 
ColumnFamilyDescriptor
-669   */
-670  protected HColumnDescriptor 
toHColumnDescriptor(ColumnFamilyDescriptor desc) {
-671if (desc == null) {
-672  return null;
-673} else if (desc instanceof 
ModifyableColumnFamilyDescriptor) {
-674  return new 
HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
-675} else if (desc instanceof 
HColumnDescriptor) {
-676  return (HColumnDescript

[44/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
index ed5695d..df7e749 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -5352,28 +5352,28 @@ service.
 
 
 void
-MutableSegment.add(Cell cell,
+MutableSegment.add(Cell cell,
boolean mslabUsed,
-   MemstoreSize memstoreSize)
+   MemStoreSize memstoreSize)
 Adds the given cell into the segment
 
 
 
 void
-MemStore.add(Cell cell,
-   MemstoreSize memstoreSize)
+MemStore.add(Cell cell,
+   MemStoreSize memstoreSize)
 Write an update
 
 
 
 void
-AbstractMemStore.add(Cell cell,
-   MemstoreSize memstoreSize) 
+AbstractMemStore.add(Cell cell,
+   MemStoreSize memstoreSize) 
 
 
 void
-HStore.add(Cell cell,
-   MemstoreSize memstoreSize)
+HStore.add(Cell cell,
+   MemStoreSize memstoreSize)
 Adds a value to the memstore
 
 
@@ -5409,9 +5409,9 @@ service.
 
 
 private void
-HRegion.applyToMemstore(HStore store,
+HRegion.applyToMemStore(HStore store,
Cell cell,
-   MemstoreSize memstoreSize) 
+   MemStoreSize memstoreSize) 
 
 
 boolean
@@ -5699,21 +5699,21 @@ service.
 
 
 private void
-AbstractMemStore.internalAdd(Cell toAdd,
+AbstractMemStore.internalAdd(Cell toAdd,
boolean mslabUsed,
-   MemstoreSize memstoreSize) 
+   MemStoreSize memstoreSize) 
 
 
 protected void
-Segment.internalAdd(Cell cell,
+Segment.internalAdd(Cell cell,
boolean mslabUsed,
-   MemstoreSize memstoreSize) 
+   MemStoreSize memstoreSize) 
 
 
 protected void
-CompositeImmutableSegment.internalAdd(Cell cell,
+CompositeImmutableSegment.internalAdd(Cell cell,
boolean mslabUsed,
-   MemstoreSize memstoreSize) 
+   MemStoreSize memstoreSize) 
 
 
 private boolean
@@ -5962,9 +5962,9 @@ service.
 
 
 protected void
-HRegion.restoreEdit(HStore s,
+HRegion.restoreEdit(HStore s,
Cell cell,
-   MemstoreSize memstoreSize)
+   MemStoreSize memstoreSize)
 Used by tests
 
 
@@ -6220,35 +6220,35 @@ service.
 
 
 protected void
-Segment.updateMetaInfo(Cell cellToAdd,
+Segment.updateMetaInfo(Cell cellToAdd,
   boolean succ,
   boolean mslabUsed,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 protected void
-CompositeImmutableSegment.updateMetaInfo(Cell cellToAdd,
+CompositeImmutableSegment.updateMetaInfo(Cell cellToAdd,
   boolean succ,
   boolean mslabUsed,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 protected void
-Segment.updateMetaInfo(Cell cellToAdd,
+Segment.updateMetaInfo(Cell cellToAdd,
   boolean succ,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 private void
-AbstractMemStore.upsert(Cell cell,
+AbstractMemStore.upsert(Cell cell,
   long readpoint,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 void
-MutableSegment.upsert(Cell cell,
+MutableSegment.upsert(Cell cell,
   long readpoint,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 
@@ -6261,20 +6261,20 @@ service.
 
 
 void
-MemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize)
+MemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+   MemStoreSize memstoreSize)
 Write the updates
 
 
 
 void
-AbstractMemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize) 
+AbstractMemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+   MemStoreSize memstoreSize) 
 
 
 void
-HStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize)
+HStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+   MemStoreSize memstoreSize)
 Adds the specified value to the memstore
 
 
@@ -6295,15 +6295,15 @@ service.
 
 
 private void
-HRegion.applyFamilyMapToMemstore(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map

[19/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegative
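
The HRegion hunk above renames addAndGetMemstoreSize/decrMemstoreSize to the MemStore spelling; the underlying pattern is an atomic per-region data-size counter that is re-checked after every delta. Below is a self-contained sketch of that add-and-check pattern, not the actual HRegion code; the exception is only a stand-in for HRegion's own handling of a negative size:

    import java.util.concurrent.atomic.AtomicLong;

    // Stripped-down illustration of the accounting pattern only.
    class MemStoreSizeAccounting {
      private final AtomicLong memstoreDataSize = new AtomicLong();

      long addAndGet(long dataSizeDelta) {
        long size = memstoreDataSize.addAndGet(dataSizeDelta);
        checkNegative(size, dataSizeDelta);
        return size;
      }

      void decrement(long dataSizeDelta) {
        addAndGet(-dataSizeDelta);
      }

      private void checkNegative(long current, long delta) {
        if (current < 0) {
          throw new IllegalStateException(
              "memstore data size went negative: " + current + " after delta " + delta);
        }
      }
    }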

[45/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index ec8e6db..c292589 100644
--- a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":41,"i45":42,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42};
+var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":41,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42,"i73":42,"i74":42};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -638,24 +638,33 @@ implements 
 boolean
 hasRegionMemstoreReplication()
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use hasRegionMemStoreReplication() instead
+
+boolean
+hasRegionMemStoreReplication()
 Deprecated. 
  
 
-
+
 boolean
 hasSerialReplicationScope()
 Deprecated. 
 Return true if there are at least one cf whose replication 
scope is serial.
 
 
-
+
 boolean
 isCompactionEnabled()
 Deprecated. 
 Check if the compaction enable flag of the table is 
true.
 
 
-
+
 boolean
 isMetaRegion()
 Deprecated. 
@@ -663,83 +672,83 @@ implements 
+
 boolean
 isMetaTable()
 Deprecated. 
 Checks if the table is a hbase:meta table
 
 
-
+
 boolean
 isNormalizationEnabled()
 Deprecated. 
 Check if normalization enable flag of the table is 
true.
 
 
-
+
 boolean
 isReadOnly()
 Deprecated. 
 Check if the readOnly flag of the table is set.
 
 
-
+
 boolean
 isRootRegion()
 Deprecated. 
 This is vestigial API.
 
 
-
+
 HTableDescriptor
 modifyFamily(HColumnDescriptor family)
 Deprecated. 
 Modifies the existing column family.
 
 
-
+
 static HTableDescriptor
 parseFrom(byte[] bytes)
 Deprecated. 
  
 
-
+
 void
 remove(byte[] key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 remove(Bytes key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 remove(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key)
 Deprecated. 
 Remove metadata represented by the key from the map
 
 
-
+
 void
 removeConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key)
 Deprecated. 
 Remove a config setting represented by the key from the 
map
 
 
-
+
 void
 removeCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className)
 Deprecated. 
 Remove a coprocessor from those set on the table
 
 
-
+
 HColumnDescriptor
 removeFamily(byte[] column)
 Deprecated. 
@@ -747,14 +756,14 @@ implements 
+
 HTableDescriptor
 setCompactionEnabled(boolean isEnable)
 Deprecated. 
 Setting the table compaction enable flag.
 
 
-
+
 HTableDescriptor
 setConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String value)
@@ -762,14 +771,14 @@ implements Setter for storing a configuration setting in map.
 
 
-
+
 HTableDescriptor
 setDurability(Durability durability)
 Deprecated. 
 Sets the Durability setting for the 
table.
 
 
-
+
 HTableDescriptor
 setFlushPolicyClassName(http://docs.oracle.com/javas
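
Per the method summary above, hasRegionMemstoreReplication() is deprecated as of 2.0.0 and hasRegionMemStoreReplication() is named as its replacement. A short sketch of the rename for callers, assuming HTableDescriptor from the 2.0 client API:

    import org.apache.hadoop.hbase.HTableDescriptor;

    class MemStoreReplicationFlag {
      static boolean isMemStoreReplicationEnabled(HTableDescriptor htd) {
        // Old spelling (deprecated): htd.hasRegionMemstoreReplication();
        return htd.hasRegionMemStoreReplication();  // renamed accessor
      }
    }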

[28/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
deleted file mode 100644
index 5c157b5..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
+++ /dev/null
@@ -1,1786 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software 
Foundation (ASF) under one
-003 * or more contributor license 
agreements.  See the NOTICE file
-004 * distributed with this work for 
additional information
-005 * regarding copyright ownership.  The 
ASF licenses this file
-006 * to you under the Apache License, 
Version 2.0 (the
-007 * "License"); you may not use this file 
except in compliance
-008 * with the License.  You may obtain a 
copy of the License at
-009 *
-010 * 
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or 
agreed to in writing, software
-013 * distributed under the License is 
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-015 * See the License for the specific 
language governing permissions and
-016 * limitations under the License.
-017 */
-018package 
org.apache.hadoop.hbase.master.balancer;
-019
-020import java.util.ArrayDeque;
-021import java.util.ArrayList;
-022import java.util.Arrays;
-023import java.util.Collection;
-024import java.util.Collections;
-025import java.util.Deque;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Map.Entry;
-031import java.util.Random;
-032
-033import org.apache.commons.logging.Log;
-034import 
org.apache.commons.logging.LogFactory;
-035import 
org.apache.hadoop.conf.Configuration;
-036import 
org.apache.hadoop.hbase.ClusterStatus;
-037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-038import 
org.apache.hadoop.hbase.HConstants;
-039import 
org.apache.hadoop.hbase.RegionLoad;
-040import 
org.apache.hadoop.hbase.ServerLoad;
-041import 
org.apache.hadoop.hbase.ServerName;
-042import 
org.apache.hadoop.hbase.TableName;
-043import 
org.apache.hadoop.hbase.client.RegionInfo;
-044import 
org.apache.hadoop.hbase.master.MasterServices;
-045import 
org.apache.hadoop.hbase.master.RegionPlan;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
-050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055
-056import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Optional;
-057import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-058
-059import 
com.google.common.annotations.VisibleForTesting;
-060
-061/**
-062 * 

This is a best effort load balancer. Given a Cost function F(C) => x It will -063 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the -064 * new cluster state becomes the plan. It includes costs functions to compute the cost of:

-065 *
    -066 *
  • Region Load
  • -067 *
  • Table Load
  • -068 *
  • Data Locality
  • -069 *
  • Memstore Sizes
  • -070 *
  • Storefile Sizes
  • -071 *
-072 * -073 * -074 *

Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost -075 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are -076 * scaled by their respective multipliers:

-077 * -078 *
    -079 *
  • hbase.master.balancer.stochastic.regionLoadCost
  • -080 *
  • hbase.master.balancer.stochastic.moveCost
  • -081 *
  • hbase.master.balancer.stochastic.tableLoadCost
  • -082 *
  • hbase.master.balancer.stochastic.localityCost
  • -083 *
  • hbase.master.balancer.stochastic.memstoreSizeCost
  • -084 *
  • hbase.master.balancer.stochastic.storefileSizeCost
  • -085 *
-086 * -087 *

In addition to the above configurations, the balancer can be tuned by the following -088 * configuration values:

-089 *

[17/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegativeMemStoreDataSize(lon

[34/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemstoreSize.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemstoreSize.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemstoreSize.html
deleted file mode 100644
index 2cbe596..000
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemstoreSize.html
+++ /dev/null
@@ -1,549 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.regionserver.MemstoreSize (Apache HBase 3.0.0-SNAPSHOT API)
-[standard generated Javadoc page head and navigation bar omitted]
-Uses of Class org.apache.hadoop.hbase.regionserver.MemstoreSize
-
-
-
-
-
-Packages that use MemstoreSize 
-
-Package
-Description
-
-
-
-org.apache.hadoop.hbase.regionserver
- 
-
-
-
-
-
-
-
-
-
-Uses of MemstoreSize in org.apache.hadoop.hbase.regionserver
-
-Fields in org.apache.hadoop.hbase.regionserver
 declared as MemstoreSize 
-
-Modifier and Type
-Field and Description
-
-
-
-(package private) MemstoreSize
-HRegion.PrepareFlushResult.totalFlushableSize 
-
-
-
-
-Fields in org.apache.hadoop.hbase.regionserver
 with type parameters of type MemstoreSize 
-
-Modifier and Type
-Field and Description
-
-
-
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentMap
-RegionServerAccounting.replayEditsPerRegion 
-
-
-(package private) http://docs.oracle.com/javase/8/docs/api/java/util/TreeMap.html?is-external=true";
 title="class or interface in java.util">TreeMap
-HRegion.PrepareFlushResult.storeFlushableSize 
-
-
-
-
-Methods in org.apache.hadoop.hbase.regionserver
 that return MemstoreSize 
-
-Modifier and Type
-Method and Description
-
-
-
-private MemstoreSize
-HRegion.doDropStoreMemstoreContentsForSeqId(HStore s,
-   long currentSeqId) 
-
-
-private MemstoreSize
-HRegion.dropMemstoreContentsForSeqId(long seqId,
-HStore store)
-Drops the memstore contents after replaying a flush 
descriptor or region open event replay
- if the memstore edits have seqNums smaller than the given seq id
-
-
-
-MemstoreSize
-MemStore.getFlushableSize()
-On flush, how much memory we will clear.
-
-
-
-MemstoreSize
-CompactingMemStore.getFlushableSize()
-On flush, how much memory we will clear.
-
-
-
-MemstoreSize
-DefaultMemStore.getFlushableSize()
-On flush, how much memory we will clear from the active 
cell set.
-
-
-
-MemstoreSize
-Store.getFlushableSize() 
-
-
-MemstoreSize
-HStore.getFlushableSize() 
-
-
-MemstoreSize
-Store.getMemStoreSize() 
-
-
-MemstoreSize
-HStore.getMemStoreSize() 
-
-
-MemstoreSize
-CompactionPipeline.getPipelineSize() 
-
-
-MemstoreSize
-MemStore.getSnapshotSize()
-Return the size of the snapshot(s) if any
-
-
-
-MemstoreSize
-AbstractMemStore.getSnapshotSize() 
-
-
-MemstoreSize
-Store.getSnapshotSize() 
-
-
-MemstoreSize
-HStore.getSnapshotSize() 
-
-
-MemstoreSize
-CompactionPipeline.getTailSize() 
-
-
-MemstoreSize
-MemStore.size() 
-
-
-MemstoreSize
-CompactingMemStore.size() 
-
-
-MemstoreSize
-DefaultMemStore.size() 
-
-
-
-
-Methods in org.apache.hadoop.hbase.regionserver
 with parameters of type MemstoreSize 
-
-Modifier and Type
-Method and Description
-
-
-
-void
-MutableSegment.add(Cell cell,
-   boolean mslabUsed,
-   MemstoreSize memstoreSize)
-Adds the given cell into the segment
-
-
-
-void
-MemStore.add(Cell cell,
-   MemstoreSize memstoreSize)
-Write an update
-
-
-
-void
-AbstractMemStore.add(Cell cell,
-   MemstoreSize memstoreSize) 
-
-
-void
-HStore.add(Cell cell,
-   MemstoreSize memstoreSize)
-Adds a value to the memstore
-
-
-
-void
-MemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize)
-Write the updates
-
-
-
-void
-AbstractMemStore.add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize) 
-
-
-void
-HStore.add
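
The page removed above indexed every use of the MemstoreSize value object, which is renamed MemStoreSize elsewhere in this push. As a rough illustration only: the object pairs a data size with a heap size, and callers in the surrounding diffs read it back via getDataSize()/getHeapSize(). The class below is a simplified stand-in, not the real org.apache.hadoop.hbase.regionserver class:

    // Simplified stand-in for the MemStoreSize value object referenced above.
    final class SimpleMemStoreSize {
      private final long dataSize;  // cell data bytes
      private final long heapSize;  // on-heap occupancy bytes

      SimpleMemStoreSize(long dataSize, long heapSize) {
        this.dataSize = dataSize;
        this.heapSize = heapSize;
      }

      long getDataSize() { return dataSize; }
      long getHeapSize() { return heapSize; }
    }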

[30/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
@@ -200,7 +200,7 @@
 192    regionLoadFunctions = new CostFromRegionLoadFunction[] {
 193      new ReadRequestCostFunction(conf),
 194      new WriteRequestCostFunction(conf),
-195      new MemstoreSizeCostFunction(conf),
+195      new MemStoreSizeCostFunction(conf),
 196      new StoreFileCostFunction(conf)
 197    };
 198    regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
@@ -1676,13 +1676,13 @@
 1668   * Compute the cost of total memstore size.  The more unbalanced the higher the
 1669   * computed cost will be.  This uses a rolling average of regionload.
 1670   */
-1671  static class MemstoreSizeCostFunction extends CostFromRegionLoadAsRateFunction {
+1671  static class MemStoreSizeCostFunction extends CostFromRegionLoadAsRateFunction {
 1672
 1673    private static final String MEMSTORE_SIZE_COST_KEY =
 1674        "hbase.master.balancer.stochastic.memstoreSizeCost";
 1675    private static final float DEFAULT_MEMSTORE_SIZE_COST = 5;
 1676
-1677    MemstoreSizeCostFunction(Configuration conf) {
+1677    MemStoreSizeCostFunction(Configuration conf) {
 1678      super(conf);
 1679      this.setMultiplier(conf.getFloat(MEMSTORE_SIZE_COST_KEY, DEFAULT_MEMSTORE_SIZE_COST));
 1680    }
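
The MemStoreSizeCostFunction above reads its multiplier from hbase.master.balancer.stochastic.memstoreSizeCost with a default of 5. Below is a sketch of overriding that weight programmatically; on a running cluster the same key would typically be set in the master's hbase-site.xml, and the value 10 is only an illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class BalancerCostTuning {
      static Configuration withHeavierMemStoreCost() {
        Configuration conf = HBaseConfiguration.create();
        // Same key the cost function reads via conf.getFloat(...); default is 5.
        conf.setFloat("hbase.master.balancer.stochastic.memstoreSizeCost", 10f);
        return conf;
      }
    }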

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
@@ -200,7 +200,7 @@
 192    regionLoadFunctions = new CostFromRegionLoadFunction[] {
 193      new ReadRequestCostFunction(conf),
 194      new WriteRequestCostFunction(conf),
-195      new MemstoreSizeCostFunction(conf),
+195      new MemStoreSizeCostFunction(conf),
 196      new StoreFileCostFunction(conf)
 197    };
 198    regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
@@ -1676,13 +1676,13 @@
 1668   * Compute the cost of total memstore size.  The more unbalanced the higher the
 1669   * computed cost will be.  This uses a rolling average of regionload.
 1670   */
-1671  static class MemstoreSizeCostFunction extends CostFromRegionLoadAsRateFunction {
+1671  static class MemStoreSizeCostFunction extends CostFromRegionLoadAsRateFunction {
 1672
 1673    private static final String MEMSTORE_SIZE_COST_KEY =
 1674        "hbase.master.balancer.stochastic.memstoreSizeCost";
 1675    private static final float DEFAULT_MEMSTORE_SIZE_COST = 5;
 1676
-1677    MemstoreSizeCostFunction(Configuration conf) {
+1677    MemStoreSizeCostFunction(Configuration conf) {
 1678      super(conf);
 1679      this.setMultiplier(conf.getFloat(MEMSTORE_SIZE_COST_KEY, DEFAULT_MEMSTORE_SIZE_COST));
 1680    }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
@@ -200,7 +200,7 @@
 192regionLoadFunctions = new 
CostFromRegionLoadFunction[] {
 193  new 
ReadRequestCostFunction(conf),
 194  new 
WriteRequestCostFunction(conf),
-195  new 
MemstoreSizeCostFunction(conf),
+195  new 
MemStoreSiz

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
deleted file mode 100644
index 95e2ca9..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ /dev/null
@@ -1,3841 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019package 
org.apache.hadoop.hbase.regionserver;
-020
-021import 
javax.management.MalformedObjectNameException;
-022import javax.management.ObjectName;
-023import javax.servlet.http.HttpServlet;
-024import java.io.IOException;
-025import java.io.InterruptedIOException;
-026import 
java.lang.Thread.UncaughtExceptionHandler;
-027import java.lang.management.MemoryType;
-028import 
java.lang.management.MemoryUsage;
-029import java.lang.reflect.Constructor;
-030import java.net.BindException;
-031import java.net.InetAddress;
-032import java.net.InetSocketAddress;
-033import java.util.ArrayList;
-034import java.util.Collection;
-035import java.util.Collections;
-036import java.util.Comparator;
-037import java.util.HashMap;
-038import java.util.HashSet;
-039import java.util.Iterator;
-040import java.util.List;
-041import java.util.Map;
-042import java.util.Map.Entry;
-043import java.util.Objects;
-044import java.util.Set;
-045import java.util.SortedMap;
-046import java.util.TreeMap;
-047import java.util.TreeSet;
-048import 
java.util.concurrent.ConcurrentHashMap;
-049import 
java.util.concurrent.ConcurrentMap;
-050import 
java.util.concurrent.ConcurrentSkipListMap;
-051import 
java.util.concurrent.CountDownLatch;
-052import java.util.concurrent.TimeUnit;
-053import 
java.util.concurrent.atomic.AtomicBoolean;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import java.util.function.Function;
-056
-057import 
org.apache.commons.lang3.RandomUtils;
-058import 
org.apache.commons.lang3.SystemUtils;
-059import org.apache.commons.logging.Log;
-060import 
org.apache.commons.logging.LogFactory;
-061import 
org.apache.hadoop.conf.Configuration;
-062import org.apache.hadoop.fs.FileSystem;
-063import org.apache.hadoop.fs.Path;
-064import 
org.apache.hadoop.hbase.Abortable;
-065import 
org.apache.hadoop.hbase.ChoreService;
-066import 
org.apache.hadoop.hbase.ClockOutOfSyncException;
-067import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-068import 
org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
-069import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-070import 
org.apache.hadoop.hbase.HBaseConfiguration;
-071import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-072import 
org.apache.hadoop.hbase.HConstants;
-073import 
org.apache.hadoop.hbase.HealthCheckChore;
-074import 
org.apache.hadoop.hbase.MetaTableAccessor;
-075import 
org.apache.hadoop.hbase.NotServingRegionException;
-076import 
org.apache.hadoop.hbase.PleaseHoldException;
-077import 
org.apache.hadoop.hbase.ScheduledChore;
-078import 
org.apache.hadoop.hbase.ServerName;
-079import 
org.apache.hadoop.hbase.Stoppable;
-080import 
org.apache.hadoop.hbase.TableDescriptors;
-081import 
org.apache.hadoop.hbase.TableName;
-082import 
org.apache.hadoop.hbase.YouAreDeadException;
-083import 
org.apache.hadoop.hbase.ZNodeClearer;
-084import 
org.apache.hadoop.hbase.client.ClusterConnection;
-085import 
org.apache.hadoop.hbase.client.Connection;
-086import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-087import 
org.apache.hadoop.hbase.client.Put;
-088import 
org.apache.hadoop.hbase.client.RegionInfo;
-089import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-090import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-091import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092import 
org.apache.hadoop.hbase.client.locking.EntityLock;
-093

[10/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.html
index ae6ba6a..f781d96 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.html
@@ -54,7 +54,7 @@
 046
 047  // Store the edits size during 
replaying WAL. Use this to roll back the
 048  // global memstore size once a region 
opening failed.
-049  private final ConcurrentMap replayEditsPerRegion =
+049  private final ConcurrentMap replayEditsPerRegion =
 050new 
ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 051
 052  private long globalMemStoreLimit;
@@ -65,7 +65,7 @@
 057  private long 
globalOnHeapMemstoreLimitLowMark;
 058
 059  public 
RegionServerAccounting(Configuration conf) {
-060Pair 
globalMemstoreSizePair = MemorySizeUtil.getGlobalMemstoreSize(conf);
+060Pair 
globalMemstoreSizePair = MemorySizeUtil.getGlobalMemStoreSize(conf);
 061this.globalMemStoreLimit = 
globalMemstoreSizePair.getFirst();
 062this.memType = 
globalMemstoreSizePair.getSecond();
 063
this.globalMemStoreLimitLowMarkPercent =
@@ -81,21 +81,21 @@
 073// 
"hbase.regionserver.global.memstore.lowerLimit". Can get rid of this boolean 
passing then.
 074this.globalMemStoreLimitLowMark =
 075(long) (this.globalMemStoreLimit 
* this.globalMemStoreLimitLowMarkPercent);
-076this.globalOnHeapMemstoreLimit = 
MemorySizeUtil.getOnheapGlobalMemstoreSize(conf);
+076this.globalOnHeapMemstoreLimit = 
MemorySizeUtil.getOnheapGlobalMemStoreSize(conf);
 077this.globalOnHeapMemstoreLimitLowMark 
=
 078(long) 
(this.globalOnHeapMemstoreLimit * this.globalMemStoreLimitLowMarkPercent);
 079  }
 080
-081  long getGlobalMemstoreLimit() {
+081  long getGlobalMemStoreLimit() {
 082return this.globalMemStoreLimit;
 083  }
 084
-085  long getGlobalOnHeapMemstoreLimit() {
+085  long getGlobalOnHeapMemStoreLimit() {
 086return 
this.globalOnHeapMemstoreLimit;
 087  }
 088
 089  // Called by the tuners.
-090  void setGlobalMemstoreLimits(long 
newGlobalMemstoreLimit) {
+090  void setGlobalMemStoreLimits(long 
newGlobalMemstoreLimit) {
 091if (this.memType == MemoryType.HEAP) 
{
 092  this.globalMemStoreLimit = 
newGlobalMemstoreLimit;
 093  this.globalMemStoreLimitLowMark =
@@ -111,38 +111,38 @@
 103return this.memType == 
MemoryType.NON_HEAP;
 104  }
 105
-106  long getGlobalMemstoreLimitLowMark() 
{
+106  long getGlobalMemStoreLimitLowMark() 
{
 107return 
this.globalMemStoreLimitLowMark;
 108  }
 109
-110  float 
getGlobalMemstoreLimitLowMarkPercent() {
+110  float 
getGlobalMemStoreLimitLowMarkPercent() {
 111return 
this.globalMemStoreLimitLowMarkPercent;
 112  }
 113
 114  /**
 115   * @return the global Memstore data 
size in the RegionServer
 116   */
-117  public long getGlobalMemstoreDataSize() 
{
+117  public long getGlobalMemStoreDataSize() 
{
 118return 
globalMemstoreDataSize.sum();
 119  }
 120
 121  /**
 122   * @return the global memstore heap 
size in the RegionServer
 123   */
-124  public long getGlobalMemstoreHeapSize() 
{
+124  public long getGlobalMemStoreHeapSize() 
{
 125return 
this.globalMemstoreHeapSize.sum();
 126  }
 127
 128  /**
-129   * @param memStoreSize the Memstore 
size will be added to 
-130   *the global Memstore size 
+129   * @param memStoreSize the Memstore 
size will be added to
+130   *the global Memstore size
 131   */
-132  public void 
incGlobalMemstoreSize(MemstoreSize memStoreSize) {
+132  public void 
incGlobalMemStoreSize(MemStoreSize memStoreSize) {
 133
globalMemstoreDataSize.add(memStoreSize.getDataSize());
 134
globalMemstoreHeapSize.add(memStoreSize.getHeapSize());
 135  }
 136
-137  public void 
decGlobalMemstoreSize(MemstoreSize memStoreSize) {
+137  public void 
decGlobalMemStoreSize(MemStoreSize memStoreSize) {
 138
globalMemstoreDataSize.add(-memStoreSize.getDataSize());
 139
globalMemstoreHeapSize.add(-memStoreSize.getHeapSize());
 140  }
@@ -155,7 +155,7 @@
 147// for onheap memstore we check if 
the global memstore size and the
 148// global heap overhead is greater 
than the global memstore limit
 149if (memType == MemoryType.HEAP) {
-150  if (getGlobalMemstoreHeapSize() 
>= globalMemStoreLimit) {
+150  if (getGlobalMemStoreHeapSize() 
>= globalMemStoreLimit) {
 151return 
FlushType.ABOVE_ONHEAP_HIGHER_MARK;
 152  }
 153} else {
@@ -166,11 +166,11 @@
 158  // global memstore limit 
'hbase.regionserver.global.m
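
The RegionServerAccounting hunk above derives the low-water mark from the global memstore limit and reports ABOVE_ONHEAP_HIGHER_MARK once the tracked global heap size reaches that limit. The sketch below mirrors that arithmetic in plain Java; the lower-mark comparison is an assumption based on the field names, since only its computation is visible in this excerpt:

    // Standalone sketch of the on-heap flush-pressure check, not HBase code.
    final class GlobalMemStorePressureCheck {
      private final long globalMemStoreLimit;
      private final long globalMemStoreLimitLowMark;
      private long globalMemStoreHeapSize;

      GlobalMemStorePressureCheck(long globalMemStoreLimit, float lowMarkPercent) {
        this.globalMemStoreLimit = globalMemStoreLimit;
        this.globalMemStoreLimitLowMark = (long) (globalMemStoreLimit * lowMarkPercent);
      }

      void incGlobalMemStoreSize(long heapSizeDelta) {
        globalMemStoreHeapSize += heapSizeDelta;
      }

      boolean aboveHigherMark() {
        return globalMemStoreHeapSize >= globalMemStoreLimit;
      }

      boolean aboveLowerMark() {
        return globalMemStoreHeapSize >= globalMemStoreLimitLowMark;
      }
    }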

[16/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index a0961f7..4a7f4ae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegativeMemStoreDataSize(long memstoreDataSize, long delta) {
 1215// This is extr

[12/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index ce2e1a7..8596033 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -115,10 +115,10 @@
 107this.flushHandlers = new 
FlushHandler[handlerCount];
 108LOG.info("globalMemStoreLimit="
 109+ TraditionalBinaryPrefix
-110
.long2String(this.server.getRegionServerAccounting().getGlobalMemstoreLimit(), 
"", 1)
+110
.long2String(this.server.getRegionServerAccounting().getGlobalMemStoreLimit(), 
"", 1)
 111+ ", 
globalMemStoreLimitLowMark="
 112+ 
TraditionalBinaryPrefix.long2String(
-113  
this.server.getRegionServerAccounting().getGlobalMemstoreLimitLowMark(), "", 
1)
+113  
this.server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 
1)
 114+ ", Offheap="
 115+ 
(this.server.getRegionServerAccounting().isOffheap()));
 116  }
@@ -144,12 +144,12 @@
 136while (!flushedOne) {
 137  // Find the biggest region that 
doesn't have too many storefiles
 138  // (might be null!)
-139  Region bestFlushableRegion = 
getBiggestMemstoreRegion(regionsBySize, excludedRegions, true);
+139  Region bestFlushableRegion = 
getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
 140  // Find the biggest region, total, 
even if it might have too many flushes.
-141  Region bestAnyRegion = 
getBiggestMemstoreRegion(
+141  Region bestAnyRegion = 
getBiggestMemStoreRegion(
 142  regionsBySize, excludedRegions, 
false);
 143  // Find the biggest region that is 
a secondary region
-144  Region bestRegionReplica = 
getBiggestMemstoreOfRegionReplica(regionsBySize,
+144  Region bestRegionReplica = 
getBiggestMemStoreOfRegionReplica(regionsBySize,
 145excludedRegions);
 146
 147  if (bestAnyRegion == null 
&& bestRegionReplica == null) {
@@ -159,7 +159,7 @@
 151
 152  Region regionToFlush;
 153  if (bestFlushableRegion != null 
&&
-154  bestAnyRegion.getMemstoreSize() 
> 2 * bestFlushableRegion.getMemstoreSize()) {
+154  bestAnyRegion.getMemStoreSize() 
> 2 * bestFlushableRegion.getMemStoreSize()) {
 155// Even if it's not supposed to 
be flushed, pick a region if it's more than twice
 156// as big as the best flushable 
one - otherwise when we're under pressure we make
 157// lots of little flushes and 
cause lots of compactions, etc, which just makes
@@ -168,9 +168,9 @@
 160  LOG.debug("Under global heap 
pressure: " + "Region "
 161  + 
bestAnyRegion.getRegionInfo().getRegionNameAsString()
 162  + " has too many " + "store 
files, but is "
-163  + 
TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemstoreSize(), "", 1)
+163  + 
TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreSize(), "", 1)
 164  + " vs best flushable 
region's "
-165  + 
TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemstoreSize(), "", 
1)
+165  + 
TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemStoreSize(), "", 
1)
 166  + ". Choosing the 
bigger.");
 167}
 168regionToFlush = bestAnyRegion;
@@ -183,20 +183,20 @@
 175  }
 176
 177  Preconditions.checkState(
-178(regionToFlush != null && 
regionToFlush.getMemstoreSize() > 0) ||
-179(bestRegionReplica != null 
&& bestRegionReplica.getMemstoreSize() > 0));
+178(regionToFlush != null && 
regionToFlush.getMemStoreSize() > 0) ||
+179(bestRegionReplica != null 
&& bestRegionReplica.getMemStoreSize() > 0));
 180
 181  if (regionToFlush == null ||
 182  (bestRegionReplica != null 
&&
 183   
ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf) 
&&
-184   
(bestRegionReplica.getMemstoreSize()
-185   > secondaryMultiplier * 
regionToFlush.getMemstoreSize( {
+184   
(bestRegionReplica.getMemStoreSize()
+185   > secondaryMultiplier * 
regionToFlush.getMemStoreSize( {
 186LOG.info("Refreshing storefiles 
of region " + bestRegionReplica
 187+ " due to global heap 
pressure. Total memstore datasize="
 188+ StringUtils
-189
.humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreDataSize())
+189
.humanReadableInt(server.getRegionServerAccounting().getGlobalMemStoreDataSize())
 190

[29/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
new file mode 100644
index 000..788fb93
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
@@ -0,0 +1,1786 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.master.balancer;
+019
+020import java.util.ArrayDeque;
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.RegionLoad;
+040import 
org.apache.hadoop.hbase.ServerLoad;
+041import 
org.apache.hadoop.hbase.ServerName;
+042import 
org.apache.hadoop.hbase.TableName;
+043import 
org.apache.hadoop.hbase.client.RegionInfo;
+044import 
org.apache.hadoop.hbase.master.MasterServices;
+045import 
org.apache.hadoop.hbase.master.RegionPlan;
+046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+052import 
org.apache.hadoop.hbase.util.Bytes;
+053import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+054import 
org.apache.yetus.audience.InterfaceAudience;
+055
+056import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Optional;
+057import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+058
+059import 
com.google.common.annotations.VisibleForTesting;
+060
+061/**
+062 * 

This is a best effort load balancer. Given a Cost function F(C) => x It will +063 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the +064 * new cluster state becomes the plan. It includes costs functions to compute the cost of:

+065 *
    +066 *
  • Region Load
  • +067 *
  • Table Load
  • +068 *
  • Data Locality
  • +069 *
  • Memstore Sizes
  • +070 *
  • Storefile Sizes
  • +071 *
+072 * +073 * +074 *

Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost +075 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are +076 * scaled by their respective multipliers:

+077 * +078 *
    +079 *
  • hbase.master.balancer.stochastic.regionLoadCost
  • +080 *
  • hbase.master.balancer.stochastic.moveCost
  • +081 *
  • hbase.master.balancer.stochastic.tableLoadCost
  • +082 *
  • hbase.master.balancer.stochastic.localityCost
  • +083 *
  • hbase.master.balancer.stochastic.memstoreSizeCost
  • +084 *
  • hbase.master.balancer.stochastic.storefileSizeCost
  • +085 *
+086 * +087 *

In addition to the above configurations, the balancer can be tuned by the following +088 * configuration values:

+089 *
    +0

[46/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 7037038..c2cdb35 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1074,31 +1074,31 @@
  
 add(ProcedureProtos.Procedure)
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFormatReader.WalProcedureMap
  
-add(Iterable,
 MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.AbstractMemStore
+add(Iterable,
 MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.AbstractMemStore
  
-add(Cell,
 MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.AbstractMemStore
+add(Cell,
 MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.AbstractMemStore
  
 add(Cell)
 - Method in class org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapCollection
  
 add(Cell)
 - Method in class org.apache.hadoop.hbase.regionserver.CellSet
  
-add(Cell,
 MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HStore
+add(Cell,
 MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HStore
 
 Adds a value to the memstore
 
-add(Iterable,
 MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HStore
+add(Iterable,
 MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HStore
 
 Adds the specified value to the memstore
 
-add(Cell,
 MemstoreSize) - Method in interface 
org.apache.hadoop.hbase.regionserver.MemStore
+add(Cell,
 MemStoreSize) - Method in interface 
org.apache.hadoop.hbase.regionserver.MemStore
 
 Write an update
 
-add(Iterable,
 MemstoreSize) - Method in interface 
org.apache.hadoop.hbase.regionserver.MemStore
+add(Iterable,
 MemStoreSize) - Method in interface 
org.apache.hadoop.hbase.regionserver.MemStore
 
 Write the updates
 
-add(Cell,
 boolean, MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.MutableSegment
+add(Cell,
 boolean, MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.MutableSegment
 
 Adds the given cell into the segment
 
@@ -1255,7 +1255,7 @@
 
 Add the given results to cache and get valid results 
back.
 
-addAndGetMemstoreSize(MemstoreSize)
 - Method in class org.apache.hadoop.hbase.regionserver.HRegion
+addAndGetMemStoreSize(MemStoreSize)
 - Method in class org.apache.hadoop.hbase.regionserver.HRegion
 
 Increase the size of mem store in this region and the size 
of global mem
  store
@@ -1916,7 +1916,7 @@
  
 addMaster(Configuration,
 int, User) - Method in class org.apache.hadoop.hbase.LocalHBaseCluster
  
-addMemstoreSize(MemstoreSize)
 - Method in class org.apache.hadoop.hbase.regionserver.RegionServicesForStores
+addMemStoreSize(MemStoreSize)
 - Method in class org.apache.hadoop.hbase.regionserver.RegionServicesForStores
  
 addMetaData(ByteBuffer)
 - Method in class org.apache.hadoop.hbase.io.hfile.HFileBlock
 
@@ -2098,7 +2098,7 @@
 
 Add a region from the head or tail to the List of regions 
to return.
 
-addRegionReplayEditsSize(byte[],
 MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.RegionServerAccounting
+addRegionReplayEditsSize(byte[],
 MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.RegionServerAccounting
 
 Add memStoreSize to replayEditsPerRegion.
 
@@ -3278,11 +3278,11 @@
  choose CompactSelection from candidates --
  First exclude bulk-load files if indicated in configuration.
 
-applyFamilyMapToMemstore(Map>, MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
+applyFamilyMapToMemStore(Map>, MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
  
-applyToMemstore(HStore,
 List, boolean, MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
+applyToMemStore(HStore,
 List, boolean, MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
  
-applyToMemstore(HStore,
 Cell, MemstoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
+applyToMemStore(HStore,
 Cell, MemStoreSize) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
  
 archivedTables
 - Variable in class org.apache.hadoop.hbase.backup.example.HFileArchiveTableMonitor
  
@@ -9199,7 +9199,7 @@
  C-tor to be used when new CellArrayImmutableSegment is a result of compaction 
of a
  list of older ImmutableSegments.
 
-CellArrayImmutableSegment(CSLMImmutableSegment,
 MemstoreSize) - Constructor for class 
org.apache.hadoop.hbase.regionserver.CellArrayImmutableSegment
+CellArrayImmutableSegment(CSLMImmutableSegment,
 MemStoreSize) - Constructor for class 
org.apache.hadoop.hbase.regionserver.CellArrayImmutableSegment
 
 
  C-tor to be used when new CellChu

[31/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.RegionStatistics.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.RegionStatistics.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.RegionStatistics.html
index 12cfca6..1e841d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.RegionStatistics.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.RegionStatistics.html
@@ -67,12 +67,12 @@
 059private int compactionPressure = 0;
 060
 061public void update(RegionLoadStats 
currentStats) {
-062  this.memstoreLoad = 
currentStats.getMemstoreLoad();
+062  this.memstoreLoad = 
currentStats.getMemStoreLoad();
 063  this.heapOccupancy = 
currentStats.getHeapOccupancy();
 064  this.compactionPressure = 
currentStats.getCompactionPressure();
 065}
 066
-067public int 
getMemstoreLoadPercent(){
+067public int 
getMemStoreLoadPercent(){
 068  return this.memstoreLoad;
 069}
 070

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.html
index 12cfca6..1e841d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/backoff/ServerStatistics.html
@@ -67,12 +67,12 @@
 059private int compactionPressure = 0;
 060
 061public void update(RegionLoadStats 
currentStats) {
-062  this.memstoreLoad = 
currentStats.getMemstoreLoad();
+062  this.memstoreLoad = 
currentStats.getMemStoreLoad();
 063  this.heapOccupancy = 
currentStats.getHeapOccupancy();
 064  this.compactionPressure = 
currentStats.getCompactionPressure();
 065}
 066
-067public int 
getMemstoreLoadPercent(){
+067public int 
getMemStoreLoadPercent(){
 068  return this.memstoreLoad;
 069}
 070

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
index 85f0bf2..daafefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
@@ -497,9 +497,9 @@
 489
 490boolean isPrimaryReplicaReader();
 491
-492boolean shouldIncludeMemstoreTS();
+492boolean shouldIncludeMemStoreTS();
 493
-494boolean isDecodeMemstoreTS();
+494boolean isDecodeMemStoreTS();
 495
 496DataBlockEncoding 
getEffectiveEncodingInCache(boolean isCompaction);
 497

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
index 85f0bf2..daafefd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
@@ -497,9 +497,9 @@
 489
 490boolean isPrimaryReplicaReader();
 491
-492boolean shouldIncludeMemstoreTS();
+492boolean shouldIncludeMemStoreTS();
 493
-494boolean isDecodeMemstoreTS();
+494boolean isDecodeMemStoreTS();
 495
 496DataBlockEncoding 
getEffectiveEncodingInCache(boolean isCompaction);
 497

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
index 85f0bf2..daafefd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
@@ -497,9 +497,9 @@
 489
 490boolean isPrimaryReplicaReader();
 491
-492boolean shouldIncludeMemstoreTS();
+492boolean shouldIncludeMemStoreTS();
 493
-494boolean isDecodeMemstoreTS();
+494boolean isDecodeM

[41/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
index bfe663b..2445d1e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
@@ -213,15 +213,15 @@ implements 
 void
-add(Cell cell,
-   MemstoreSize memstoreSize)
+add(Cell cell,
+   MemStoreSize memstoreSize)
 Write an update
 
 
 
 void
-add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-   MemstoreSize memstoreSize)
+add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+   MemStoreSize memstoreSize)
 Write the updates
 
 
@@ -290,7 +290,7 @@ implements getSnapshot() 
 
 
-MemstoreSize
+MemStoreSize
 getSnapshotSize()
 Return the size of the snapshot(s) if any
 
@@ -301,9 +301,9 @@ implements 
 private void
-internalAdd(Cell toAdd,
+internalAdd(Cell toAdd,
boolean mslabUsed,
-   MemstoreSize memstoreSize) 
+   MemStoreSize memstoreSize) 
 
 
 protected abstract long
@@ -337,15 +337,15 @@ implements 
 private void
-upsert(Cell cell,
+upsert(Cell cell,
   long readpoint,
-  MemstoreSize memstoreSize) 
+  MemStoreSize memstoreSize) 
 
 
 void
-upsert(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+upsert(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
   long readpoint,
-  MemstoreSize memstoreSize)
+  MemStoreSize memstoreSize)
 Update or insert the specified cells.
 
 
@@ -537,37 +537,37 @@ implements 
+
 
 
 
 
 add
 public void add(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
-MemstoreSize memstoreSize)
-Description copied from 
interface: MemStore
+MemStoreSize memstoreSize)
+Description copied from 
interface: MemStore
 Write the updates
 
 Specified by:
-add in
 interface MemStore
+add in
 interface MemStore
 memstoreSize - The delta in memstore size will be passed back 
via this.
 This will include both data size and heap overhead delta.
 
 
 
-
+
 
 
 
 
 add
 public void add(Cell cell,
-MemstoreSize memstoreSize)
-Description copied from 
interface: MemStore
+MemStoreSize memstoreSize)
+Description copied from 
interface: MemStore
 Write an update
 
 Specified by:
-add in
 interface MemStore
+add in
 interface MemStore
 memstoreSize - The delta in memstore size will be passed back 
via this.
 This will include both data size and heap overhead delta.
 
@@ -582,7 +582,7 @@ implements Cell deepCopyIfNeeded(Cell cell)
 
 
-
+
 
 
 
@@ -590,8 +590,8 @@ implements upsert(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
long readpoint,
-   MemstoreSize memstoreSize)
-Description copied from 
interface: MemStore
+   MemStoreSize memstoreSize)
+Description copied from 
interface: MemStore
 Update or insert the specified cells.
  
  For each Cell, insert into MemStore. This will atomically upsert the value 
for that
@@ -604,7 +604,7 @@ implements Specified by:
-upsert in
 interface MemStore
+upsert in
 interface MemStore
 readpoint - readpoint below which we can safely remove 
duplicate Cells.
 memstoreSize - The delta in memstore size will be passed back 
via this.
 This will include both data size and heap overhead delta.
@@ -653,7 +653,7 @@ implements 
 
 getSnapshotSize
-public MemstoreSize getSnapshotSize()
+public MemStoreSize getSnapshotSize()
 Description copied from 
interface: MemStore
 Return the size of the snapshot(s) if any
 
@@ -695,7 +695,7 @@ implements dump(org.apache.commons.logging.Log log)
 
 
-
+
 
 
 
@@ -703,7 +703,7 @@ implements upsert(Cell cell,
 long readpoint,
-MemstoreSize memstoreSize)
+MemStoreSize memstoreSize)
 
 
 
@@ -735,7 +735,7 @@ implements Cell maybeCloneWithAllocator(Cell cell)
 
 
-
+
 
 
 
@@ -743,7 +743,7 @@ implements internalAdd(Cell toAdd,
  boolean mslabUsed,
- MemstoreSize memstoreSize)
+ MemStoreSize memstoreSize)
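The "delta in memstore size will be passed back via this" contract described above is easiest to see in a small usage sketch. It assumes only what the diff shows (add(Cell, MemStoreSize) on the memstore and a mutable MemStoreSize accumulator); the row, family and value bytes are invented:

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
    import org.apache.hadoop.hbase.regionserver.MemStoreSize;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemStoreAddSketch {
      public static void main(String[] args) {
        DefaultMemStore memstore = new DefaultMemStore();
        MemStoreSize delta = new MemStoreSize();   // accumulator the caller passes in
        memstore.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
            Bytes.toBytes("q"), 1L, Bytes.toBytes("value")), delta);
        // After the call the accumulator carries both the data size and the heap overhead delta.
        System.out.println(delta.getDataSize() + " data bytes, " + delta.getHeapSize() + " heap bytes");
      }
    }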
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs

[27/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
@@ -200,7 +200,7 @@
 192regionLoadFunctions = new 
CostFromRegionLoadFunction[] {
 193  new 
ReadRequestCostFunction(conf),
 194  new 
WriteRequestCostFunction(conf),
-195  new 
MemstoreSizeCostFunction(conf),
+195  new 
MemStoreSizeCostFunction(conf),
 196  new StoreFileCostFunction(conf)
 197};
 198regionReplicaHostCostFunction = new 
RegionReplicaHostCostFunction(conf);
@@ -1676,13 +1676,13 @@
 1668   * Compute the cost of total memstore 
size.  The more unbalanced the higher the
 1669   * computed cost will be.  This uses a 
rolling average of regionload.
 1670   */
-1671  static class MemstoreSizeCostFunction 
extends CostFromRegionLoadAsRateFunction {
+1671  static class MemStoreSizeCostFunction 
extends CostFromRegionLoadAsRateFunction {
 1672
 1673private static final String 
MEMSTORE_SIZE_COST_KEY =
 1674
"hbase.master.balancer.stochastic.memstoreSizeCost";
 1675private static final float 
DEFAULT_MEMSTORE_SIZE_COST = 5;
 1676
-1677
MemstoreSizeCostFunction(Configuration conf) {
+1677
MemStoreSizeCostFunction(Configuration conf) {
 1678  super(conf);
 1679  
this.setMultiplier(conf.getFloat(MEMSTORE_SIZE_COST_KEY, 
DEFAULT_MEMSTORE_SIZE_COST));
 1680}
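MemStoreSizeCostFunction itself only wires a multiplier to the hbase.master.balancer.stochastic.memstoreSizeCost key; how an imbalanced per-server metric becomes a cost in [0, 1] lives in its parent classes. The following is a simplified, hypothetical illustration of that idea (total deviation from the mean, scaled by the worst possible deviation), not the actual CostFromRegionLoadAsRateFunction code:

    public class ImbalanceCostSketch {
      // Hypothetical: scale the total deviation from the mean by the worst case
      // (everything on a single server) so the result lands in [0, 1].
      static double imbalanceCost(double[] perServerLoad) {
        int n = perServerLoad.length;
        if (n == 0) {
          return 0;
        }
        double total = 0;
        for (double v : perServerLoad) {
          total += v;
        }
        double mean = total / n;
        double deviation = 0;
        for (double v : perServerLoad) {
          deviation += Math.abs(v - mean);
        }
        double worst = 2 * total * (n - 1) / n;  // all load concentrated on one server
        return worst == 0 ? 0 : Math.min(1.0, deviation / worst);
      }

      public static void main(String[] args) {
        System.out.println(imbalanceCost(new double[] {10, 10, 10}));  // 0.0, perfectly balanced
        System.out.println(imbalanceCost(new double[] {30, 0, 0}));    // 1.0, worst case
      }
    }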

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
@@ -200,7 +200,7 @@
 192regionLoadFunctions = new 
CostFromRegionLoadFunction[] {
 193  new 
ReadRequestCostFunction(conf),
 194  new 
WriteRequestCostFunction(conf),
-195  new 
MemstoreSizeCostFunction(conf),
+195  new 
MemStoreSizeCostFunction(conf),
 196  new StoreFileCostFunction(conf)
 197};
 198regionReplicaHostCostFunction = new 
RegionReplicaHostCostFunction(conf);
@@ -1676,13 +1676,13 @@
 1668   * Compute the cost of total memstore 
size.  The more unbalanced the higher the
 1669   * computed cost will be.  This uses a 
rolling average of regionload.
 1670   */
-1671  static class MemstoreSizeCostFunction 
extends CostFromRegionLoadAsRateFunction {
+1671  static class MemStoreSizeCostFunction 
extends CostFromRegionLoadAsRateFunction {
 1672
 1673private static final String 
MEMSTORE_SIZE_COST_KEY =
 1674
"hbase.master.balancer.stochastic.memstoreSizeCost";
 1675private static final float 
DEFAULT_MEMSTORE_SIZE_COST = 5;
 1676
-1677
MemstoreSizeCostFunction(Configuration conf) {
+1677
MemStoreSizeCostFunction(Configuration conf) {
 1678  super(conf);
 1679  
this.setMultiplier(conf.getFloat(MEMSTORE_SIZE_COST_KEY, 
DEFAULT_MEMSTORE_SIZE_COST));
 1680}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 5c157b5..788fb93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -200,7 +200,7 @@
 192regionLoadFunctions = new 
CostFromRegionLoadFunction[] {
 193  new 
ReadRequestCostFunction(conf),
 194  new 
WriteRequestCostFunction(conf),
-195  new 
MemstoreSizeC

[11/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSize.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
new file mode 100644
index 000..14d65f4
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSize.html
@@ -0,0 +1,179 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.regionserver;
+019
+020import 
org.apache.yetus.audience.InterfaceAudience;
+021
+022/**
+023 * Wraps the data size part and total 
heap space occupied by the memstore.
+024 */
+025@InterfaceAudience.Private
+026public class MemStoreSize {
+027
+028  // 'dataSize' tracks the Cell's data 
bytes size alone (Key bytes, value bytes). A cell's data can
+029  // be in on heap or off heap area 
depending on the MSLAB and its configuration to be using on heap
+030  // or off heap LABs
+031  private long dataSize;
+032  // 'heapSize' tracks all Cell's heap 
size occupancy. This will include Cell POJO heap overhead.
+033  // When Cells in on heap area, this 
will include the cells data size as well.
+034  private long heapSize;
+035  final private boolean isEmpty;
+036
+037  public MemStoreSize() {
+038dataSize = 0;
+039heapSize = 0;
+040isEmpty = false;
+041  }
+042
+043  public MemStoreSize(boolean isEmpty) 
{
+044dataSize = 0;
+045heapSize = 0;
+046this.isEmpty = isEmpty;
+047  }
+048
+049  public boolean isEmpty() {
+050return isEmpty;
+051  }
+052
+053  public MemStoreSize(long dataSize, long 
heapSize) {
+054this.dataSize = dataSize;
+055this.heapSize = heapSize;
+056this.isEmpty = false;
+057  }
+058
+059  public void incMemStoreSize(long 
dataSizeDelta, long heapSizeDelta) {
+060this.dataSize += dataSizeDelta;
+061this.heapSize += heapSizeDelta;
+062  }
+063
+064  public void 
incMemStoreSize(MemStoreSize delta) {
+065this.dataSize += delta.dataSize;
+066this.heapSize += delta.heapSize;
+067  }
+068
+069  public void decMemStoreSize(long 
dataSizeDelta, long heapSizeDelta) {
+070this.dataSize -= dataSizeDelta;
+071this.heapSize -= heapSizeDelta;
+072  }
+073
+074  public void 
decMemStoreSize(MemStoreSize delta) {
+075this.dataSize -= delta.dataSize;
+076this.heapSize -= delta.heapSize;
+077  }
+078
+079  public long getDataSize() {
+080return isEmpty ? 0 : dataSize;
+081  }
+082
+083  public long getHeapSize() {
+084return isEmpty ? 0 : heapSize;
+085  }
+086
+087  @Override
+088  public boolean equals(Object obj) {
+089if (obj == null || !(obj instanceof 
MemStoreSize)) {
+090  return false;
+091}
+092MemStoreSize other = (MemStoreSize) 
obj;
+093return this.dataSize == 
other.dataSize && this.heapSize == other.heapSize;
+094  }
+095
+096  @Override
+097  public int hashCode() {
+098long h = 13 * this.dataSize;
+099h = h + 14 * this.heapSize;
+100return (int) h;
+101  }
+102
+103  @Override
+104  public String toString() {
+105return "dataSize=" + this.dataSize + 
" , heapSize=" + this.heapSize;
+106  }
+107}
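As a hedged usage note on the MemStoreSize class just added: the data/heap pair is meant to be incremented on writes and decremented symmetrically when the corresponding data leaves the memstore. The deltas below are invented numbers; the methods are the ones shown in the source above:

    import org.apache.hadoop.hbase.regionserver.MemStoreSize;

    public class MemStoreSizeSketch {
      public static void main(String[] args) {
        MemStoreSize size = new MemStoreSize();
        size.incMemStoreSize(1024, 2048);                   // a write: 1 KB of cell data, 2 KB on heap
        size.incMemStoreSize(new MemStoreSize(512, 1024));  // another write, expressed as a delta object
        size.decMemStoreSize(512, 1024);                    // the same delta removed, e.g. after a flush
        System.out.println(size);                           // dataSize=1024 , heapSize=2048
      }
    }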
+
+
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemstoreSize.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemstoreSize.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemstoreSize.html
deleted file mode 100644
index ed00ac9..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemstoreSize.html
+++ /dev/null
@@ -1,179 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software 
Foundation (ASF) under 

[26/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html
index 58c6ad7..939d24a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html
@@ -150,7 +150,7 @@
 142long blockedFlushCount = 
context.getBlockedFlushCount();
 143long unblockedFlushCount = 
context.getUnblockedFlushCount();
 144long totalOnheapFlushCount = 
blockedFlushCount + unblockedFlushCount;
-145boolean offheapMemstore = 
context.isOffheapMemstore();
+145boolean offheapMemstore = 
context.isOffheapMemStore();
 146float newMemstoreSize;
 147float newBlockCacheSize;
 148
@@ -231,7 +231,7 @@
 223  newBlockCacheSize = 
blockCachePercentMinRange;
 224}
 225
TUNER_RESULT.setBlockCacheSize(newBlockCacheSize);
-226
TUNER_RESULT.setMemstoreSize(newMemstoreSize);
+226
TUNER_RESULT.setMemStoreSize(newMemstoreSize);
 227prevTuneDirection = 
newTuneDirection;
 228return TUNER_RESULT;
 229  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
index a92b42e..5400ab5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
@@ -111,10 +111,10 @@
 103   * @return size of data that is going 
to be flushed from active set
 104   */
 105  @Override
-106  public MemstoreSize getFlushableSize() 
{
-107MemstoreSize snapshotSize = 
getSnapshotSize();
+106  public MemStoreSize getFlushableSize() 
{
+107MemStoreSize snapshotSize = 
getSnapshotSize();
 108return snapshotSize.getDataSize() 
> 0 ? snapshotSize
-109: new MemstoreSize(keySize(), 
heapSize());
+109: new MemStoreSize(keySize(), 
heapSize());
 110  }
 111
 112  @Override
@@ -162,8 +162,8 @@
 154  }
 155
 156  @Override
-157  public MemstoreSize size() {
-158return new 
MemstoreSize(this.active.keySize(), this.active.heapSize());
+157  public MemStoreSize size() {
+158return new 
MemStoreSize(this.active.keySize(), this.active.heapSize());
 159  }
 160
 161  /**
@@ -202,7 +202,7 @@
 194byte [] fam = Bytes.toBytes("col");
 195byte [] qf = Bytes.toBytes("umn");
 196byte [] empty = new byte[0];
-197MemstoreSize memstoreSize = new 
MemstoreSize();
+197MemStoreSize memstoreSize = new 
MemStoreSize();
 198for (int i = 0; i < count; i++) 
{
 199  // Give each its own ts
 200  memstore1.add(new 
KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSize);
@@ -216,7 +216,7 @@
 208+ (memstoreSize.getDataSize() + 
memstoreSize.getHeapSize()));
 209// Make a variably sized memstore.
 210DefaultMemStore memstore2 = new 
DefaultMemStore();
-211memstoreSize = new MemstoreSize();
+211memstoreSize = new MemStoreSize();
 212for (int i = 0; i < count; i++) 
{
 213  memstore2.add(new 
KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memstoreSize);
 214}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
index c661130..62d811d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
@@ -54,7 +54,7 @@
 046int familyNumber = 
region.getTableDescriptor().getColumnFamilyCount();
 047// For multiple families, lower bound 
is the "average flush size" by default
 048// unless setting in configuration is 
larger.
-049long flushSizeLowerBound = 
region.getMemstoreFlushSize() / familyNumber;
+049long flushSizeLowerBound = 
region.getMemStoreFlushSize() / familyNumber;
 050long minimumLowerBound =
 051
getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
 052  
DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
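In plain numbers, the comment above means the per-family lower bound is the region flush size averaged over the families, floored by the configured minimum. A hedged worked example; all numbers are invented and the parameter stands in for HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN:

    public class FlushLowerBoundSketch {
      // "Average flush size" per family, unless the configured minimum is larger.
      static long lowerBound(long memStoreFlushSize, int familyNumber, long configuredMin) {
        return Math.max(memStoreFlushSize / familyNumber, configuredMin);
      }

      public static void main(String[] args) {
        long flushSize = 128L * 1024 * 1024;      // 128 MB region flush size (invented)
        long configuredMin = 16L * 1024 * 1024;   // 16 MB minimum (invented)
        System.out.println(lowerBound(flushSize, 4, configuredMin));   // 32 MB: the average wins
        System.out.println(lowerBound(flushSize, 16, configuredMin));  // 16 MB: the minimum wins
      }
    }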

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/h

[22/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private

[01/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 98cfcf4b4 -> 3332cacab


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.MockWAL.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.MockWAL.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.MockWAL.html
index ff7319f..5d18a7d 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.MockWAL.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.MockWAL.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class AbstractTestWALReplay.MockWAL
+static class AbstractTestWALReplay.MockWAL
 extends org.apache.hadoop.hbase.regionserver.wal.FSHLog
 
 
@@ -239,7 +239,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.FSHLog
 
 
 Methods inherited from 
class org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL
-abortCacheFlush, append, atHeadOfRingBufferEventHandlerAppend, 
blockOnSync, close, computeFilename, findRegionsToForceFlush, 
getCoprocessorHost, getCurrentFileName, getEarliestMemstoreSeqNum, 
getEarliestMemstoreSeqNum, getFilenum, getFileNumFromFileName, getFiles, 
getLogFileSize, getLogFileSizeIfBeingWritten, getNumLogFiles, 
getNumRolledLogFiles, getOldPath, getPreallocatedEventCount, getSyncFuture, 
getUnflushedEntriesCount, getWALArchivePath, isUnflushedEntries, postSync, 
registerWALActionsListener, replaceWriter, requestLogRoll, requestLogRoll, 
rollWriter, rollWriter, shutdown, stampSequenceIdAndPublishToRingBuffer, 
startCacheFlush, startCacheFlush, toString, unregisterWALActionsListener, 
updateStore
+abortCacheFlush, append, atHeadOfRingBufferEventHandlerAppend, 
blockOnSync, close, computeFilename, findRegionsToForceFlush, 
getCoprocessorHost, getCurrentFileName, getEarliestMemStoreSeqNum, 
getEarliestMemStoreSeqNum, getFilenum, getFileNumFromFileName, getFiles, 
getLogFileSize, getLogFileSizeIfBeingWritten, getNumLogFiles, 
getNumRolledLogFiles, getOldPath, getPreallocatedEventCount, getSyncFuture, 
getUnflushedEntriesCount, getWALArchivePath, isUnflushedEntries, postSync, 
registerWALActionsListener, replaceWriter, requestLogRoll, requestLogRoll, 
rollWriter, rollWriter, shutdown, stampSequenceIdAndPublishToRingBuffer, 
startCacheFlush, startCacheFlush, toString, unregisterWALActionsListener, 
updateStore
 
 
 
@@ -268,7 +268,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.FSHLog
 
 
 doCompleteCacheFlush
-boolean doCompleteCacheFlush
+boolean doCompleteCacheFlush
 
 
 
@@ -285,7 +285,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.FSHLog
 
 
 MockWAL
-public MockWAL(org.apache.hadoop.fs.FileSystem fs,
+public MockWAL(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path rootDir,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String logName,
org.apache.hadoop.conf.Configuration conf)
@@ -310,7 +310,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.FSHLog
 
 
 completeCacheFlush
-public void completeCacheFlush(byte[] encodedRegionName)
+public void completeCacheFlush(byte[] encodedRegionName)
 
 Specified by:
 completeCacheFlush in 
interface org.apache.hadoop.hbase.wal.WAL



[13/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 95e2ca9..03692cd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -957,7 +957,7 @@
 949// Background thread to check for 
compactions; needed if region has not gotten updates
 950// in a while. It will take care of 
not checking too frequently on store-by-store basis.
 951this.compactionChecker = new 
CompactionChecker(this, this.threadWakeFrequency, this);
-952this.periodicFlusher = new 
PeriodicMemstoreFlusher(this.threadWakeFrequency, this);
+952this.periodicFlusher = new 
PeriodicMemStoreFlusher(this.threadWakeFrequency, this);
 953this.leases = new 
Leases(this.threadWakeFrequency);
 954
 955// Create the thread to clean the 
moved regions list
@@ -1630,7 +1630,7 @@
 1622  // MSLAB is enabled. So initialize 
MemStoreChunkPool
 1623  // By this time, the 
MemstoreFlusher is already initialized. We can get the global limits from
 1624  // it.
-1625  Pair pair 
= MemorySizeUtil.getGlobalMemstoreSize(conf);
+1625  Pair pair 
= MemorySizeUtil.getGlobalMemStoreSize(conf);
 1626  long globalMemStoreSize = 
pair.getFirst();
 1627  boolean offheap = 
this.regionServerAccounting.isOffheap();
 1628  // When off heap memstore in use, 
take full area for chunk pool.
@@ -1687,7 +1687,7 @@
 1679int storefiles = 0;
 1680int storeUncompressedSizeMB = 0;
 1681int storefileSizeMB = 0;
-1682int memstoreSizeMB = (int) 
(r.getMemstoreSize() / 1024 / 1024);
+1682int memstoreSizeMB = (int) 
(r.getMemStoreSize() / 1024 / 1024);
 1683long storefileIndexSizeKB = 0;
 1684int rootIndexSizeKB = 0;
 1685int totalStaticIndexSizeKB = 0;
@@ -1726,7 +1726,7 @@
 1718  .setStorefiles(storefiles)
 1719  
.setStoreUncompressedSizeMB(storeUncompressedSizeMB)
 1720  
.setStorefileSizeMB(storefileSizeMB)
-1721  
.setMemstoreSizeMB(memstoreSizeMB)
+1721  
.setMemStoreSizeMB(memstoreSizeMB)
 1722  
.setStorefileIndexSizeKB(storefileIndexSizeKB)
 1723  
.setRootIndexSizeKB(rootIndexSizeKB)
 1724  
.setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
@@ -1817,11 +1817,11 @@
 1809}
 1810  }
 1811
-1812  static class PeriodicMemstoreFlusher 
extends ScheduledChore {
+1812  static class PeriodicMemStoreFlusher 
extends ScheduledChore {
 1813final HRegionServer server;
 1814final static int RANGE_OF_DELAY = 5 
* 60 * 1000; // 5 min in milliseconds
 1815final static int MIN_DELAY_TIME = 0; 
// millisec
-1816public PeriodicMemstoreFlusher(int 
cacheFlushInterval, final HRegionServer server) {
+1816public PeriodicMemStoreFlusher(int 
cacheFlushInterval, final HRegionServer server) {
 1817  super("MemstoreFlusherChore", 
server, cacheFlushInterval);
 1818  this.server = server;
 1819}
@@ -2786,7 +2786,7 @@
 2778});
 2779// Copy over all regions. Regions 
are sorted by size with biggest first.
 2780for (Region region : 
this.onlineRegions.values()) {
-2781  
sortedRegions.put(region.getMemstoreSize(), region);
+2781  
sortedRegions.put(region.getMemStoreSize(), region);
 2782}
 2783return sortedRegions;
 2784  }
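For the "biggest first" ordering referenced in the hunk above, a descending sorted map keyed by memstore size does the job. A simplified, hedged sketch: in the real server the value is the Region itself and the key comes from region.getMemStoreSize(); the names and sizes here are invented:

    import java.util.Comparator;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class SortedRegionsSketch {
      public static void main(String[] args) {
        SortedMap<Long, String> sortedRegions = new TreeMap<>(Comparator.reverseOrder());
        sortedRegions.put(64L * 1024 * 1024, "t1,aaa,1.abcdef");
        sortedRegions.put(8L * 1024 * 1024, "t1,bbb,2.012345");
        // Iteration yields the 64 MB region before the 8 MB one.
        sortedRegions.forEach((size, region) -> System.out.println(size + " -> " + region));
      }
    }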

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 5a5ae00..c38335c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -401,18 +401,18 @@
 393  }
 394
 395  @Override
-396  public long getMemstoreFlushSize() {
+396  public long getMemStoreFlushSize() {
 397// TODO: Why is this in here?  The 
flushsize of the region rather than the store?  St.Ack
 398return 
this.region.memstoreFlushSize;
 399  }
 400
 401  @Override
-402  public MemstoreSize getFlushableSize() 
{
+402  public MemStoreSize getFlushableSize() 
{
 403return 
this.memstore.getFlushableSize();
 404  }
 405
 406  @Override
-407  public MemstoreSize getSnapshotSize() 
{
+407  public MemStoreSize getSnapshotSize() 
{
 408return 
this.memstore.getSnapshotSize();
 409  }
 410
@@ -469,8 +469,8 @@
 461  }
 462
 463  @Override
-464  public OptionalLon

[06/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/plugin-management.html
--
diff --git a/plugin-management.html b/plugin-management.html
index 6855bb9..6222899 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Plugin Management
 
@@ -441,7 +441,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/plugins.html
--
diff --git a/plugins.html b/plugins.html
index b7b00c8..19f02e5 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Plugins
 
@@ -380,7 +380,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/poweredbyhbase.html
--
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index cadbed8..6573c29 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Powered By Apache HBase™
 
@@ -774,7 +774,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/project-info.html
--
diff --git a/project-info.html b/project-info.html
index ea2b4a3..14169a1 100644
--- a/project-info.html
+++ b/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Information
 
@@ -340,7 +340,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/project-reports.html
--
diff --git a/project-reports.html b/project-reports.html
index af79f25..35f3168 100644
--- a/project-reports.html
+++ b/project-reports.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Generated Reports
 
@@ -310,7 +310,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/project-summary.html
--
diff --git a/project-summary.html b/project-summary.html
index 4c35652..b6c93b5 100644
--- a/project-summary.html
+++ b/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Summary
 
@@ -336,7 +336,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/pseudo-distributed.html
--
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index 268da33..0909301 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -313,7 +313,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/replication.html
---

[05/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.html 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.html
index c2a5e16..129c65e 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 Direct Known Subclasses:
-TestCoprocessorHost.SimpleRegionObserverV2,
 TestIncrementTimeRange.MyObserver, TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver,
 TestRegionServerCoprocessorExceptionWithAbort.BuggyRegionObserver,
 TestRegionServerCoprocessorExceptionWithRemove.BuggyRegionObserver
+TestCoprocessorHost.SimpleRegionObserverV2,
 TestIncrementTimeRange.MyObserver, TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver,
 TestRegionServerCoprocessorExceptionWithAbort.BuggyRegionObserver,
 TestRegionServerCoprocessorExceptionWithRemove.BuggyRegionObserver
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html
index 16a7f84..903663b 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -5469,7 +5469,7 @@ implements 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
new file mode 100644
index 000..6729ef7
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
@@ -0,0 +1,372 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver 
(Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.coprocessor
+Class 
TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver
+
+
+org.apache.hadoop.hbase.coprocessor.TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+org.apache.hadoop.hbase.Coprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver
+
+
+Enclosing class:
+TestNegativeMemStoreSizeWithSlowCoprocessor
+
+
+
+public static class TestNegativeMemStoreSizeWithSlowCoprocessor.FlushingRegionObserver
+extends SimpleRegionObserver
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Su

[03/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemStoreSizeWithSlowCoprocessor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemStoreSizeWithSlowCoprocessor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemStoreSizeWithSlowCoprocessor.html
new file mode 100644
index 000..bd6fb36
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemStoreSizeWithSlowCoprocessor.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.coprocessor.TestNegativeMemStoreSizeWithSlowCoprocessor 
(Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.coprocessor.TestNegativeMemStoreSizeWithSlowCoprocessor
+
+No usage of 
org.apache.hadoop.hbase.coprocessor.TestNegativeMemStoreSizeWithSlowCoprocessor
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
deleted file mode 100644
index 78e0f54..000
--- 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
+++ /dev/null
@@ -1,125 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-Uses of Class 
org.apache.hadoop.hbase.coprocessor.TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
 (Apache HBase 3.0.0-SNAPSHOT Test API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-No Frames
-
-
-All Classes
-
-
-
-
-
-
-
-
-
-
-Uses of 
Classorg.apache.hadoop.hbase.coprocessor.TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
-
-No usage of 
org.apache.hadoop.hbase.coprocessor.TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-No Frames
-
-
-All Classes
-
-
-
-
-
-
-
-
-
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/class-use/TestNegativeMemstoreSizeWithSlowCoprocessor.html
-

[18/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void 
checkNegativeMemStoreDataSiz
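The accounting pattern in the hunk above boils down to three pieces: an atomic per-region data-size counter, an optional server-wide accounting object that mirrors the same delta, and a sanity check that the counter never goes negative. A simplified stand-alone restatement, not the real HRegion code, and the failure behaviour of the check is an assumption:

    import java.util.concurrent.atomic.AtomicLong;

    public class MemStoreAccountingSketch {
      private final AtomicLong dataSize = new AtomicLong();

      long addAndGet(long delta) {
        long size = dataSize.addAndGet(delta);
        checkNegative(size, delta);
        return size;
      }

      void decrement(long delta) {
        long size = dataSize.addAndGet(-delta);
        checkNegative(size, -delta);
      }

      private void checkNegative(long current, long delta) {
        if (current < 0) {
          // The real code's reaction is not shown in this diff; failing fast here is an assumption.
          throw new IllegalStateException("Memstore data size went negative: " + current
              + " after applying delta " + delta);
        }
      }
    }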

[09/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
index f0c3ab9..9cfa5f7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
@@ -85,7 +85,7 @@
 077  protected final int compactionKVMax;
 078  protected final Compression.Algorithm 
compactionCompression;
 079
-080  /** specify how many days to keep MVCC 
values during major compaction **/ 
+080  /** specify how many days to keep MVCC 
values during major compaction **/
 081  protected int keepSeqIdPeriod;
 082
 083  // Configs that drive whether we drop 
page cache behind compactions
@@ -149,15 +149,15 @@
 141  protected FileDetails getFileDetails(
 142  Collection 
filesToCompact, boolean allFiles) throws IOException {
 143FileDetails fd = new FileDetails();
-144long oldestHFileTimeStampToKeepMVCC = 
System.currentTimeMillis() - 
-145  (1000L * 60 * 60 * 24 * 
this.keepSeqIdPeriod);  
+144long oldestHFileTimeStampToKeepMVCC = 
System.currentTimeMillis() -
+145  (1000L * 60 * 60 * 24 * 
this.keepSeqIdPeriod);
 146
 147for (HStoreFile file : 
filesToCompact) {
 148  if(allFiles && 
(file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
-149// when isAllFiles is true, all 
files are compacted so we can calculate the smallest 
+149// when isAllFiles is true, all 
files are compacted so we can calculate the smallest
 150// MVCC value to keep
-151if(fd.minSeqIdToKeep < 
file.getMaxMemstoreTS()) {
-152  fd.minSeqIdToKeep = 
file.getMaxMemstoreTS();
+151if(fd.minSeqIdToKeep < 
file.getMaxMemStoreTS()) {
+152  fd.minSeqIdToKeep = 
file.getMaxMemStoreTS();
 153}
 154  }
 155  long seqNum = 
file.getMaxSequenceId();
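The arithmetic in the hunk above amounts to: store files last modified more than keepSeqIdPeriod days ago may have their MVCC values dropped, so the compactor remembers the largest max-memstore-TS among those files as the floor to keep. A hedged, stand-alone restatement; FileInfo is an invented stand-in for HStoreFile carrying only the two fields the sketch needs:

    public class KeepMvccSketch {
      static final class FileInfo {
        final long modificationTimeStamp;
        final long maxMemStoreTS;
        FileInfo(long modificationTimeStamp, long maxMemStoreTS) {
          this.modificationTimeStamp = modificationTimeStamp;
          this.maxMemStoreTS = maxMemStoreTS;
        }
      }

      static long minSeqIdToKeep(Iterable<FileInfo> filesToCompact, int keepSeqIdPeriodDays,
          boolean allFiles) {
        // Files older than this cutoff are old enough for their MVCC values to be dropped.
        long cutoff = System.currentTimeMillis() - (1000L * 60 * 60 * 24 * keepSeqIdPeriodDays);
        long minSeqIdToKeep = 0;
        for (FileInfo file : filesToCompact) {
          if (allFiles && file.modificationTimeStamp < cutoff
              && minSeqIdToKeep < file.maxMemStoreTS) {
            minSeqIdToKeep = file.maxMemStoreTS;
          }
        }
        return minSeqIdToKeep;
      }
    }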

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
index 050751a..c8b1dfe 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
@@ -310,7 +310,7 @@
 302  }
 303
 304  private int calculateMaxLogFiles(Configuration conf, long logRollSize) {
-305    Pair globalMemstoreSize = MemorySizeUtil.getGlobalMemstoreSize(conf);
+305    Pair globalMemstoreSize = MemorySizeUtil.getGlobalMemStoreSize(conf);
 306    return (int) ((globalMemstoreSize.getFirst() * 2) / logRollSize);
 307  }
 308
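
The calculateMaxLogFiles hunk derives the WAL-file cap from the global memstore budget: twice the global memstore size divided by the log roll size. A minimal sketch of that sizing rule, with hard-coded example values standing in for the Configuration lookups:

public class WalSizing {
  /**
   * Approximate number of WAL files to allow before forcing flushes:
   * twice the global memstore size divided by the size at which a WAL rolls.
   * Mirrors the (globalMemstoreSize.getFirst() * 2) / logRollSize expression above.
   */
  static int calculateMaxLogFiles(long globalMemStoreSizeBytes, long logRollSizeBytes) {
    return (int) ((globalMemStoreSizeBytes * 2) / logRollSizeBytes);
  }

  public static void main(String[] args) {
    long globalMemStore = 4L * 1024 * 1024 * 1024; // e.g. a 4 GB global memstore budget
    long logRollSize = 128L * 1024 * 1024;         // e.g. a 128 MB WAL roll size
    System.out.println(calculateMaxLogFiles(globalMemStore, logRollSize)); // 64
  }
}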
@@ -476,13 +476,13 @@
 468  }
 469
 470  @Override
-471  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName) {
+471  public long getEarliestMemStoreSeqNum(byte[] encodedRegionName) {
 472    // Used by tests. Deprecated as too subtle for general usage.
 473    return this.sequenceIdAccounting.getLowestSequenceId(encodedRegionName);
 474  }
 475
 476  @Override
-477  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
+477  public long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
 478    // This method is used by tests and for figuring if we should flush or not because our
 479    // sequenceids are too old. It is also used reporting the master our oldest sequenceid for use
 480    // figuring what edits can be skipped during log recovery. getEarliestMemStoreSequenceId
@@ -932,7 +932,7 @@
 924    assert highestUnsyncedTxid < entry.getTxid();
 925    highestUnsyncedTxid = entry.getTxid();
 926    sequenceIdAccounting.update(encodedRegionName, entry.getFamilyNames(), regionSequenceId,
-927      entry.isInMemstore());
+927      entry.isInMemStore());
 928    coprocessorHost.postWALWrite(entry.getRegionInfo(), entry.getKey(), entry.getEdit());
 929    // Update metrics.
 930    postAppend(entry, EnvironmentEdgeManager.currentTime() - start);

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.html
index 050751a..c

[08/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
index a291215..023decd 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder – Dependency 
Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-management.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-management.html
index aecfb9e..7f81b50 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-management.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder – Project Dependency 
Management
 
@@ -766,7 +766,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/index.html 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/index.html
index 2c21c0d..451dd79 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/index.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder – About
 
@@ -119,7 +119,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/integration.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/integration.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/integration.html
index 320e2d3..8eff03b 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/integration.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder – CI Management
 
@@ -126,7 +126,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/issue-tracking.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/issue-tracking.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/issue-tracking.html
index 9e5058d..a6ae67b 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/issue-tracking.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder – Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.

[15/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
new file mode 100644
index 000..03692cd
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -0,0 +1,3841 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019package 
org.apache.hadoop.hbase.regionserver;
+020
+021import 
javax.management.MalformedObjectNameException;
+022import javax.management.ObjectName;
+023import javax.servlet.http.HttpServlet;
+024import java.io.IOException;
+025import java.io.InterruptedIOException;
+026import 
java.lang.Thread.UncaughtExceptionHandler;
+027import java.lang.management.MemoryType;
+028import 
java.lang.management.MemoryUsage;
+029import java.lang.reflect.Constructor;
+030import java.net.BindException;
+031import java.net.InetAddress;
+032import java.net.InetSocketAddress;
+033import java.util.ArrayList;
+034import java.util.Collection;
+035import java.util.Collections;
+036import java.util.Comparator;
+037import java.util.HashMap;
+038import java.util.HashSet;
+039import java.util.Iterator;
+040import java.util.List;
+041import java.util.Map;
+042import java.util.Map.Entry;
+043import java.util.Objects;
+044import java.util.Set;
+045import java.util.SortedMap;
+046import java.util.TreeMap;
+047import java.util.TreeSet;
+048import 
java.util.concurrent.ConcurrentHashMap;
+049import 
java.util.concurrent.ConcurrentMap;
+050import 
java.util.concurrent.ConcurrentSkipListMap;
+051import 
java.util.concurrent.CountDownLatch;
+052import java.util.concurrent.TimeUnit;
+053import 
java.util.concurrent.atomic.AtomicBoolean;
+054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+055import java.util.function.Function;
+056
+057import 
org.apache.commons.lang3.RandomUtils;
+058import 
org.apache.commons.lang3.SystemUtils;
+059import org.apache.commons.logging.Log;
+060import 
org.apache.commons.logging.LogFactory;
+061import 
org.apache.hadoop.conf.Configuration;
+062import org.apache.hadoop.fs.FileSystem;
+063import org.apache.hadoop.fs.Path;
+064import 
org.apache.hadoop.hbase.Abortable;
+065import 
org.apache.hadoop.hbase.ChoreService;
+066import 
org.apache.hadoop.hbase.ClockOutOfSyncException;
+067import 
org.apache.hadoop.hbase.CoordinatedStateManager;
+068import 
org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
+069import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+070import 
org.apache.hadoop.hbase.HBaseConfiguration;
+071import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+072import 
org.apache.hadoop.hbase.HConstants;
+073import 
org.apache.hadoop.hbase.HealthCheckChore;
+074import 
org.apache.hadoop.hbase.MetaTableAccessor;
+075import 
org.apache.hadoop.hbase.NotServingRegionException;
+076import 
org.apache.hadoop.hbase.PleaseHoldException;
+077import 
org.apache.hadoop.hbase.ScheduledChore;
+078import 
org.apache.hadoop.hbase.ServerName;
+079import 
org.apache.hadoop.hbase.Stoppable;
+080import 
org.apache.hadoop.hbase.TableDescriptors;
+081import 
org.apache.hadoop.hbase.TableName;
+082import 
org.apache.hadoop.hbase.YouAreDeadException;
+083import 
org.apache.hadoop.hbase.ZNodeClearer;
+084import 
org.apache.hadoop.hbase.client.ClusterConnection;
+085import 
org.apache.hadoop.hbase.client.Connection;
+086import 
org.apache.hadoop.hbase.client.ConnectionUtils;
+087import 
org.apache.hadoop.hbase.client.Put;
+088import 
org.apache.hadoop.hbase.client.RegionInfo;
+089import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+090import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+091import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+092import 
org.apache.hadoop.hbase.client.locking.EntityLock;
+093impo

[23/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDat

[21/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index a0961f7..4a7f4ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -526,23 +526,23 @@
 518final FlushResultImpl result; // 
indicating a failure result from prepare
 519final TreeMap storeFlushCtxs;
 520final TreeMap> committedFiles;
-521final TreeMap storeFlushableSize;
+521final TreeMap storeFlushableSize;
 522final long startTime;
 523final long flushOpSeqId;
 524final long flushedSeqId;
-525final MemstoreSize 
totalFlushableSize;
+525final MemStoreSize 
totalFlushableSize;
 526
 527/** Constructs an early exit case 
*/
 528PrepareFlushResult(FlushResultImpl 
result, long flushSeqId) {
-529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemstoreSize());
+529  this(result, null, null, null, 
Math.max(0, flushSeqId), 0, 0, new MemStoreSize());
 530}
 531
 532/** Constructs a successful prepare 
flush result */
 533PrepareFlushResult(
 534  TreeMap storeFlushCtxs,
 535  TreeMap> committedFiles,
-536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-537  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+536  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+537  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 538  this(null, storeFlushCtxs, 
committedFiles, storeFlushableSize, startTime,
 539flushSeqId, flushedSeqId, 
totalFlushableSize);
 540}
@@ -551,8 +551,8 @@
 543FlushResultImpl result,
 544  TreeMap storeFlushCtxs,
 545  TreeMap> committedFiles,
-546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
-547  long flushedSeqId, MemstoreSize 
totalFlushableSize) {
+546  TreeMap 
storeFlushableSize, long startTime, long flushSeqId,
+547  long flushedSeqId, MemStoreSize 
totalFlushableSize) {
 548  this.result = result;
 549  this.storeFlushCtxs = 
storeFlushCtxs;
 550  this.committedFiles = 
committedFiles;
@@ -1015,7 +1015,7 @@
 1007  Future future = 
completionService.take();
 1008  HStore store = future.get();
 1009  
this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-1010  if (store.isSloppyMemstore()) 
{
+1010  if (store.isSloppyMemStore()) 
{
 1011hasSloppyStores = true;
 1012  }
 1013
@@ -1025,7 +1025,7 @@
 1017  if (maxSeqId == -1 || 
storeMaxSequenceId > maxSeqId) {
 1018maxSeqId = 
storeMaxSequenceId;
 1019  }
-1020  long maxStoreMemstoreTS = 
store.getMaxMemstoreTS().orElse(0L);
+1020  long maxStoreMemstoreTS = 
store.getMaxMemStoreTS().orElse(0L);
 1021  if (maxStoreMemstoreTS > 
maxMemstoreTS) {
 1022maxMemstoreTS = 
maxStoreMemstoreTS;
 1023  }
@@ -1202,24 +1202,24 @@
 1194   * store
 1195   * @return the size of memstore in 
this region
 1196   */
-1197  public long 
addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+1197  public long 
addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 1198if (this.rsAccounting != null) {
-1199  
rsAccounting.incGlobalMemstoreSize(memstoreSize);
+1199  
rsAccounting.incGlobalMemStoreSize(memstoreSize);
 1200}
 1201long size = 
this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-1202checkNegativeMemstoreDataSize(size, 
memstoreSize.getDataSize());
+1202checkNegativeMemStoreDataSize(size, 
memstoreSize.getDataSize());
 1203return size;
 1204  }
 1205
-1206  public void 
decrMemstoreSize(MemstoreSize memstoreSize) {
+1206  public void 
decrMemStoreSize(MemStoreSize memstoreSize) {
 1207if (this.rsAccounting != null) {
-1208  
rsAccounting.decGlobalMemstoreSize(memstoreSize);
+1208  
rsAccounting.decGlobalMemStoreSize(memstoreSize);
 1209}
 1210long size = 
this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-1211checkNegativeMemstoreDataSize(size, 
-memstoreSize.getDataSize());
+1211checkNegativeMemStoreDataSize(size, 
-memstoreSize.getDataSize());
 1212  }
 1213
-1214  private void 
checkNegativeMemstoreDataSize(long memstoreDataSize, long delta) {
+1214  private void

[04/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
deleted file mode 100644
index f22a697..000
--- 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver.html
+++ /dev/null
@@ -1,372 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver 
(Apache HBase 3.0.0-SNAPSHOT Test API)
-
-
-
-
-
-org.apache.hadoop.hbase.coprocessor
-Class 
TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver
-
-
-org.apache.hadoop.hbase.coprocessor.TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-org.apache.hadoop.hbase.Coprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-
-
-Enclosing class:
-TestNegativeMemstoreSizeWithSlowCoprocessor
-
-
-
-public static class TestNegativeMemstoreSizeWithSlowCoprocessor.FlushingRegionObserver
-extends SimpleRegionObserver
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.Coprocessor
-org.apache.hadoop.hbase.Coprocessor.State
-
-
-
-
-
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.coprocessor.RegionObserver
-org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType
-
-
-
-
-
-
-
-
-Field Summary
-
-
-
-
-Fields inherited from class org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver
-ctBeforeDelete, ctPostAppend, ctPostBatchMutate, ctPostBatchMutateIndispensably, ctPostBulkLoadHFile, ctPostCheckAndDelete, ctPostCheckAndPut, ctPostClose, ctPostCloseRegionOperation, ctPostCompact, ctPostCompactSelect, ctPostDeleted, ctPostFlush, ctPostGet, ctPostIncrement, ctPostOpen, ctPostPut, ctPostReplayWALs, ctPostScannerClose, ctPostScannerFilterRow, ctPostScannerNext, ctPostScannerOpen, ctPostStartRegionOperation, ctPostStoreFileReaderOpen, ctPostWALRestore, ctPreAppend, ctPreAppendAfterRowLock, ctPreBatchMutate, ctPreBulkLoadHFile, ctPreCheckAndDelete, ctPreCheckAndDeleteAfterRowLock, ctPreCheckAndPut, ctPreCheckAndPutAfterRowLock, ctPreClose, ctPreCompact, ctPreCompactScanner, ctPreCompactSelect, ctPreDeleted, ctPreFlush, ctPreFlushScannerOpen, ctPreGet, ctPreIncrement, ctPreIncrementAfterRowLock, ctPreOpen, ctPrePrepareDeleteTS, ctPrePut, ctPreRepla

[02/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.html
index 90d49b4..0a22ab8 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestEndToEndSplitTransaction
+public class TestEndToEndSplitTransaction
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -281,7 +281,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -290,7 +290,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -299,7 +299,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONF
-private static final org.apache.hadoop.conf.Configuration CONF
+private static final org.apache.hadoop.conf.Configuration CONF
 
 
 
@@ -308,7 +308,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -325,7 +325,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestEndToEndSplitTransaction
-public TestEndToEndSplitTransaction()
+public TestEndToEndSplitTransaction()
 
 
 
@@ -342,7 +342,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 beforeAllTests
-public static void beforeAllTests()
+public static void beforeAllTests()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -356,7 +356,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 afterAllTests
-public static void afterAllTests()
+public static void afterAllTests()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testFromClientSideWhileSplitting
-public void testFromClientSideWhileSplitting()
+public void testFromClientSideWhileSplitting()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
 Tests that the client sees meta table changes as atomic 
during splits
 
@@ -385,7 +385,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 log
-public static void log(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String msg)
+public static void log(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String msg)
 
 
 
@@ -394,7 +394,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 flushAndBlockUntilDone
-public static void flushAndBlockUntilDone(org.apache.hadoop.hbase.client.Admin admin,
+public static void flushAndBlockUntilDone(org.apache.hadoop.hbase.client.Admin admin,
   
org.apache.hadoop.hbase.regionserver.HRegionServer rs,
   byte[] regionName)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException,
@@ -412,7 +412,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 compactAndBlockUntilDone
-public static void compactAndBlockUntilDone(org.apache.hadoop.hbase.client.Admin admin,
+public static void compactAndBlockUntilDone(org.apache.hadoop.hbase.client.Admin admin,
 
org.apache.hadoop.hbase.regionserver.HRegionServer rs,
 byte[] regionName)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException,
@@ -430,7 +430,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.htm

[07/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/mail-lists.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/mail-lists.html 
b/hbase-build-configuration/hbase-archetypes/mail-lists.html
index 61bff1f..8b728da 100644
--- a/hbase-build-configuration/hbase-archetypes/mail-lists.html
+++ b/hbase-build-configuration/hbase-archetypes/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Project Mailing Lists
 
@@ -176,7 +176,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/plugin-management.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/plugin-management.html 
b/hbase-build-configuration/hbase-archetypes/plugin-management.html
index 1f6b916..d537e20 100644
--- a/hbase-build-configuration/hbase-archetypes/plugin-management.html
+++ b/hbase-build-configuration/hbase-archetypes/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Project Plugin Management
 
@@ -271,7 +271,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/plugins.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/plugins.html 
b/hbase-build-configuration/hbase-archetypes/plugins.html
index b2a1f44..3eaddd1 100644
--- a/hbase-build-configuration/hbase-archetypes/plugins.html
+++ b/hbase-build-configuration/hbase-archetypes/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Project Plugins
 
@@ -214,7 +214,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/project-info.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/project-info.html 
b/hbase-build-configuration/hbase-archetypes/project-info.html
index 52fc546..0525d8b 100644
--- a/hbase-build-configuration/hbase-archetypes/project-info.html
+++ b/hbase-build-configuration/hbase-archetypes/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Project Information
 
@@ -167,7 +167,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/project-summary.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/project-summary.html 
b/hbase-build-configuration/hbase-archetypes/project-summary.html
index f15c1b9..5d99524 100644
--- a/hbase-build-configuration/hbase-archetypes/project-summary.html
+++ b/hbase-build-configuration/hbase-archetypes/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Project Summary
 
@@ -163,7 +163,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-10-01
+  Last Published: 
2017-10-02
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/hbase-build-configuration/hbase-archetypes/source-repository.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/source-repository.html 
b/hbase-build-configuration/hbase-archetypes/source-repository.html
index f361e62..12e2ac6 100644
--- a/hbase-build-configuration/hbase-archetypes/source-repository.html
+++ b/hbase-build-configuration/h

[4/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
HBASE-18897 Substitute MemStore for Memstore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e047f518
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e047f518
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e047f518

Branch: refs/heads/branch-2
Commit: e047f518ef128aeda7af4359b28a840149769385
Parents: 8c6ed57
Author: Chia-Ping Tsai 
Authored: Mon Oct 2 21:10:11 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Mon Oct 2 21:10:11 2017 +0800

--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  26 +++-
 .../org/apache/hadoop/hbase/RegionLoad.java |   2 +-
 .../org/apache/hadoop/hbase/ServerLoad.java |   6 +-
 .../hadoop/hbase/client/MetricsConnection.java  |   2 +-
 .../hadoop/hbase/client/RegionLoadStats.java|   9 ++
 .../hadoop/hbase/client/TableDescriptor.java|   2 +-
 .../hbase/client/TableDescriptorBuilder.java|   8 +-
 .../backoff/ExponentialClientBackoffPolicy.java |   2 +-
 .../hbase/client/backoff/ServerStatistics.java  |   4 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |   2 +-
 .../client/TestClientExponentialBackoff.java|   4 +-
 .../regionserver/MetricsRegionServerSource.java |   2 +-
 .../MetricsRegionServerWrapper.java |   6 +-
 .../regionserver/MetricsRegionWrapper.java  |   2 +-
 .../MetricsTableWrapperAggregate.java   |   2 +-
 .../MetricsRegionServerSourceImpl.java  |   4 +-
 .../regionserver/MetricsRegionSourceImpl.java   |   2 +-
 .../regionserver/MetricsTableSourceImpl.java|   2 +-
 .../TestMetricsRegionSourceImpl.java|   2 +-
 .../TestMetricsTableSourceImpl.java |   2 +-
 .../src/main/protobuf/Client.proto  |   6 +-
 .../src/main/protobuf/ClusterStatus.proto   |   2 +-
 hbase-protocol/src/main/protobuf/Client.proto   |   6 +-
 .../rest/model/StorageClusterStatusModel.java   |   8 +-
 .../protobuf/StorageClusterStatusMessage.proto  |   4 +-
 .../model/TestStorageClusterStatusModel.java|  12 +-
 .../tmpl/master/RegionServerListTmpl.jamon  |   4 +-
 .../tmpl/regionserver/RegionListTmpl.jamon  |   2 +-
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   |   4 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |   4 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |   4 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |  20 +--
 .../hadoop/hbase/io/util/MemorySizeUtil.java|   6 +-
 .../master/balancer/StochasticLoadBalancer.java |   6 +-
 .../org/apache/hadoop/hbase/mob/MobFile.java|   4 +-
 .../hbase/regionserver/AbstractMemStore.java|  14 +-
 .../regionserver/CellArrayImmutableSegment.java |   4 +-
 .../regionserver/CellChunkImmutableSegment.java |   4 +-
 .../hbase/regionserver/CompactingMemStore.java  |  18 +--
 .../hbase/regionserver/CompactionPipeline.java  |  18 +--
 .../regionserver/CompositeImmutableSegment.java |   4 +-
 .../regionserver/DefaultHeapMemoryTuner.java|   4 +-
 .../hbase/regionserver/DefaultMemStore.java |  14 +-
 .../regionserver/FlushLargeStoresPolicy.java|   2 +-
 .../FlushNonSloppyStoresFirstPolicy.java|   2 +-
 .../hbase/regionserver/FlushRequester.java  |   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 154 +--
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../hadoop/hbase/regionserver/HStore.java   |  22 +--
 .../hadoop/hbase/regionserver/HStoreFile.java   |   2 +-
 .../hbase/regionserver/HeapMemoryManager.java   |  26 ++--
 .../hadoop/hbase/regionserver/MemStore.java |  12 +-
 .../hbase/regionserver/MemStoreFlusher.java |  54 +++
 .../hadoop/hbase/regionserver/MemStoreSize.java | 107 +
 .../hadoop/hbase/regionserver/MemstoreSize.java | 107 -
 .../hbase/regionserver/MetricsRegionServer.java |   2 +-
 .../MetricsRegionServerWrapperImpl.java |  10 +-
 .../regionserver/MetricsRegionWrapperImpl.java  |   2 +-
 .../MetricsTableWrapperAggregateImpl.java   |  12 +-
 .../hbase/regionserver/MutableSegment.java  |   6 +-
 .../hadoop/hbase/regionserver/Region.java   |   2 +-
 .../regionserver/RegionServerAccounting.java|  64 
 .../regionserver/RegionServicesForStores.java   |  16 +-
 .../hadoop/hbase/regionserver/Segment.java  |   8 +-
 .../hbase/regionserver/SegmentFactory.java  |   2 +-
 .../apache/hadoop/hbase/regionserver/Store.java |  10 +-
 .../regionserver/StoreConfigInformation.java|   2 +-
 .../hadoop/hbase/regionserver/StoreFile.java|   2 +-
 .../hadoop/hbase/regionserver/StoreUtils.java   |   4 +-
 .../hbase/regionserver/StripeStoreConfig.java   |   2 +-
 .../compactions/CompactionConfiguration.java|   8 +-
 .../regionserver/compactions/Compactor.java |  12 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   8 +-
 .../hbase/regionserver/wal/FSWALEntry.java  |   2 +-
 .../RegionReplicaReplicationEndpoint.java   

[2/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 354b056..2ada5a9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -81,8 +81,8 @@ public class StoreUtils {
* were created by a mapreduce bulk load are ignored, as they do not 
correspond to any specific
* put operation, and thus do not have a memstoreTS associated with them.
*/
-  public static OptionalLong getMaxMemstoreTSInList(Collection 
sfs) {
-return sfs.stream().filter(sf -> 
!sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemstoreTS)
+  public static OptionalLong getMaxMemStoreTSInList(Collection 
sfs) {
+return sfs.stream().filter(sf -> 
!sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS)
 .max();
   }
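
getMaxMemStoreTSInList streams over the store files, skips bulk-loaded ones (which, as the javadoc above notes, carry no memstore timestamp), and takes the maximum. A self-contained sketch of the same pipeline over a simplified store-file record; the StoreFile type here is illustrative, not the HStoreFile class.

import java.util.List;
import java.util.OptionalLong;

public class MaxMemStoreTs {
  /** Minimal stand-in for HStoreFile: a bulk-load flag plus a max memstore timestamp. */
  record StoreFile(boolean bulkLoadResult, long maxMemStoreTS) {}

  /** Max memstore TS across non-bulk-loaded files; empty if none qualify. */
  static OptionalLong getMaxMemStoreTSInList(List<StoreFile> sfs) {
    return sfs.stream()
        .filter(sf -> !sf.bulkLoadResult())
        .mapToLong(StoreFile::maxMemStoreTS)
        .max();
  }

  public static void main(String[] args) {
    List<StoreFile> files = List.of(
        new StoreFile(false, 42L),
        new StoreFile(true, 99L),   // bulk-loaded: ignored
        new StoreFile(false, 17L));
    System.out.println(getMaxMemStoreTSInList(files)); // OptionalLong[42]
  }
}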
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
index 169d1d8..eb2a9b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
@@ -96,7 +96,7 @@ public class StripeStoreConfig {
 this.splitPartCount = splitPartCount;
 // Arbitrary default split size - 4 times the size of one L0 compaction.
 // If we flush into L0 there's no split compaction, but for default value 
it is ok.
-double flushSize = sci.getMemstoreFlushSize();
+double flushSize = sci.getMemStoreFlushSize();
 if (flushSize == 0) {
   flushSize = 128 * 1024 * 1024;
 }
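
The StripeStoreConfig hunk reads the store's memstore flush size and falls back to 128 MB when it is unset before using it to derive the default split size. The fallback in isolation, as a tiny sketch:

public class FlushSizeFallback {
  /** Flush size with the 128 MB fallback applied when the store reports 0. */
  static double effectiveFlushSize(double configuredFlushSize) {
    return configuredFlushSize == 0 ? 128 * 1024 * 1024 : configuredFlushSize;
  }

  public static void main(String[] args) {
    System.out.println(effectiveFlushSize(0));        // 1.34217728E8 (128 MB default)
    System.out.println(effectiveFlushSize(64 << 20)); // 6.7108864E7 (configured 64 MB kept)
  }
}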

http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index fe9ae30..b8194eb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -115,10 +115,10 @@ public class CompactionConfiguration {
 this.storeConfigInfo = storeConfigInfo;
 
 maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, 
Long.MAX_VALUE);
-offPeakMaxCompactSize = 
conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, 
-  maxCompactSize);  
+offPeakMaxCompactSize = 
conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY,
+  maxCompactSize);
 minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY,
-storeConfigInfo.getMemstoreFlushSize());
+storeConfigInfo.getMemStoreFlushSize());
 minFilesToCompact = Math.max(2, 
conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY,
   /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
 maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10);
@@ -126,7 +126,7 @@ public class CompactionConfiguration {
 offPeakCompactionRatio = 
conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F);
 
 throttlePoint = 
conf.getLong("hbase.regionserver.thread.compaction.throttle",
-  2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
+  2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize());
 majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 
1000*60*60*24*7);
 // Make it 0.5 so jitter has us fall evenly either side of when the 
compaction should run
 majorCompactionJitter = 
conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
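
The CompactionConfiguration hunk ties two thresholds to the store's memstore flush size: minCompactSize falls back to it when the key is unset, and the compaction throttle point is 2 * maxFilesToCompact * flushSize. A short sketch of that arithmetic with assumed example values in place of the Configuration and StoreConfigInformation lookups:

public class CompactionThresholds {
  public static void main(String[] args) {
    // Assumed example values; the real code reads these from configuration.
    long memStoreFlushSize = 128L * 1024 * 1024; // 128 MB
    int maxFilesToCompact = 10;                  // the default shown in the diff above

    long minCompactSize = memStoreFlushSize;                         // fallback when the key is unset
    long throttlePoint = 2L * maxFilesToCompact * memStoreFlushSize; // 2 * 10 * 128 MB

    System.out.println("minCompactSize = " + minCompactSize);
    System.out.println("throttlePoint  = " + throttlePoint);
  }
}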

http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 2c9a519..5865ed5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop

[3/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d059977..80c0433 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -518,23 +518,23 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final FlushResultImpl result; // indicating a failure result from prepare
 final TreeMap storeFlushCtxs;
 final TreeMap> committedFiles;
-final TreeMap storeFlushableSize;
+final TreeMap storeFlushableSize;
 final long startTime;
 final long flushOpSeqId;
 final long flushedSeqId;
-final MemstoreSize totalFlushableSize;
+final MemStoreSize totalFlushableSize;
 
 /** Constructs an early exit case */
 PrepareFlushResult(FlushResultImpl result, long flushSeqId) {
-  this(result, null, null, null, Math.max(0, flushSeqId), 0, 0, new 
MemstoreSize());
+  this(result, null, null, null, Math.max(0, flushSeqId), 0, 0, new 
MemStoreSize());
 }
 
 /** Constructs a successful prepare flush result */
 PrepareFlushResult(
   TreeMap storeFlushCtxs,
   TreeMap> committedFiles,
-  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
-  long flushedSeqId, MemstoreSize totalFlushableSize) {
+  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
+  long flushedSeqId, MemStoreSize totalFlushableSize) {
   this(null, storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
 flushSeqId, flushedSeqId, totalFlushableSize);
 }
@@ -543,8 +543,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 FlushResultImpl result,
   TreeMap storeFlushCtxs,
   TreeMap> committedFiles,
-  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
-  long flushedSeqId, MemstoreSize totalFlushableSize) {
+  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
+  long flushedSeqId, MemStoreSize totalFlushableSize) {
   this.result = result;
   this.storeFlushCtxs = storeFlushCtxs;
   this.committedFiles = committedFiles;
@@ -1007,7 +1007,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   Future future = completionService.take();
   HStore store = future.get();
   this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-  if (store.isSloppyMemstore()) {
+  if (store.isSloppyMemStore()) {
 hasSloppyStores = true;
   }
 
@@ -1017,7 +1017,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
 maxSeqId = storeMaxSequenceId;
   }
-  long maxStoreMemstoreTS = store.getMaxMemstoreTS().orElse(0L);
+  long maxStoreMemstoreTS = store.getMaxMemStoreTS().orElse(0L);
   if (maxStoreMemstoreTS > maxMemstoreTS) {
 maxMemstoreTS = maxStoreMemstoreTS;
   }
@@ -1194,24 +1194,24 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* store
* @return the size of memstore in this region
*/
-  public long addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+  public long addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 if (this.rsAccounting != null) {
-  rsAccounting.incGlobalMemstoreSize(memstoreSize);
+  rsAccounting.incGlobalMemStoreSize(memstoreSize);
 }
 long size = this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-checkNegativeMemstoreDataSize(size, memstoreSize.getDataSize());
+checkNegativeMemStoreDataSize(size, memstoreSize.getDataSize());
 return size;
   }
 
-  public void decrMemstoreSize(MemstoreSize memstoreSize) {
+  public void decrMemStoreSize(MemStoreSize memstoreSize) {
 if (this.rsAccounting != null) {
-  rsAccounting.decGlobalMemstoreSize(memstoreSize);
+  rsAccounting.decGlobalMemStoreSize(memstoreSize);
 }
 long size = this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-checkNegativeMemstoreDataSize(size, -memstoreSize.getDataSize());
+checkNegativeMemStoreDataSize(size, -memstoreSize.getDataSize());
   }
 
-  private void checkNegativeMemstoreDataSize(long memstoreDataSize, long 
delta) {
+  private void checkNegativeMemStoreDataSize(long memstoreDataSize, long 
delta) {
 // This is extremely bad if we make memstoreSize negative. Log as much 
info on the offending
 // caller as possible. (memStoreSize might be a negative value already -- 
freeing memory)
  
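The addAndGetMemStoreSize/decrMemStoreSize pair above keeps a running total of the region's memstore data size and flags any drop below zero. A stripped-down sketch of that accounting pattern on a plain AtomicLong; it is illustrative only and omits the RegionServerAccounting hookup and heap-size tracking.

import java.util.concurrent.atomic.AtomicLong;

public class MemStoreAccounting {
  private final AtomicLong memStoreDataSize = new AtomicLong();

  /** Adds a delta (positive on write, negative on flush) and returns the new total. */
  long addAndGetMemStoreSize(long dataSizeDelta) {
    long size = memStoreDataSize.addAndGet(dataSizeDelta);
    checkNegativeMemStoreDataSize(size, dataSizeDelta);
    return size;
  }

  /** Decrement is just the negative delta, matching decrMemStoreSize above. */
  void decrMemStoreSize(long dataSize) {
    addAndGetMemStoreSize(-dataSize);
  }

  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
    if (memStoreDataSize < 0) {
      // A negative total means a decrement was not matched by an earlier increment;
      // surface it loudly instead of silently wrapping.
      System.err.printf("Negative memstore size %d after delta %d%n", memStoreDataSize, delta);
    }
  }

  public static void main(String[] args) {
    MemStoreAccounting acct = new MemStoreAccounting();
    acct.addAndGetMemStoreSize(1024);
    acct.decrMemStoreSize(2048); // triggers the negative-size warning
  }
}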

[1/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8c6ed571b -> e047f518e


http://git-wip-us.apache.org/repos/asf/hbase/blob/e047f518/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index a0d953e..e4f7663 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -144,7 +143,7 @@ public class TestPerColumnFamilyFlush {
   }
 }
 
-long totalMemstoreSize = region.getMemstoreSize();
+long totalMemstoreSize = region.getMemStoreSize();
 
 // Find the smallest LSNs for edits wrt to each CF.
 long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1);
@@ -152,13 +151,13 @@ public class TestPerColumnFamilyFlush {
 long smallestSeqCF3 = region.getOldestSeqIdOfStore(FAMILY3);
 
 // Find the sizes of the memstores of each CF.
-MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
-MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
-MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
+MemStoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+MemStoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+MemStoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
 
 // Get the overall smallest LSN in the region's memstores.
 long smallestSeqInRegionCurrentMemstore = getWAL(region)
-
.getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+
.getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // The overall smallest LSN in the region's memstores should be the same as
 // the LSN of the smallest edit in CF1
@@ -180,16 +179,16 @@ public class TestPerColumnFamilyFlush {
 region.flush(false);
 
 // Will use these to check if anything changed.
-MemstoreSize oldCF2MemstoreSize = cf2MemstoreSize;
-MemstoreSize oldCF3MemstoreSize = cf3MemstoreSize;
+MemStoreSize oldCF2MemstoreSize = cf2MemstoreSize;
+MemStoreSize oldCF3MemstoreSize = cf3MemstoreSize;
 
 // Recalculate everything
 cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
 cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
 cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
-totalMemstoreSize = region.getMemstoreSize();
+totalMemstoreSize = region.getMemStoreSize();
 smallestSeqInRegionCurrentMemstore = getWAL(region)
-
.getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+
.getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // We should have cleared out only CF1, since we chose the flush thresholds
 // and number of puts accordingly.
@@ -225,9 +224,9 @@ public class TestPerColumnFamilyFlush {
 cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
 cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
 cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
-totalMemstoreSize = region.getMemstoreSize();
+totalMemstoreSize = region.getMemStoreSize();
 smallestSeqInRegionCurrentMemstore = getWAL(region)
-
.getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+
.getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // CF1 and CF2, both should be absent.
 assertEquals(0, cf1MemstoreSize.getDataSize());
@@ -261,7 +260,7 @@ public class TestPerColumnFamilyFlush {
 
 // Since we won't find any CF above the threshold, and hence no specific
 // store to flush, we should flush all the memstores.
-assertEquals(0, region.getMemstoreSize());
+assertEquals(0, region.getMemStoreSize());
 HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
@@ -285,12 +284,12 @@ public class TestPerColumnFamilyFlush {
   }
 }
 
-long totalMemstoreSize = region.getMemstoreSize();
+long totalMemstoreSize = region.getMemStoreSize();
 
 // Find the sizes of the memstores of each CF.
-MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
-MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
-  

[3/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d059977..80c0433 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -518,23 +518,23 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final FlushResultImpl result; // indicating a failure result from prepare
 final TreeMap storeFlushCtxs;
 final TreeMap> committedFiles;
-final TreeMap storeFlushableSize;
+final TreeMap storeFlushableSize;
 final long startTime;
 final long flushOpSeqId;
 final long flushedSeqId;
-final MemstoreSize totalFlushableSize;
+final MemStoreSize totalFlushableSize;
 
 /** Constructs an early exit case */
 PrepareFlushResult(FlushResultImpl result, long flushSeqId) {
-  this(result, null, null, null, Math.max(0, flushSeqId), 0, 0, new 
MemstoreSize());
+  this(result, null, null, null, Math.max(0, flushSeqId), 0, 0, new 
MemStoreSize());
 }
 
 /** Constructs a successful prepare flush result */
 PrepareFlushResult(
   TreeMap storeFlushCtxs,
   TreeMap> committedFiles,
-  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
-  long flushedSeqId, MemstoreSize totalFlushableSize) {
+  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
+  long flushedSeqId, MemStoreSize totalFlushableSize) {
   this(null, storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
 flushSeqId, flushedSeqId, totalFlushableSize);
 }
@@ -543,8 +543,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 FlushResultImpl result,
   TreeMap storeFlushCtxs,
   TreeMap> committedFiles,
-  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
-  long flushedSeqId, MemstoreSize totalFlushableSize) {
+  TreeMap storeFlushableSize, long startTime, long 
flushSeqId,
+  long flushedSeqId, MemStoreSize totalFlushableSize) {
   this.result = result;
   this.storeFlushCtxs = storeFlushCtxs;
   this.committedFiles = committedFiles;
@@ -1007,7 +1007,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   Future future = completionService.take();
   HStore store = future.get();
   this.stores.put(store.getColumnFamilyDescriptor().getName(), store);
-  if (store.isSloppyMemstore()) {
+  if (store.isSloppyMemStore()) {
 hasSloppyStores = true;
   }
 
@@ -1017,7 +1017,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
 maxSeqId = storeMaxSequenceId;
   }
-  long maxStoreMemstoreTS = store.getMaxMemstoreTS().orElse(0L);
+  long maxStoreMemstoreTS = store.getMaxMemStoreTS().orElse(0L);
   if (maxStoreMemstoreTS > maxMemstoreTS) {
 maxMemstoreTS = maxStoreMemstoreTS;
   }
@@ -1194,24 +1194,24 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* store
* @return the size of memstore in this region
*/
-  public long addAndGetMemstoreSize(MemstoreSize memstoreSize) {
+  public long addAndGetMemStoreSize(MemStoreSize memstoreSize) {
 if (this.rsAccounting != null) {
-  rsAccounting.incGlobalMemstoreSize(memstoreSize);
+  rsAccounting.incGlobalMemStoreSize(memstoreSize);
 }
 long size = this.memstoreDataSize.addAndGet(memstoreSize.getDataSize());
-checkNegativeMemstoreDataSize(size, memstoreSize.getDataSize());
+checkNegativeMemStoreDataSize(size, memstoreSize.getDataSize());
 return size;
   }
 
-  public void decrMemstoreSize(MemstoreSize memstoreSize) {
+  public void decrMemStoreSize(MemStoreSize memstoreSize) {
 if (this.rsAccounting != null) {
-  rsAccounting.decGlobalMemstoreSize(memstoreSize);
+  rsAccounting.decGlobalMemStoreSize(memstoreSize);
 }
 long size = this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize());
-checkNegativeMemstoreDataSize(size, -memstoreSize.getDataSize());
+checkNegativeMemStoreDataSize(size, -memstoreSize.getDataSize());
   }
 
-  private void checkNegativeMemstoreDataSize(long memstoreDataSize, long 
delta) {
+  private void checkNegativeMemStoreDataSize(long memstoreDataSize, long 
delta) {
 // This is extremely bad if we make memstoreSize negative. Log as much 
info on the offending
 // caller as possible. (memStoreSize might be a negative value already -- 
freeing memory)
  

[4/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
HBASE-18897 Substitute MemStore for Memstore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d35d8376
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d35d8376
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d35d8376

Branch: refs/heads/master
Commit: d35d8376a70a8de63c5d232a46e39657ba739eef
Parents: 869b90c
Author: Chia-Ping Tsai 
Authored: Mon Oct 2 14:53:02 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Mon Oct 2 20:55:06 2017 +0800

--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  26 +++-
 .../org/apache/hadoop/hbase/RegionLoad.java |   2 +-
 .../org/apache/hadoop/hbase/ServerLoad.java |   6 +-
 .../hadoop/hbase/client/MetricsConnection.java  |   2 +-
 .../hadoop/hbase/client/RegionLoadStats.java|   9 ++
 .../hadoop/hbase/client/TableDescriptor.java|   2 +-
 .../hbase/client/TableDescriptorBuilder.java|   8 +-
 .../backoff/ExponentialClientBackoffPolicy.java |   2 +-
 .../hbase/client/backoff/ServerStatistics.java  |   4 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |   2 +-
 .../client/TestClientExponentialBackoff.java|   4 +-
 .../regionserver/MetricsRegionServerSource.java |   2 +-
 .../MetricsRegionServerWrapper.java |   6 +-
 .../regionserver/MetricsRegionWrapper.java  |   2 +-
 .../MetricsTableWrapperAggregate.java   |   2 +-
 .../MetricsRegionServerSourceImpl.java  |   4 +-
 .../regionserver/MetricsRegionSourceImpl.java   |   2 +-
 .../regionserver/MetricsTableSourceImpl.java|   2 +-
 .../TestMetricsRegionSourceImpl.java|   2 +-
 .../TestMetricsTableSourceImpl.java |   2 +-
 .../src/main/protobuf/Client.proto  |   6 +-
 .../src/main/protobuf/ClusterStatus.proto   |   2 +-
 hbase-protocol/src/main/protobuf/Client.proto   |   6 +-
 .../rest/model/StorageClusterStatusModel.java   |   8 +-
 .../protobuf/StorageClusterStatusMessage.proto  |   4 +-
 .../model/TestStorageClusterStatusModel.java|  12 +-
 .../tmpl/master/RegionServerListTmpl.jamon  |   4 +-
 .../tmpl/regionserver/RegionListTmpl.jamon  |   2 +-
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   |   4 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |   4 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java   |   4 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |  20 +--
 .../hadoop/hbase/io/util/MemorySizeUtil.java|   6 +-
 .../master/balancer/StochasticLoadBalancer.java |   6 +-
 .../org/apache/hadoop/hbase/mob/MobFile.java|   4 +-
 .../hbase/regionserver/AbstractMemStore.java|  14 +-
 .../regionserver/CellArrayImmutableSegment.java |   4 +-
 .../regionserver/CellChunkImmutableSegment.java |   4 +-
 .../hbase/regionserver/CompactingMemStore.java  |  18 +--
 .../hbase/regionserver/CompactionPipeline.java  |  18 +--
 .../regionserver/CompositeImmutableSegment.java |   4 +-
 .../regionserver/DefaultHeapMemoryTuner.java|   4 +-
 .../hbase/regionserver/DefaultMemStore.java |  14 +-
 .../regionserver/FlushLargeStoresPolicy.java|   2 +-
 .../FlushNonSloppyStoresFirstPolicy.java|   2 +-
 .../hbase/regionserver/FlushRequester.java  |   2 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 154 +--
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../hadoop/hbase/regionserver/HStore.java   |  22 +--
 .../hadoop/hbase/regionserver/HStoreFile.java   |   2 +-
 .../hbase/regionserver/HeapMemoryManager.java   |  26 ++--
 .../hadoop/hbase/regionserver/MemStore.java |  12 +-
 .../hbase/regionserver/MemStoreFlusher.java |  54 +++
 .../hadoop/hbase/regionserver/MemStoreSize.java | 107 +
 .../hadoop/hbase/regionserver/MemstoreSize.java | 107 -
 .../hbase/regionserver/MetricsRegionServer.java |   2 +-
 .../MetricsRegionServerWrapperImpl.java |  10 +-
 .../regionserver/MetricsRegionWrapperImpl.java  |   2 +-
 .../MetricsTableWrapperAggregateImpl.java   |  12 +-
 .../hbase/regionserver/MutableSegment.java  |   6 +-
 .../hadoop/hbase/regionserver/Region.java   |   2 +-
 .../regionserver/RegionServerAccounting.java|  64 
 .../regionserver/RegionServicesForStores.java   |  16 +-
 .../hadoop/hbase/regionserver/Segment.java  |   8 +-
 .../hbase/regionserver/SegmentFactory.java  |   3 +-
 .../apache/hadoop/hbase/regionserver/Store.java |  10 +-
 .../regionserver/StoreConfigInformation.java|   2 +-
 .../hadoop/hbase/regionserver/StoreFile.java|   2 +-
 .../hadoop/hbase/regionserver/StoreUtils.java   |   4 +-
 .../hbase/regionserver/StripeStoreConfig.java   |   2 +-
 .../compactions/CompactionConfiguration.java|   8 +-
 .../regionserver/compactions/Compactor.java |  12 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   8 +-
 .../hbase/regionserver/wal/FSWALEntry.java  |   2 +-
 .../RegionReplicaReplicationEndpoint.java 

[2/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 354b056..2ada5a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -81,8 +81,8 @@ public class StoreUtils {
* were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
* put operation, and thus do not have a memstoreTS associated with them.
*/
-  public static OptionalLong getMaxMemstoreTSInList(Collection sfs) {
-    return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemstoreTS)
+  public static OptionalLong getMaxMemStoreTSInList(Collection sfs) {
+    return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS)
 .max();
   }
 
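The renamed getMaxMemStoreTSInList above keeps its original logic: bulk-loaded files are filtered out because they carry no memstoreTS, and the maximum of the remaining files' timestamps is returned as an OptionalLong. A minimal standalone sketch of that stream, using a toy FakeStoreFile type instead of HStoreFile (the toy type and its fields are assumptions for illustration only):

import java.util.Arrays;
import java.util.Collection;
import java.util.OptionalLong;

// Toy stand-in for HStoreFile, illustrative only.
class FakeStoreFile {
  final boolean bulkLoadResult;
  final long maxMemStoreTS;
  FakeStoreFile(boolean bulkLoadResult, long maxMemStoreTS) {
    this.bulkLoadResult = bulkLoadResult;
    this.maxMemStoreTS = maxMemStoreTS;
  }
  boolean isBulkLoadResult() { return bulkLoadResult; }
  long getMaxMemStoreTS() { return maxMemStoreTS; }
}

public class MaxMemStoreTsSketch {
  // Mirrors the shape of StoreUtils.getMaxMemStoreTSInList: bulk-loaded files are
  // skipped because they carry no memstoreTS, then the maximum of the rest is taken.
  static OptionalLong getMaxMemStoreTSInList(Collection<FakeStoreFile> sfs) {
    return sfs.stream()
        .filter(sf -> !sf.isBulkLoadResult())
        .mapToLong(FakeStoreFile::getMaxMemStoreTS)
        .max();
  }

  public static void main(String[] args) {
    Collection<FakeStoreFile> files = Arrays.asList(
        new FakeStoreFile(false, 10L),
        new FakeStoreFile(true, 99L),   // bulk loaded: ignored
        new FakeStoreFile(false, 42L));
    System.out.println(getMaxMemStoreTSInList(files)); // OptionalLong[42]
  }
}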

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
index 169d1d8..eb2a9b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
@@ -96,7 +96,7 @@ public class StripeStoreConfig {
 this.splitPartCount = splitPartCount;
 // Arbitrary default split size - 4 times the size of one L0 compaction.
// If we flush into L0 there's no split compaction, but for default value it is ok.
-double flushSize = sci.getMemstoreFlushSize();
+double flushSize = sci.getMemStoreFlushSize();
 if (flushSize == 0) {
   flushSize = 128 * 1024 * 1024;
 }

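The guard in the StripeStoreConfig hunk above is the only behaviour at this spot: when the store reports a memstore flush size of 0, a hard-coded 128 MB stands in before the split size is derived from it (per the comment, four times one L0 compaction; that derivation is outside the hunk). A small sketch of just that fallback, with illustrative names:

public class FlushSizeFallbackSketch {
  static final double DEFAULT_FLUSH_SIZE_BYTES = 128 * 1024 * 1024; // 128 MB, as in the hunk

  // Mirrors the guard in StripeStoreConfig: a zero flush size falls back to 128 MB.
  static double effectiveFlushSize(double configuredMemStoreFlushSize) {
    double flushSize = configuredMemStoreFlushSize;
    if (flushSize == 0) {
      flushSize = DEFAULT_FLUSH_SIZE_BYTES;
    }
    return flushSize;
  }

  public static void main(String[] args) {
    System.out.println(effectiveFlushSize(0));        // falls back to 128 MB
    System.out.println(effectiveFlushSize(64 << 20)); // configured 64 MB is kept
  }
}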
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index fe9ae30..b8194eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -115,10 +115,10 @@ public class CompactionConfiguration {
 this.storeConfigInfo = storeConfigInfo;
 
maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE);
-offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, 
-  maxCompactSize);  
+offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY,
+  maxCompactSize);
minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY,
-storeConfigInfo.getMemstoreFlushSize());
+storeConfigInfo.getMemStoreFlushSize());
minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY,
   /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
 maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10);
@@ -126,7 +126,7 @@ public class CompactionConfiguration {
offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F);
 
throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle",
-  2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
+  2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize());
majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7);
// Make it 0.5 so jitter has us fall evenly either side of when the compaction should run
majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);

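Two of the values in the CompactionConfiguration hunk above are tied to the (renamed) memstore flush size: minCompactSize defaults to it, and the compaction throttle point is 2 * maxFilesToCompact * flush size. With the default maxFilesToCompact of 10 visible in the hunk and an assumed 128 MB flush size, the throttle point works out to 2560 MB; the sketch below only does that arithmetic and is not the real configuration code.

public class CompactionThrottleSketch {
  public static void main(String[] args) {
    long memStoreFlushSize = 128L * 1024 * 1024; // assumed example value (128 MB)
    int maxFilesToCompact = 10;                  // default shown in the hunk above

    // Mirrors: throttlePoint = 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()
    long throttlePoint = 2L * maxFilesToCompact * memStoreFlushSize;

    System.out.println(throttlePoint);                 // 2684354560 bytes
    System.out.println(throttlePoint / (1024 * 1024)); // 2560 (MB)
  }
}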
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 2c9a519..5865ed5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop

[1/4] hbase git commit: HBASE-18897 Substitute MemStore for Memstore

2017-10-02 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 869b90c61 -> d35d8376a


http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index a0d953e..e4f7663 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -144,7 +143,7 @@ public class TestPerColumnFamilyFlush {
   }
 }
 
-long totalMemstoreSize = region.getMemstoreSize();
+long totalMemstoreSize = region.getMemStoreSize();
 
 // Find the smallest LSNs for edits wrt to each CF.
 long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1);
@@ -152,13 +151,13 @@ public class TestPerColumnFamilyFlush {
 long smallestSeqCF3 = region.getOldestSeqIdOfStore(FAMILY3);
 
 // Find the sizes of the memstores of each CF.
-MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
-MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
-MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
+MemStoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+MemStoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+MemStoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
 
 // Get the overall smallest LSN in the region's memstores.
 long smallestSeqInRegionCurrentMemstore = getWAL(region)
-  .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+  .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // The overall smallest LSN in the region's memstores should be the same as
 // the LSN of the smallest edit in CF1
@@ -180,16 +179,16 @@ public class TestPerColumnFamilyFlush {
 region.flush(false);
 
 // Will use these to check if anything changed.
-MemstoreSize oldCF2MemstoreSize = cf2MemstoreSize;
-MemstoreSize oldCF3MemstoreSize = cf3MemstoreSize;
+MemStoreSize oldCF2MemstoreSize = cf2MemstoreSize;
+MemStoreSize oldCF3MemstoreSize = cf3MemstoreSize;
 
 // Recalculate everything
 cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
 cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
 cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
-totalMemstoreSize = region.getMemstoreSize();
+totalMemstoreSize = region.getMemStoreSize();
 smallestSeqInRegionCurrentMemstore = getWAL(region)
-  .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+  .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // We should have cleared out only CF1, since we chose the flush thresholds
 // and number of puts accordingly.
@@ -225,9 +224,9 @@ public class TestPerColumnFamilyFlush {
 cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
 cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
 cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
-totalMemstoreSize = region.getMemstoreSize();
+totalMemstoreSize = region.getMemStoreSize();
 smallestSeqInRegionCurrentMemstore = getWAL(region)
-  .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+  .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
 // CF1 and CF2, both should be absent.
 assertEquals(0, cf1MemstoreSize.getDataSize());
@@ -261,7 +260,7 @@ public class TestPerColumnFamilyFlush {
 
 // Since we won't find any CF above the threshold, and hence no specific
 // store to flush, we should flush all the memstores.
-assertEquals(0, region.getMemstoreSize());
+assertEquals(0, region.getMemStoreSize());
 HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
@@ -285,12 +284,12 @@ public class TestPerColumnFamilyFlush {
   }
 }
 
-long totalMemstoreSize = region.getMemstoreSize();
+long totalMemstoreSize = region.getMemStoreSize();
 
 // Find the sizes of the memstores of each CF.
-MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
-MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
-
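The TestPerColumnFamilyFlush hunks above repeatedly re-read each column family's memstore size and the region total after selective flushes, expecting only families over the flush threshold (CF1 in the first pass) to drop to zero while the others are untouched. That bookkeeping can be mimicked with a toy model; the sketch below uses a plain map of family name to byte count and is purely illustrative, not HBase's flush policy or the MemStoreSize API.

import java.util.LinkedHashMap;
import java.util.Map;

public class PerFamilyFlushSketch {
  // family name -> memstore data size in bytes (toy model only)
  private final Map<String, Long> familySizes = new LinkedHashMap<>();

  void add(String family, long bytes) {
    familySizes.merge(family, bytes, Long::sum);
  }

  // "Selective" flush: only families at or above the threshold are cleared,
  // which is the behaviour the test above verifies for CF1 vs CF2/CF3.
  void flushLargeFamilies(long threshold) {
    familySizes.replaceAll((family, size) -> size >= threshold ? 0L : size);
  }

  long totalMemStoreSize() {
    return familySizes.values().stream().mapToLong(Long::longValue).sum();
  }

  public static void main(String[] args) {
    PerFamilyFlushSketch region = new PerFamilyFlushSketch();
    region.add("f1", 300);
    region.add("f2", 50);
    region.add("f3", 20);
    region.flushLargeFamilies(100);
    // f1 cleared, f2 and f3 untouched; the region total shrinks accordingly.
    System.out.println(region.familySizes);         // {f1=0, f2=50, f3=20}
    System.out.println(region.totalMemStoreSize()); // 70
  }
}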